author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-28 11:46:44 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-28 11:46:44 -0500
commit     2b2b15c32ae951c3609c01e74d22d6de64b2595c (patch)
tree       f23a1e0f6929312cc9b7f742dffc2a4999283c06
parent     bf3d846b783327359ddc4bd4f52627b36abb4d1d (diff)
parent     ed7e5423014ad89720fcf315c0b73f2c5d0c7bd2 (diff)
Merge tag 'nfs-for-3.14-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs
Pull NFS client updates from Trond Myklebust:
"Highlights include:
- stable fix for an infinite loop in RPC state machine
- stable fix for a use after free situation in the NFSv4 trunking discovery
- stable fix for error handling in the NFSv4 trunking discovery
- stable fix for the page write update code
- stable fix for the NFSv4.1 mount time security negotiation
- stable fix for the NFSv4 open code.
- O_DIRECT locking fixes
- fix an Oops in the pnfs file commit code
- RPC layer needs finer grained handling of connection errors
- more RPC GSS upcall fixes"
* tag 'nfs-for-3.14-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (30 commits)
pnfs: Proper delay for NFS4ERR_RECALLCONFLICT in layout_get_done
pnfs: fix BUG in filelayout_recover_commit_reqs
nfs4: fix discover_server_trunking use after free
NFSv4.1: Handle errors correctly in nfs41_walk_client_list
nfs: always make sure page is up-to-date before extending a write to cover the entire page
nfs: page cache invalidation for dio
nfs: take i_mutex during direct I/O reads
nfs: merge nfs_direct_write into nfs_file_direct_write
nfs: merge nfs_direct_read into nfs_file_direct_read
nfs: increment i_dio_count for reads, too
nfs: defer inode_dio_done call until size update is done
nfs: fix size updates for aio writes
nfs4.1: properly handle ENOTSUP in SECINFO_NO_NAME
NFSv4.1: Fix a race in nfs4_write_inode
NFSv4.1: Don't trust attributes if a pNFS LAYOUTCOMMIT is outstanding
point to the right include file in a comment (left over from a9004abc3)
NFS: dprintk() should not print negative fileids and inode numbers
nfs: fix dead code of ipv6_addr_scope
sunrpc: Fix infinite loop in RPC state machine
SUNRPC: Add tracepoint for socket errors
...
-rw-r--r--  fs/nfs/dir.c                        |  18
-rw-r--r--  fs/nfs/direct.c                     | 279
-rw-r--r--  fs/nfs/file.c                       |   6
-rw-r--r--  fs/nfs/inode.c                      |  48
-rw-r--r--  fs/nfs/nfs4client.c                 |  25
-rw-r--r--  fs/nfs/nfs4filelayout.c             |  16
-rw-r--r--  fs/nfs/nfs4filelayoutdev.c          |   2
-rw-r--r--  fs/nfs/nfs4proc.c                   |  43
-rw-r--r--  fs/nfs/nfs4state.c                  |   4
-rw-r--r--  fs/nfs/nfs4super.c                  |  14
-rw-r--r--  fs/nfs/nfs4xdr.c                    |  47
-rw-r--r--  fs/nfs/pnfs.c                       |  67
-rw-r--r--  fs/nfs/pnfs.h                       |  16
-rw-r--r--  fs/nfs/read.c                       |  12
-rw-r--r--  fs/nfs/write.c                      |  19
-rw-r--r--  include/linux/sunrpc/rpc_pipe_fs.h  |   5
-rw-r--r--  include/trace/events/sunrpc.h       |   1
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c      |  17
-rw-r--r--  net/sunrpc/clnt.c                   |  15
-rw-r--r--  net/sunrpc/netns.h                  |   3
-rw-r--r--  net/sunrpc/rpc_pipe.c               | 169
-rw-r--r--  net/sunrpc/sunrpc_syms.c            |   8
-rw-r--r--  net/sunrpc/xprt.c                   |   5
-rw-r--r--  net/sunrpc/xprtsock.c               |  42
24 files changed, 595 insertions, 286 deletions
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 812154aff981..b266f734bd53 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1404,7 +1404,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry, | |||
1404 | /* Expect a negative dentry */ | 1404 | /* Expect a negative dentry */ |
1405 | BUG_ON(dentry->d_inode); | 1405 | BUG_ON(dentry->d_inode); |
1406 | 1406 | ||
1407 | dfprintk(VFS, "NFS: atomic_open(%s/%ld), %pd\n", | 1407 | dfprintk(VFS, "NFS: atomic_open(%s/%lu), %pd\n", |
1408 | dir->i_sb->s_id, dir->i_ino, dentry); | 1408 | dir->i_sb->s_id, dir->i_ino, dentry); |
1409 | 1409 | ||
1410 | err = nfs_check_flags(open_flags); | 1410 | err = nfs_check_flags(open_flags); |
@@ -1594,7 +1594,7 @@ int nfs_create(struct inode *dir, struct dentry *dentry, | |||
1594 | int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT; | 1594 | int open_flags = excl ? O_CREAT | O_EXCL : O_CREAT; |
1595 | int error; | 1595 | int error; |
1596 | 1596 | ||
1597 | dfprintk(VFS, "NFS: create(%s/%ld), %pd\n", | 1597 | dfprintk(VFS, "NFS: create(%s/%lu), %pd\n", |
1598 | dir->i_sb->s_id, dir->i_ino, dentry); | 1598 | dir->i_sb->s_id, dir->i_ino, dentry); |
1599 | 1599 | ||
1600 | attr.ia_mode = mode; | 1600 | attr.ia_mode = mode; |
@@ -1621,7 +1621,7 @@ nfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rdev) | |||
1621 | struct iattr attr; | 1621 | struct iattr attr; |
1622 | int status; | 1622 | int status; |
1623 | 1623 | ||
1624 | dfprintk(VFS, "NFS: mknod(%s/%ld), %pd\n", | 1624 | dfprintk(VFS, "NFS: mknod(%s/%lu), %pd\n", |
1625 | dir->i_sb->s_id, dir->i_ino, dentry); | 1625 | dir->i_sb->s_id, dir->i_ino, dentry); |
1626 | 1626 | ||
1627 | if (!new_valid_dev(rdev)) | 1627 | if (!new_valid_dev(rdev)) |
@@ -1650,7 +1650,7 @@ int nfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) | |||
1650 | struct iattr attr; | 1650 | struct iattr attr; |
1651 | int error; | 1651 | int error; |
1652 | 1652 | ||
1653 | dfprintk(VFS, "NFS: mkdir(%s/%ld), %pd\n", | 1653 | dfprintk(VFS, "NFS: mkdir(%s/%lu), %pd\n", |
1654 | dir->i_sb->s_id, dir->i_ino, dentry); | 1654 | dir->i_sb->s_id, dir->i_ino, dentry); |
1655 | 1655 | ||
1656 | attr.ia_valid = ATTR_MODE; | 1656 | attr.ia_valid = ATTR_MODE; |
@@ -1678,7 +1678,7 @@ int nfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
1678 | { | 1678 | { |
1679 | int error; | 1679 | int error; |
1680 | 1680 | ||
1681 | dfprintk(VFS, "NFS: rmdir(%s/%ld), %pd\n", | 1681 | dfprintk(VFS, "NFS: rmdir(%s/%lu), %pd\n", |
1682 | dir->i_sb->s_id, dir->i_ino, dentry); | 1682 | dir->i_sb->s_id, dir->i_ino, dentry); |
1683 | 1683 | ||
1684 | trace_nfs_rmdir_enter(dir, dentry); | 1684 | trace_nfs_rmdir_enter(dir, dentry); |
@@ -1747,7 +1747,7 @@ int nfs_unlink(struct inode *dir, struct dentry *dentry) | |||
1747 | int error; | 1747 | int error; |
1748 | int need_rehash = 0; | 1748 | int need_rehash = 0; |
1749 | 1749 | ||
1750 | dfprintk(VFS, "NFS: unlink(%s/%ld, %pd)\n", dir->i_sb->s_id, | 1750 | dfprintk(VFS, "NFS: unlink(%s/%lu, %pd)\n", dir->i_sb->s_id, |
1751 | dir->i_ino, dentry); | 1751 | dir->i_ino, dentry); |
1752 | 1752 | ||
1753 | trace_nfs_unlink_enter(dir, dentry); | 1753 | trace_nfs_unlink_enter(dir, dentry); |
@@ -1798,7 +1798,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |||
1798 | unsigned int pathlen = strlen(symname); | 1798 | unsigned int pathlen = strlen(symname); |
1799 | int error; | 1799 | int error; |
1800 | 1800 | ||
1801 | dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s)\n", dir->i_sb->s_id, | 1801 | dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s)\n", dir->i_sb->s_id, |
1802 | dir->i_ino, dentry, symname); | 1802 | dir->i_ino, dentry, symname); |
1803 | 1803 | ||
1804 | if (pathlen > PAGE_SIZE) | 1804 | if (pathlen > PAGE_SIZE) |
@@ -1821,7 +1821,7 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) | |||
1821 | error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr); | 1821 | error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr); |
1822 | trace_nfs_symlink_exit(dir, dentry, error); | 1822 | trace_nfs_symlink_exit(dir, dentry, error); |
1823 | if (error != 0) { | 1823 | if (error != 0) { |
1824 | dfprintk(VFS, "NFS: symlink(%s/%ld, %pd, %s) error %d\n", | 1824 | dfprintk(VFS, "NFS: symlink(%s/%lu, %pd, %s) error %d\n", |
1825 | dir->i_sb->s_id, dir->i_ino, | 1825 | dir->i_sb->s_id, dir->i_ino, |
1826 | dentry, symname, error); | 1826 | dentry, symname, error); |
1827 | d_drop(dentry); | 1827 | d_drop(dentry); |
@@ -2304,7 +2304,7 @@ out: | |||
2304 | if (!res && (mask & MAY_EXEC) && !execute_ok(inode)) | 2304 | if (!res && (mask & MAY_EXEC) && !execute_ok(inode)) |
2305 | res = -EACCES; | 2305 | res = -EACCES; |
2306 | 2306 | ||
2307 | dfprintk(VFS, "NFS: permission(%s/%ld), mask=0x%x, res=%d\n", | 2307 | dfprintk(VFS, "NFS: permission(%s/%lu), mask=0x%x, res=%d\n", |
2308 | inode->i_sb->s_id, inode->i_ino, mask, res); | 2308 | inode->i_sb->s_id, inode->i_ino, mask, res); |
2309 | return res; | 2309 | return res; |
2310 | out_notsup: | 2310 | out_notsup: |
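Editor's note: the dir.c hunks above (and the similar file.c and inode.c hunks below) are format-specifier fixes. inode->i_ino and NFS fileids are unsigned long values, so printing them with %ld can display large numbers as negative. A tiny standalone demonstration of the difference, using a made-up fileid (this is not kernel code):

```c
/* Demo of why the hunks switch dfprintk() from %ld to %lu: an unsigned long
 * printed with %ld is misrepresented once it exceeds LONG_MAX. */
#include <stdio.h>

int main(void)
{
    unsigned long ino = 18446744073709551041UL; /* hypothetical large fileid */

    printf("with %%ld: %ld\n", ino);  /* wrong specifier: prints a negative number */
    printf("with %%lu: %lu\n", ino);  /* correct specifier: prints the real value */
    return 0;
}
```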
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index d71d66c9e0a1..b8797ae6831f 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -222,14 +222,31 @@ out: | |||
222 | * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust | 222 | * Synchronous I/O uses a stack-allocated iocb. Thus we can't trust |
223 | * the iocb is still valid here if this is a synchronous request. | 223 | * the iocb is still valid here if this is a synchronous request. |
224 | */ | 224 | */ |
225 | static void nfs_direct_complete(struct nfs_direct_req *dreq) | 225 | static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write) |
226 | { | 226 | { |
227 | struct inode *inode = dreq->inode; | ||
228 | |||
229 | if (dreq->iocb && write) { | ||
230 | loff_t pos = dreq->iocb->ki_pos + dreq->count; | ||
231 | |||
232 | spin_lock(&inode->i_lock); | ||
233 | if (i_size_read(inode) < pos) | ||
234 | i_size_write(inode, pos); | ||
235 | spin_unlock(&inode->i_lock); | ||
236 | } | ||
237 | |||
238 | if (write) | ||
239 | nfs_zap_mapping(inode, inode->i_mapping); | ||
240 | |||
241 | inode_dio_done(inode); | ||
242 | |||
227 | if (dreq->iocb) { | 243 | if (dreq->iocb) { |
228 | long res = (long) dreq->error; | 244 | long res = (long) dreq->error; |
229 | if (!res) | 245 | if (!res) |
230 | res = (long) dreq->count; | 246 | res = (long) dreq->count; |
231 | aio_complete(dreq->iocb, res, 0); | 247 | aio_complete(dreq->iocb, res, 0); |
232 | } | 248 | } |
249 | |||
233 | complete_all(&dreq->completion); | 250 | complete_all(&dreq->completion); |
234 | 251 | ||
235 | nfs_direct_req_release(dreq); | 252 | nfs_direct_req_release(dreq); |
@@ -237,9 +254,9 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq) | |||
237 | 254 | ||
238 | static void nfs_direct_readpage_release(struct nfs_page *req) | 255 | static void nfs_direct_readpage_release(struct nfs_page *req) |
239 | { | 256 | { |
240 | dprintk("NFS: direct read done (%s/%lld %d@%lld)\n", | 257 | dprintk("NFS: direct read done (%s/%llu %d@%lld)\n", |
241 | req->wb_context->dentry->d_inode->i_sb->s_id, | 258 | req->wb_context->dentry->d_inode->i_sb->s_id, |
242 | (long long)NFS_FILEID(req->wb_context->dentry->d_inode), | 259 | (unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode), |
243 | req->wb_bytes, | 260 | req->wb_bytes, |
244 | (long long)req_offset(req)); | 261 | (long long)req_offset(req)); |
245 | nfs_release_request(req); | 262 | nfs_release_request(req); |
@@ -272,7 +289,7 @@ static void nfs_direct_read_completion(struct nfs_pgio_header *hdr) | |||
272 | } | 289 | } |
273 | out_put: | 290 | out_put: |
274 | if (put_dreq(dreq)) | 291 | if (put_dreq(dreq)) |
275 | nfs_direct_complete(dreq); | 292 | nfs_direct_complete(dreq, false); |
276 | hdr->release(hdr); | 293 | hdr->release(hdr); |
277 | } | 294 | } |
278 | 295 | ||
@@ -402,6 +419,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, | |||
402 | loff_t pos, bool uio) | 419 | loff_t pos, bool uio) |
403 | { | 420 | { |
404 | struct nfs_pageio_descriptor desc; | 421 | struct nfs_pageio_descriptor desc; |
422 | struct inode *inode = dreq->inode; | ||
405 | ssize_t result = -EINVAL; | 423 | ssize_t result = -EINVAL; |
406 | size_t requested_bytes = 0; | 424 | size_t requested_bytes = 0; |
407 | unsigned long seg; | 425 | unsigned long seg; |
@@ -410,6 +428,7 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, | |||
410 | &nfs_direct_read_completion_ops); | 428 | &nfs_direct_read_completion_ops); |
411 | get_dreq(dreq); | 429 | get_dreq(dreq); |
412 | desc.pg_dreq = dreq; | 430 | desc.pg_dreq = dreq; |
431 | atomic_inc(&inode->i_dio_count); | ||
413 | 432 | ||
414 | for (seg = 0; seg < nr_segs; seg++) { | 433 | for (seg = 0; seg < nr_segs; seg++) { |
415 | const struct iovec *vec = &iov[seg]; | 434 | const struct iovec *vec = &iov[seg]; |
@@ -429,26 +448,69 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, | |||
429 | * generic layer handle the completion. | 448 | * generic layer handle the completion. |
430 | */ | 449 | */ |
431 | if (requested_bytes == 0) { | 450 | if (requested_bytes == 0) { |
451 | inode_dio_done(inode); | ||
432 | nfs_direct_req_release(dreq); | 452 | nfs_direct_req_release(dreq); |
433 | return result < 0 ? result : -EIO; | 453 | return result < 0 ? result : -EIO; |
434 | } | 454 | } |
435 | 455 | ||
436 | if (put_dreq(dreq)) | 456 | if (put_dreq(dreq)) |
437 | nfs_direct_complete(dreq); | 457 | nfs_direct_complete(dreq, false); |
438 | return 0; | 458 | return 0; |
439 | } | 459 | } |
440 | 460 | ||
441 | static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov, | 461 | /** |
442 | unsigned long nr_segs, loff_t pos, bool uio) | 462 | * nfs_file_direct_read - file direct read operation for NFS files |
463 | * @iocb: target I/O control block | ||
464 | * @iov: vector of user buffers into which to read data | ||
465 | * @nr_segs: size of iov vector | ||
466 | * @pos: byte offset in file where reading starts | ||
467 | * | ||
468 | * We use this function for direct reads instead of calling | ||
469 | * generic_file_aio_read() in order to avoid gfar's check to see if | ||
470 | * the request starts before the end of the file. For that check | ||
471 | * to work, we must generate a GETATTR before each direct read, and | ||
472 | * even then there is a window between the GETATTR and the subsequent | ||
473 | * READ where the file size could change. Our preference is simply | ||
474 | * to do all reads the application wants, and the server will take | ||
475 | * care of managing the end of file boundary. | ||
476 | * | ||
477 | * This function also eliminates unnecessarily updating the file's | ||
478 | * atime locally, as the NFS server sets the file's atime, and this | ||
479 | * client must read the updated atime from the server back into its | ||
480 | * cache. | ||
481 | */ | ||
482 | ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov, | ||
483 | unsigned long nr_segs, loff_t pos, bool uio) | ||
443 | { | 484 | { |
444 | ssize_t result = -ENOMEM; | 485 | struct file *file = iocb->ki_filp; |
445 | struct inode *inode = iocb->ki_filp->f_mapping->host; | 486 | struct address_space *mapping = file->f_mapping; |
487 | struct inode *inode = mapping->host; | ||
446 | struct nfs_direct_req *dreq; | 488 | struct nfs_direct_req *dreq; |
447 | struct nfs_lock_context *l_ctx; | 489 | struct nfs_lock_context *l_ctx; |
490 | ssize_t result = -EINVAL; | ||
491 | size_t count; | ||
448 | 492 | ||
493 | count = iov_length(iov, nr_segs); | ||
494 | nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count); | ||
495 | |||
496 | dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n", | ||
497 | file, count, (long long) pos); | ||
498 | |||
499 | result = 0; | ||
500 | if (!count) | ||
501 | goto out; | ||
502 | |||
503 | mutex_lock(&inode->i_mutex); | ||
504 | result = nfs_sync_mapping(mapping); | ||
505 | if (result) | ||
506 | goto out_unlock; | ||
507 | |||
508 | task_io_account_read(count); | ||
509 | |||
510 | result = -ENOMEM; | ||
449 | dreq = nfs_direct_req_alloc(); | 511 | dreq = nfs_direct_req_alloc(); |
450 | if (dreq == NULL) | 512 | if (dreq == NULL) |
451 | goto out; | 513 | goto out_unlock; |
452 | 514 | ||
453 | dreq->inode = inode; | 515 | dreq->inode = inode; |
454 | dreq->bytes_left = iov_length(iov, nr_segs); | 516 | dreq->bytes_left = iov_length(iov, nr_segs); |
@@ -464,20 +526,26 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov, | |||
464 | 526 | ||
465 | NFS_I(inode)->read_io += iov_length(iov, nr_segs); | 527 | NFS_I(inode)->read_io += iov_length(iov, nr_segs); |
466 | result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio); | 528 | result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos, uio); |
467 | if (!result) | 529 | |
530 | mutex_unlock(&inode->i_mutex); | ||
531 | |||
532 | if (!result) { | ||
468 | result = nfs_direct_wait(dreq); | 533 | result = nfs_direct_wait(dreq); |
534 | if (result > 0) | ||
535 | iocb->ki_pos = pos + result; | ||
536 | } | ||
537 | |||
538 | nfs_direct_req_release(dreq); | ||
539 | return result; | ||
540 | |||
469 | out_release: | 541 | out_release: |
470 | nfs_direct_req_release(dreq); | 542 | nfs_direct_req_release(dreq); |
543 | out_unlock: | ||
544 | mutex_unlock(&inode->i_mutex); | ||
471 | out: | 545 | out: |
472 | return result; | 546 | return result; |
473 | } | 547 | } |
474 | 548 | ||
475 | static void nfs_inode_dio_write_done(struct inode *inode) | ||
476 | { | ||
477 | nfs_zap_mapping(inode, inode->i_mapping); | ||
478 | inode_dio_done(inode); | ||
479 | } | ||
480 | |||
481 | #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) | 549 | #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) |
482 | static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) | 550 | static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) |
483 | { | 551 | { |
@@ -593,8 +661,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work) | |||
593 | nfs_direct_write_reschedule(dreq); | 661 | nfs_direct_write_reschedule(dreq); |
594 | break; | 662 | break; |
595 | default: | 663 | default: |
596 | nfs_inode_dio_write_done(dreq->inode); | 664 | nfs_direct_complete(dreq, true); |
597 | nfs_direct_complete(dreq); | ||
598 | } | 665 | } |
599 | } | 666 | } |
600 | 667 | ||
@@ -610,8 +677,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work) | |||
610 | 677 | ||
611 | static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) | 678 | static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) |
612 | { | 679 | { |
613 | nfs_inode_dio_write_done(inode); | 680 | nfs_direct_complete(dreq, true); |
614 | nfs_direct_complete(dreq); | ||
615 | } | 681 | } |
616 | #endif | 682 | #endif |
617 | 683 | ||
@@ -842,93 +908,6 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, | |||
842 | return 0; | 908 | return 0; |
843 | } | 909 | } |
844 | 910 | ||
845 | static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov, | ||
846 | unsigned long nr_segs, loff_t pos, | ||
847 | size_t count, bool uio) | ||
848 | { | ||
849 | ssize_t result = -ENOMEM; | ||
850 | struct inode *inode = iocb->ki_filp->f_mapping->host; | ||
851 | struct nfs_direct_req *dreq; | ||
852 | struct nfs_lock_context *l_ctx; | ||
853 | |||
854 | dreq = nfs_direct_req_alloc(); | ||
855 | if (!dreq) | ||
856 | goto out; | ||
857 | |||
858 | dreq->inode = inode; | ||
859 | dreq->bytes_left = count; | ||
860 | dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); | ||
861 | l_ctx = nfs_get_lock_context(dreq->ctx); | ||
862 | if (IS_ERR(l_ctx)) { | ||
863 | result = PTR_ERR(l_ctx); | ||
864 | goto out_release; | ||
865 | } | ||
866 | dreq->l_ctx = l_ctx; | ||
867 | if (!is_sync_kiocb(iocb)) | ||
868 | dreq->iocb = iocb; | ||
869 | |||
870 | result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio); | ||
871 | if (!result) | ||
872 | result = nfs_direct_wait(dreq); | ||
873 | out_release: | ||
874 | nfs_direct_req_release(dreq); | ||
875 | out: | ||
876 | return result; | ||
877 | } | ||
878 | |||
879 | /** | ||
880 | * nfs_file_direct_read - file direct read operation for NFS files | ||
881 | * @iocb: target I/O control block | ||
882 | * @iov: vector of user buffers into which to read data | ||
883 | * @nr_segs: size of iov vector | ||
884 | * @pos: byte offset in file where reading starts | ||
885 | * | ||
886 | * We use this function for direct reads instead of calling | ||
887 | * generic_file_aio_read() in order to avoid gfar's check to see if | ||
888 | * the request starts before the end of the file. For that check | ||
889 | * to work, we must generate a GETATTR before each direct read, and | ||
890 | * even then there is a window between the GETATTR and the subsequent | ||
891 | * READ where the file size could change. Our preference is simply | ||
892 | * to do all reads the application wants, and the server will take | ||
893 | * care of managing the end of file boundary. | ||
894 | * | ||
895 | * This function also eliminates unnecessarily updating the file's | ||
896 | * atime locally, as the NFS server sets the file's atime, and this | ||
897 | * client must read the updated atime from the server back into its | ||
898 | * cache. | ||
899 | */ | ||
900 | ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov, | ||
901 | unsigned long nr_segs, loff_t pos, bool uio) | ||
902 | { | ||
903 | ssize_t retval = -EINVAL; | ||
904 | struct file *file = iocb->ki_filp; | ||
905 | struct address_space *mapping = file->f_mapping; | ||
906 | size_t count; | ||
907 | |||
908 | count = iov_length(iov, nr_segs); | ||
909 | nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count); | ||
910 | |||
911 | dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n", | ||
912 | file, count, (long long) pos); | ||
913 | |||
914 | retval = 0; | ||
915 | if (!count) | ||
916 | goto out; | ||
917 | |||
918 | retval = nfs_sync_mapping(mapping); | ||
919 | if (retval) | ||
920 | goto out; | ||
921 | |||
922 | task_io_account_read(count); | ||
923 | |||
924 | retval = nfs_direct_read(iocb, iov, nr_segs, pos, uio); | ||
925 | if (retval > 0) | ||
926 | iocb->ki_pos = pos + retval; | ||
927 | |||
928 | out: | ||
929 | return retval; | ||
930 | } | ||
931 | |||
932 | /** | 911 | /** |
933 | * nfs_file_direct_write - file direct write operation for NFS files | 912 | * nfs_file_direct_write - file direct write operation for NFS files |
934 | * @iocb: target I/O control block | 913 | * @iocb: target I/O control block |
@@ -954,46 +933,96 @@ out: | |||
954 | ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov, | 933 | ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov, |
955 | unsigned long nr_segs, loff_t pos, bool uio) | 934 | unsigned long nr_segs, loff_t pos, bool uio) |
956 | { | 935 | { |
957 | ssize_t retval = -EINVAL; | 936 | ssize_t result = -EINVAL; |
958 | struct file *file = iocb->ki_filp; | 937 | struct file *file = iocb->ki_filp; |
959 | struct address_space *mapping = file->f_mapping; | 938 | struct address_space *mapping = file->f_mapping; |
939 | struct inode *inode = mapping->host; | ||
940 | struct nfs_direct_req *dreq; | ||
941 | struct nfs_lock_context *l_ctx; | ||
942 | loff_t end; | ||
960 | size_t count; | 943 | size_t count; |
961 | 944 | ||
962 | count = iov_length(iov, nr_segs); | 945 | count = iov_length(iov, nr_segs); |
946 | end = (pos + count - 1) >> PAGE_CACHE_SHIFT; | ||
947 | |||
963 | nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count); | 948 | nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count); |
964 | 949 | ||
965 | dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", | 950 | dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n", |
966 | file, count, (long long) pos); | 951 | file, count, (long long) pos); |
967 | 952 | ||
968 | retval = generic_write_checks(file, &pos, &count, 0); | 953 | result = generic_write_checks(file, &pos, &count, 0); |
969 | if (retval) | 954 | if (result) |
970 | goto out; | 955 | goto out; |
971 | 956 | ||
972 | retval = -EINVAL; | 957 | result = -EINVAL; |
973 | if ((ssize_t) count < 0) | 958 | if ((ssize_t) count < 0) |
974 | goto out; | 959 | goto out; |
975 | retval = 0; | 960 | result = 0; |
976 | if (!count) | 961 | if (!count) |
977 | goto out; | 962 | goto out; |
978 | 963 | ||
979 | retval = nfs_sync_mapping(mapping); | 964 | mutex_lock(&inode->i_mutex); |
980 | if (retval) | 965 | |
981 | goto out; | 966 | result = nfs_sync_mapping(mapping); |
967 | if (result) | ||
968 | goto out_unlock; | ||
969 | |||
970 | if (mapping->nrpages) { | ||
971 | result = invalidate_inode_pages2_range(mapping, | ||
972 | pos >> PAGE_CACHE_SHIFT, end); | ||
973 | if (result) | ||
974 | goto out_unlock; | ||
975 | } | ||
982 | 976 | ||
983 | task_io_account_write(count); | 977 | task_io_account_write(count); |
984 | 978 | ||
985 | retval = nfs_direct_write(iocb, iov, nr_segs, pos, count, uio); | 979 | result = -ENOMEM; |
986 | if (retval > 0) { | 980 | dreq = nfs_direct_req_alloc(); |
987 | struct inode *inode = mapping->host; | 981 | if (!dreq) |
982 | goto out_unlock; | ||
988 | 983 | ||
989 | iocb->ki_pos = pos + retval; | 984 | dreq->inode = inode; |
990 | spin_lock(&inode->i_lock); | 985 | dreq->bytes_left = count; |
991 | if (i_size_read(inode) < iocb->ki_pos) | 986 | dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); |
992 | i_size_write(inode, iocb->ki_pos); | 987 | l_ctx = nfs_get_lock_context(dreq->ctx); |
993 | spin_unlock(&inode->i_lock); | 988 | if (IS_ERR(l_ctx)) { |
989 | result = PTR_ERR(l_ctx); | ||
990 | goto out_release; | ||
991 | } | ||
992 | dreq->l_ctx = l_ctx; | ||
993 | if (!is_sync_kiocb(iocb)) | ||
994 | dreq->iocb = iocb; | ||
995 | |||
996 | result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, uio); | ||
997 | |||
998 | if (mapping->nrpages) { | ||
999 | invalidate_inode_pages2_range(mapping, | ||
1000 | pos >> PAGE_CACHE_SHIFT, end); | ||
994 | } | 1001 | } |
1002 | |||
1003 | mutex_unlock(&inode->i_mutex); | ||
1004 | |||
1005 | if (!result) { | ||
1006 | result = nfs_direct_wait(dreq); | ||
1007 | if (result > 0) { | ||
1008 | struct inode *inode = mapping->host; | ||
1009 | |||
1010 | iocb->ki_pos = pos + result; | ||
1011 | spin_lock(&inode->i_lock); | ||
1012 | if (i_size_read(inode) < iocb->ki_pos) | ||
1013 | i_size_write(inode, iocb->ki_pos); | ||
1014 | spin_unlock(&inode->i_lock); | ||
1015 | } | ||
1016 | } | ||
1017 | nfs_direct_req_release(dreq); | ||
1018 | return result; | ||
1019 | |||
1020 | out_release: | ||
1021 | nfs_direct_req_release(dreq); | ||
1022 | out_unlock: | ||
1023 | mutex_unlock(&inode->i_mutex); | ||
995 | out: | 1024 | out: |
996 | return retval; | 1025 | return result; |
997 | } | 1026 | } |
998 | 1027 | ||
999 | /** | 1028 | /** |
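Editor's note: the fs/nfs/direct.c rework above merges nfs_direct_read/write into nfs_file_direct_read/write, takes i_mutex across direct I/O setup, counts reads in i_dio_count as well as writes, and defers the i_size/ki_pos update until the request has completed. Below is a minimal userspace sketch of that ordering only; struct fake_inode, start_dio() and finish_dio() are invented stand-ins for the kernel primitives, not NFS code.

```c
/* Sketch: serialize DIO setup under a mutex, count in-flight DIO, and update
 * the cached file size only once the request has completed. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct fake_inode {
    pthread_mutex_t i_mutex;
    atomic_int      i_dio_count;
    long long       i_size;
};

static void start_dio(struct fake_inode *inode)
{
    pthread_mutex_lock(&inode->i_mutex);      /* sync/invalidate page cache here */
    atomic_fetch_add(&inode->i_dio_count, 1); /* reads now count too */
    pthread_mutex_unlock(&inode->i_mutex);    /* drop before waiting for I/O */
}

static void finish_dio(struct fake_inode *inode, long long end_pos, int is_write)
{
    if (is_write && inode->i_size < end_pos)
        inode->i_size = end_pos;              /* size update deferred to completion */
    atomic_fetch_sub(&inode->i_dio_count, 1); /* inode_dio_done() equivalent */
}

int main(void)
{
    struct fake_inode inode = { PTHREAD_MUTEX_INITIALIZER, 0, 100 };

    start_dio(&inode);
    finish_dio(&inode, 4096, 1);
    printf("size after direct write: %lld, in-flight: %d\n",
           inode.i_size, atomic_load(&inode.i_dio_count));
    return 0;
}
```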
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index e2fcacf07de3..5bb790a69c71 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -354,7 +354,7 @@ static int nfs_write_begin(struct file *file, struct address_space *mapping, | |||
354 | struct page *page; | 354 | struct page *page; |
355 | int once_thru = 0; | 355 | int once_thru = 0; |
356 | 356 | ||
357 | dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%ld), %u@%lld)\n", | 357 | dfprintk(PAGECACHE, "NFS: write_begin(%pD2(%lu), %u@%lld)\n", |
358 | file, mapping->host->i_ino, len, (long long) pos); | 358 | file, mapping->host->i_ino, len, (long long) pos); |
359 | 359 | ||
360 | start: | 360 | start: |
@@ -395,7 +395,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping, | |||
395 | struct nfs_open_context *ctx = nfs_file_open_context(file); | 395 | struct nfs_open_context *ctx = nfs_file_open_context(file); |
396 | int status; | 396 | int status; |
397 | 397 | ||
398 | dfprintk(PAGECACHE, "NFS: write_end(%pD2(%ld), %u@%lld)\n", | 398 | dfprintk(PAGECACHE, "NFS: write_end(%pD2(%lu), %u@%lld)\n", |
399 | file, mapping->host->i_ino, len, (long long) pos); | 399 | file, mapping->host->i_ino, len, (long long) pos); |
400 | 400 | ||
401 | /* | 401 | /* |
@@ -585,7 +585,7 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
585 | int ret = VM_FAULT_NOPAGE; | 585 | int ret = VM_FAULT_NOPAGE; |
586 | struct address_space *mapping; | 586 | struct address_space *mapping; |
587 | 587 | ||
588 | dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%ld), offset %lld)\n", | 588 | dfprintk(PAGECACHE, "NFS: vm_page_mkwrite(%pD2(%lu), offset %lld)\n", |
589 | filp, filp->f_mapping->host->i_ino, | 589 | filp, filp->f_mapping->host->i_ino, |
590 | (long long)page_offset(page)); | 590 | (long long)page_offset(page)); |
591 | 591 | ||
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index ecd11ba7f960..ea00b34ff071 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -458,9 +458,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st | |||
458 | unlock_new_inode(inode); | 458 | unlock_new_inode(inode); |
459 | } else | 459 | } else |
460 | nfs_refresh_inode(inode, fattr); | 460 | nfs_refresh_inode(inode, fattr); |
461 | dprintk("NFS: nfs_fhget(%s/%Ld fh_crc=0x%08x ct=%d)\n", | 461 | dprintk("NFS: nfs_fhget(%s/%Lu fh_crc=0x%08x ct=%d)\n", |
462 | inode->i_sb->s_id, | 462 | inode->i_sb->s_id, |
463 | (long long)NFS_FILEID(inode), | 463 | (unsigned long long)NFS_FILEID(inode), |
464 | nfs_display_fhandle_hash(fh), | 464 | nfs_display_fhandle_hash(fh), |
465 | atomic_read(&inode->i_count)); | 465 | atomic_read(&inode->i_count)); |
466 | 466 | ||
@@ -870,8 +870,8 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) | |||
870 | struct nfs_fattr *fattr = NULL; | 870 | struct nfs_fattr *fattr = NULL; |
871 | struct nfs_inode *nfsi = NFS_I(inode); | 871 | struct nfs_inode *nfsi = NFS_I(inode); |
872 | 872 | ||
873 | dfprintk(PAGECACHE, "NFS: revalidating (%s/%Ld)\n", | 873 | dfprintk(PAGECACHE, "NFS: revalidating (%s/%Lu)\n", |
874 | inode->i_sb->s_id, (long long)NFS_FILEID(inode)); | 874 | inode->i_sb->s_id, (unsigned long long)NFS_FILEID(inode)); |
875 | 875 | ||
876 | trace_nfs_revalidate_inode_enter(inode); | 876 | trace_nfs_revalidate_inode_enter(inode); |
877 | 877 | ||
@@ -895,9 +895,9 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) | |||
895 | 895 | ||
896 | status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr, label); | 896 | status = NFS_PROTO(inode)->getattr(server, NFS_FH(inode), fattr, label); |
897 | if (status != 0) { | 897 | if (status != 0) { |
898 | dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) getattr failed, error=%d\n", | 898 | dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) getattr failed, error=%d\n", |
899 | inode->i_sb->s_id, | 899 | inode->i_sb->s_id, |
900 | (long long)NFS_FILEID(inode), status); | 900 | (unsigned long long)NFS_FILEID(inode), status); |
901 | if (status == -ESTALE) { | 901 | if (status == -ESTALE) { |
902 | nfs_zap_caches(inode); | 902 | nfs_zap_caches(inode); |
903 | if (!S_ISDIR(inode->i_mode)) | 903 | if (!S_ISDIR(inode->i_mode)) |
@@ -908,9 +908,9 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) | |||
908 | 908 | ||
909 | status = nfs_refresh_inode(inode, fattr); | 909 | status = nfs_refresh_inode(inode, fattr); |
910 | if (status) { | 910 | if (status) { |
911 | dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Ld) refresh failed, error=%d\n", | 911 | dfprintk(PAGECACHE, "nfs_revalidate_inode: (%s/%Lu) refresh failed, error=%d\n", |
912 | inode->i_sb->s_id, | 912 | inode->i_sb->s_id, |
913 | (long long)NFS_FILEID(inode), status); | 913 | (unsigned long long)NFS_FILEID(inode), status); |
914 | goto err_out; | 914 | goto err_out; |
915 | } | 915 | } |
916 | 916 | ||
@@ -919,9 +919,9 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode) | |||
919 | 919 | ||
920 | nfs_setsecurity(inode, fattr, label); | 920 | nfs_setsecurity(inode, fattr, label); |
921 | 921 | ||
922 | dfprintk(PAGECACHE, "NFS: (%s/%Ld) revalidation complete\n", | 922 | dfprintk(PAGECACHE, "NFS: (%s/%Lu) revalidation complete\n", |
923 | inode->i_sb->s_id, | 923 | inode->i_sb->s_id, |
924 | (long long)NFS_FILEID(inode)); | 924 | (unsigned long long)NFS_FILEID(inode)); |
925 | 925 | ||
926 | err_out: | 926 | err_out: |
927 | nfs4_label_free(label); | 927 | nfs4_label_free(label); |
@@ -985,8 +985,9 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map | |||
985 | nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE); | 985 | nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE); |
986 | nfs_fscache_wait_on_invalidate(inode); | 986 | nfs_fscache_wait_on_invalidate(inode); |
987 | 987 | ||
988 | dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n", | 988 | dfprintk(PAGECACHE, "NFS: (%s/%Lu) data cache invalidated\n", |
989 | inode->i_sb->s_id, (long long)NFS_FILEID(inode)); | 989 | inode->i_sb->s_id, |
990 | (unsigned long long)NFS_FILEID(inode)); | ||
990 | return 0; | 991 | return 0; |
991 | } | 992 | } |
992 | 993 | ||
@@ -1282,12 +1283,28 @@ static int nfs_inode_attrs_need_update(const struct inode *inode, const struct n | |||
1282 | ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); | 1283 | ((long)nfsi->attr_gencount - (long)nfs_read_attr_generation_counter() > 0); |
1283 | } | 1284 | } |
1284 | 1285 | ||
1286 | /* | ||
1287 | * Don't trust the change_attribute, mtime, ctime or size if | ||
1288 | * a pnfs LAYOUTCOMMIT is outstanding | ||
1289 | */ | ||
1290 | static void nfs_inode_attrs_handle_layoutcommit(struct inode *inode, | ||
1291 | struct nfs_fattr *fattr) | ||
1292 | { | ||
1293 | if (pnfs_layoutcommit_outstanding(inode)) | ||
1294 | fattr->valid &= ~(NFS_ATTR_FATTR_CHANGE | | ||
1295 | NFS_ATTR_FATTR_MTIME | | ||
1296 | NFS_ATTR_FATTR_CTIME | | ||
1297 | NFS_ATTR_FATTR_SIZE); | ||
1298 | } | ||
1299 | |||
1285 | static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr) | 1300 | static int nfs_refresh_inode_locked(struct inode *inode, struct nfs_fattr *fattr) |
1286 | { | 1301 | { |
1287 | int ret; | 1302 | int ret; |
1288 | 1303 | ||
1289 | trace_nfs_refresh_inode_enter(inode); | 1304 | trace_nfs_refresh_inode_enter(inode); |
1290 | 1305 | ||
1306 | nfs_inode_attrs_handle_layoutcommit(inode, fattr); | ||
1307 | |||
1291 | if (nfs_inode_attrs_need_update(inode, fattr)) | 1308 | if (nfs_inode_attrs_need_update(inode, fattr)) |
1292 | ret = nfs_update_inode(inode, fattr); | 1309 | ret = nfs_update_inode(inode, fattr); |
1293 | else | 1310 | else |
@@ -1434,7 +1451,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1434 | unsigned long now = jiffies; | 1451 | unsigned long now = jiffies; |
1435 | unsigned long save_cache_validity; | 1452 | unsigned long save_cache_validity; |
1436 | 1453 | ||
1437 | dfprintk(VFS, "NFS: %s(%s/%ld fh_crc=0x%08x ct=%d info=0x%x)\n", | 1454 | dfprintk(VFS, "NFS: %s(%s/%lu fh_crc=0x%08x ct=%d info=0x%x)\n", |
1438 | __func__, inode->i_sb->s_id, inode->i_ino, | 1455 | __func__, inode->i_sb->s_id, inode->i_ino, |
1439 | nfs_display_fhandle_hash(NFS_FH(inode)), | 1456 | nfs_display_fhandle_hash(NFS_FH(inode)), |
1440 | atomic_read(&inode->i_count), fattr->valid); | 1457 | atomic_read(&inode->i_count), fattr->valid); |
@@ -1455,7 +1472,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1455 | /* | 1472 | /* |
1456 | * Big trouble! The inode has become a different object. | 1473 | * Big trouble! The inode has become a different object. |
1457 | */ | 1474 | */ |
1458 | printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n", | 1475 | printk(KERN_DEBUG "NFS: %s: inode %lu mode changed, %07o to %07o\n", |
1459 | __func__, inode->i_ino, inode->i_mode, fattr->mode); | 1476 | __func__, inode->i_ino, inode->i_mode, fattr->mode); |
1460 | goto out_err; | 1477 | goto out_err; |
1461 | } | 1478 | } |
@@ -1517,8 +1534,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr) | |||
1517 | if (new_isize != cur_isize) { | 1534 | if (new_isize != cur_isize) { |
1518 | /* Do we perhaps have any outstanding writes, or has | 1535 | /* Do we perhaps have any outstanding writes, or has |
1519 | * the file grown beyond our last write? */ | 1536 | * the file grown beyond our last write? */ |
1520 | if ((nfsi->npages == 0 && !test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) || | 1537 | if ((nfsi->npages == 0) || new_isize > cur_isize) { |
1521 | new_isize > cur_isize) { | ||
1522 | i_size_write(inode, new_isize); | 1538 | i_size_write(inode, new_isize); |
1523 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; | 1539 | invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; |
1524 | } | 1540 | } |
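Editor's note: besides the %lu conversions, the inode.c hunks add nfs_inode_attrs_handle_layoutcommit(), which masks the change, mtime, ctime and size bits out of fattr->valid while a LAYOUTCOMMIT is outstanding, so a racing GETATTR reply cannot overwrite attributes the client knows better. A toy demonstration of that masking follows; the flag values are invented and are not the kernel's NFS_ATTR_FATTR_* constants.

```c
/* Demo of the attribute-masking idea: while a LAYOUTCOMMIT is outstanding,
 * drop change/mtime/ctime/size from the "valid" mask so stale server
 * attributes are ignored. */
#include <stdio.h>

#define FATTR_CHANGE 0x01
#define FATTR_MTIME  0x02
#define FATTR_CTIME  0x04
#define FATTR_SIZE   0x08
#define FATTR_MODE   0x10

int main(void)
{
    unsigned int valid = FATTR_CHANGE | FATTR_MTIME | FATTR_SIZE | FATTR_MODE;
    int layoutcommit_outstanding = 1; /* pretend pnfs_layoutcommit_outstanding() */

    if (layoutcommit_outstanding)
        valid &= ~(FATTR_CHANGE | FATTR_MTIME | FATTR_CTIME | FATTR_SIZE);

    printf("valid mask after filtering: 0x%02x (only MODE survives)\n", valid);
    return 0;
}
```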
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index b4a160a405ce..73d4ecda1e36 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/sunrpc/auth.h> | 10 | #include <linux/sunrpc/auth.h> |
11 | #include <linux/sunrpc/xprt.h> | 11 | #include <linux/sunrpc/xprt.h> |
12 | #include <linux/sunrpc/bc_xprt.h> | 12 | #include <linux/sunrpc/bc_xprt.h> |
13 | #include <linux/sunrpc/rpc_pipe_fs.h> | ||
13 | #include "internal.h" | 14 | #include "internal.h" |
14 | #include "callback.h" | 15 | #include "callback.h" |
15 | #include "delegation.h" | 16 | #include "delegation.h" |
@@ -370,7 +371,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp, | |||
370 | __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags); | 371 | __set_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags); |
371 | __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags); | 372 | __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags); |
372 | __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags); | 373 | __set_bit(NFS_CS_NO_RETRANS_TIMEOUT, &clp->cl_flags); |
373 | error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_GSS_KRB5I); | 374 | |
375 | error = -EINVAL; | ||
376 | if (gssd_running(clp->cl_net)) | ||
377 | error = nfs_create_rpc_client(clp, timeparms, | ||
378 | RPC_AUTH_GSS_KRB5I); | ||
374 | if (error == -EINVAL) | 379 | if (error == -EINVAL) |
375 | error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX); | 380 | error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX); |
376 | if (error < 0) | 381 | if (error < 0) |
@@ -409,13 +414,11 @@ struct nfs_client *nfs4_init_client(struct nfs_client *clp, | |||
409 | error = nfs4_discover_server_trunking(clp, &old); | 414 | error = nfs4_discover_server_trunking(clp, &old); |
410 | if (error < 0) | 415 | if (error < 0) |
411 | goto error; | 416 | goto error; |
412 | nfs_put_client(clp); | ||
413 | if (clp != old) { | ||
414 | clp->cl_preserve_clid = true; | ||
415 | clp = old; | ||
416 | } | ||
417 | 417 | ||
418 | return clp; | 418 | if (clp != old) |
419 | clp->cl_preserve_clid = true; | ||
420 | nfs_put_client(clp); | ||
421 | return old; | ||
419 | 422 | ||
420 | error: | 423 | error: |
421 | nfs_mark_client_ready(clp, error); | 424 | nfs_mark_client_ready(clp, error); |
@@ -493,9 +496,10 @@ int nfs40_walk_client_list(struct nfs_client *new, | |||
493 | prev = pos; | 496 | prev = pos; |
494 | 497 | ||
495 | status = nfs_wait_client_init_complete(pos); | 498 | status = nfs_wait_client_init_complete(pos); |
496 | spin_lock(&nn->nfs_client_lock); | ||
497 | if (status < 0) | 499 | if (status < 0) |
498 | continue; | 500 | goto out; |
501 | status = -NFS4ERR_STALE_CLIENTID; | ||
502 | spin_lock(&nn->nfs_client_lock); | ||
499 | } | 503 | } |
500 | if (pos->cl_cons_state != NFS_CS_READY) | 504 | if (pos->cl_cons_state != NFS_CS_READY) |
501 | continue; | 505 | continue; |
@@ -633,7 +637,8 @@ int nfs41_walk_client_list(struct nfs_client *new, | |||
633 | } | 637 | } |
634 | spin_lock(&nn->nfs_client_lock); | 638 | spin_lock(&nn->nfs_client_lock); |
635 | if (status < 0) | 639 | if (status < 0) |
636 | continue; | 640 | break; |
641 | status = -NFS4ERR_STALE_CLIENTID; | ||
637 | } | 642 | } |
638 | if (pos->cl_cons_state != NFS_CS_READY) | 643 | if (pos->cl_cons_state != NFS_CS_READY) |
639 | continue; | 644 | continue; |
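Editor's note: two client-setup fixes land here. nfs4_init_client() now only tries RPC_AUTH_GSS_KRB5I when gssd_running() reports the upcall daemon is available, falling back to AUTH_UNIX as before, and the trunking-discovery paths stop touching the nfs_client after dropping the last reference. A minimal sketch of the fallback logic follows; gssd_running() and create_rpc_client() here are stubs invented for the demo, not the kernel functions.

```c
/* Sketch of the auth-flavour fallback: only attempt krb5i when gssd is
 * available; on -EINVAL fall back to AUTH_UNIX. */
#include <stdio.h>
#include <errno.h>

enum flavor { AUTH_UNIX, AUTH_GSS_KRB5I };

static int gssd_running(void) { return 0; }        /* pretend gssd is absent */
static int create_rpc_client(enum flavor f)
{
    return f == AUTH_GSS_KRB5I ? -EINVAL : 0;      /* krb5i needs gssd */
}

int main(void)
{
    int error = -EINVAL;

    if (gssd_running())
        error = create_rpc_client(AUTH_GSS_KRB5I);
    if (error == -EINVAL)
        error = create_rpc_client(AUTH_UNIX);      /* silent fallback */

    printf("client setup %s\n", error ? "failed" : "succeeded with AUTH_UNIX");
    return 0;
}
```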
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index b86464ba25e1..03fd8be8c0c5 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -91,10 +91,10 @@ static void filelayout_reset_write(struct nfs_write_data *data) | |||
91 | 91 | ||
92 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { | 92 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
93 | dprintk("%s Reset task %5u for i/o through MDS " | 93 | dprintk("%s Reset task %5u for i/o through MDS " |
94 | "(req %s/%lld, %u bytes @ offset %llu)\n", __func__, | 94 | "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, |
95 | data->task.tk_pid, | 95 | data->task.tk_pid, |
96 | hdr->inode->i_sb->s_id, | 96 | hdr->inode->i_sb->s_id, |
97 | (long long)NFS_FILEID(hdr->inode), | 97 | (unsigned long long)NFS_FILEID(hdr->inode), |
98 | data->args.count, | 98 | data->args.count, |
99 | (unsigned long long)data->args.offset); | 99 | (unsigned long long)data->args.offset); |
100 | 100 | ||
@@ -112,10 +112,10 @@ static void filelayout_reset_read(struct nfs_read_data *data) | |||
112 | 112 | ||
113 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { | 113 | if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) { |
114 | dprintk("%s Reset task %5u for i/o through MDS " | 114 | dprintk("%s Reset task %5u for i/o through MDS " |
115 | "(req %s/%lld, %u bytes @ offset %llu)\n", __func__, | 115 | "(req %s/%llu, %u bytes @ offset %llu)\n", __func__, |
116 | data->task.tk_pid, | 116 | data->task.tk_pid, |
117 | hdr->inode->i_sb->s_id, | 117 | hdr->inode->i_sb->s_id, |
118 | (long long)NFS_FILEID(hdr->inode), | 118 | (unsigned long long)NFS_FILEID(hdr->inode), |
119 | data->args.count, | 119 | data->args.count, |
120 | (unsigned long long)data->args.offset); | 120 | (unsigned long long)data->args.offset); |
121 | 121 | ||
@@ -1216,17 +1216,17 @@ static void filelayout_recover_commit_reqs(struct list_head *dst, | |||
1216 | struct pnfs_commit_bucket *b; | 1216 | struct pnfs_commit_bucket *b; |
1217 | int i; | 1217 | int i; |
1218 | 1218 | ||
1219 | /* NOTE cinfo->lock is NOT held, relying on fact that this is | 1219 | spin_lock(cinfo->lock); |
1220 | * only called on single thread per dreq. | ||
1221 | * Can't take the lock because need to do pnfs_put_lseg | ||
1222 | */ | ||
1223 | for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { | 1220 | for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) { |
1224 | if (transfer_commit_list(&b->written, dst, cinfo, 0)) { | 1221 | if (transfer_commit_list(&b->written, dst, cinfo, 0)) { |
1222 | spin_unlock(cinfo->lock); | ||
1225 | pnfs_put_lseg(b->wlseg); | 1223 | pnfs_put_lseg(b->wlseg); |
1226 | b->wlseg = NULL; | 1224 | b->wlseg = NULL; |
1225 | spin_lock(cinfo->lock); | ||
1227 | } | 1226 | } |
1228 | } | 1227 | } |
1229 | cinfo->ds->nwritten = 0; | 1228 | cinfo->ds->nwritten = 0; |
1229 | spin_unlock(cinfo->lock); | ||
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | static unsigned int | 1232 | static unsigned int |
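Editor's note: filelayout_recover_commit_reqs() previously walked the commit buckets without cinfo->lock, relying on there being a single thread per dreq; the hunk above takes the lock and only drops it around pnfs_put_lseg(), which may itself sleep or take other locks. A minimal userspace sketch of that drop-and-retake pattern follows; the pthread mutex and bucket array are invented for illustration.

```c
/* Sketch: hold the lock while scanning, drop it around the call that may
 * sleep, then re-take it before continuing the scan. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void put_lseg(int i)              /* may "sleep": must not hold the lock */
{
    printf("releasing lseg for bucket %d\n", i);
}

int main(void)
{
    int has_lseg[3] = { 1, 0, 1 };
    int i;

    pthread_mutex_lock(&lock);
    for (i = 0; i < 3; i++) {
        if (has_lseg[i]) {
            pthread_mutex_unlock(&lock); /* drop before the heavy call */
            put_lseg(i);
            has_lseg[i] = 0;
            pthread_mutex_lock(&lock);   /* re-take and continue */
        }
    }
    pthread_mutex_unlock(&lock);
    return 0;
}
```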
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index c7c295e556ed..efac602edb37 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -95,7 +95,7 @@ same_sockaddr(struct sockaddr *addr1, struct sockaddr *addr2) | |||
95 | b6 = (struct sockaddr_in6 *)addr2; | 95 | b6 = (struct sockaddr_in6 *)addr2; |
96 | 96 | ||
97 | /* LINKLOCAL addresses must have matching scope_id */ | 97 | /* LINKLOCAL addresses must have matching scope_id */ |
98 | if (ipv6_addr_scope(&a6->sin6_addr) == | 98 | if (ipv6_addr_src_scope(&a6->sin6_addr) == |
99 | IPV6_ADDR_SCOPE_LINKLOCAL && | 99 | IPV6_ADDR_SCOPE_LINKLOCAL && |
100 | a6->sin6_scope_id != b6->sin6_scope_id) | 100 | a6->sin6_scope_id != b6->sin6_scope_id) |
101 | return false; | 101 | return false; |
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 15052b81df42..a1965329a12c 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -7409,9 +7409,9 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) | |||
7409 | struct nfs_server *server = NFS_SERVER(inode); | 7409 | struct nfs_server *server = NFS_SERVER(inode); |
7410 | struct pnfs_layout_hdr *lo; | 7410 | struct pnfs_layout_hdr *lo; |
7411 | struct nfs4_state *state = NULL; | 7411 | struct nfs4_state *state = NULL; |
7412 | unsigned long timeo, giveup; | 7412 | unsigned long timeo, now, giveup; |
7413 | 7413 | ||
7414 | dprintk("--> %s\n", __func__); | 7414 | dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status); |
7415 | 7415 | ||
7416 | if (!nfs41_sequence_done(task, &lgp->res.seq_res)) | 7416 | if (!nfs41_sequence_done(task, &lgp->res.seq_res)) |
7417 | goto out; | 7417 | goto out; |
@@ -7419,12 +7419,38 @@ static void nfs4_layoutget_done(struct rpc_task *task, void *calldata) | |||
7419 | switch (task->tk_status) { | 7419 | switch (task->tk_status) { |
7420 | case 0: | 7420 | case 0: |
7421 | goto out; | 7421 | goto out; |
7422 | /* | ||
7423 | * NFS4ERR_LAYOUTTRYLATER is a conflict with another client | ||
7424 | * (or clients) writing to the same RAID stripe | ||
7425 | */ | ||
7422 | case -NFS4ERR_LAYOUTTRYLATER: | 7426 | case -NFS4ERR_LAYOUTTRYLATER: |
7427 | /* | ||
7428 | * NFS4ERR_RECALLCONFLICT is when conflict with self (must recall | ||
7429 | * existing layout before getting a new one). | ||
7430 | */ | ||
7423 | case -NFS4ERR_RECALLCONFLICT: | 7431 | case -NFS4ERR_RECALLCONFLICT: |
7424 | timeo = rpc_get_timeout(task->tk_client); | 7432 | timeo = rpc_get_timeout(task->tk_client); |
7425 | giveup = lgp->args.timestamp + timeo; | 7433 | giveup = lgp->args.timestamp + timeo; |
7426 | if (time_after(giveup, jiffies)) | 7434 | now = jiffies; |
7427 | task->tk_status = -NFS4ERR_DELAY; | 7435 | if (time_after(giveup, now)) { |
7436 | unsigned long delay; | ||
7437 | |||
7438 | /* Delay for: | ||
7439 | * - Not less then NFS4_POLL_RETRY_MIN. | ||
7440 | * - One last time a jiffie before we give up | ||
7441 | * - exponential backoff (time_now minus start_attempt) | ||
7442 | */ | ||
7443 | delay = max_t(unsigned long, NFS4_POLL_RETRY_MIN, | ||
7444 | min((giveup - now - 1), | ||
7445 | now - lgp->args.timestamp)); | ||
7446 | |||
7447 | dprintk("%s: NFS4ERR_RECALLCONFLICT waiting %lu\n", | ||
7448 | __func__, delay); | ||
7449 | rpc_delay(task, delay); | ||
7450 | task->tk_status = 0; | ||
7451 | rpc_restart_call_prepare(task); | ||
7452 | goto out; /* Do not call nfs4_async_handle_error() */ | ||
7453 | } | ||
7428 | break; | 7454 | break; |
7429 | case -NFS4ERR_EXPIRED: | 7455 | case -NFS4ERR_EXPIRED: |
7430 | case -NFS4ERR_BAD_STATEID: | 7456 | case -NFS4ERR_BAD_STATEID: |
@@ -7780,10 +7806,7 @@ nfs4_layoutcommit_done(struct rpc_task *task, void *calldata) | |||
7780 | case -NFS4ERR_BADLAYOUT: /* no layout */ | 7806 | case -NFS4ERR_BADLAYOUT: /* no layout */ |
7781 | case -NFS4ERR_GRACE: /* loca_recalim always false */ | 7807 | case -NFS4ERR_GRACE: /* loca_recalim always false */ |
7782 | task->tk_status = 0; | 7808 | task->tk_status = 0; |
7783 | break; | ||
7784 | case 0: | 7809 | case 0: |
7785 | nfs_post_op_update_inode_force_wcc(data->args.inode, | ||
7786 | data->res.fattr); | ||
7787 | break; | 7810 | break; |
7788 | default: | 7811 | default: |
7789 | if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { | 7812 | if (nfs4_async_handle_error(task, server, NULL) == -EAGAIN) { |
@@ -7798,6 +7821,8 @@ static void nfs4_layoutcommit_release(void *calldata) | |||
7798 | struct nfs4_layoutcommit_data *data = calldata; | 7821 | struct nfs4_layoutcommit_data *data = calldata; |
7799 | 7822 | ||
7800 | pnfs_cleanup_layoutcommit(data); | 7823 | pnfs_cleanup_layoutcommit(data); |
7824 | nfs_post_op_update_inode_force_wcc(data->args.inode, | ||
7825 | data->res.fattr); | ||
7801 | put_rpccred(data->cred); | 7826 | put_rpccred(data->cred); |
7802 | kfree(data); | 7827 | kfree(data); |
7803 | } | 7828 | } |
@@ -7920,7 +7945,7 @@ nfs41_proc_secinfo_no_name(struct nfs_server *server, struct nfs_fh *fhandle, | |||
7920 | switch (err) { | 7945 | switch (err) { |
7921 | case 0: | 7946 | case 0: |
7922 | case -NFS4ERR_WRONGSEC: | 7947 | case -NFS4ERR_WRONGSEC: |
7923 | case -NFS4ERR_NOTSUPP: | 7948 | case -ENOTSUPP: |
7924 | goto out; | 7949 | goto out; |
7925 | default: | 7950 | default: |
7926 | err = nfs4_handle_exception(server, err, &exception); | 7951 | err = nfs4_handle_exception(server, err, &exception); |
@@ -7954,7 +7979,7 @@ nfs41_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle, | |||
7954 | * Fall back on "guess and check" method if | 7979 | * Fall back on "guess and check" method if |
7955 | * the server doesn't support SECINFO_NO_NAME | 7980 | * the server doesn't support SECINFO_NO_NAME |
7956 | */ | 7981 | */ |
7957 | if (err == -NFS4ERR_WRONGSEC || err == -NFS4ERR_NOTSUPP) { | 7982 | if (err == -NFS4ERR_WRONGSEC || err == -ENOTSUPP) { |
7958 | err = nfs4_find_root_sec(server, fhandle, info); | 7983 | err = nfs4_find_root_sec(server, fhandle, info); |
7959 | goto out_freepage; | 7984 | goto out_freepage; |
7960 | } | 7985 | } |
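Editor's note: for NFS4ERR_LAYOUTTRYLATER and NFS4ERR_RECALLCONFLICT, nfs4_layoutget_done() now schedules its own retry rather than mapping the error to NFS4ERR_DELAY: the sleep is at least NFS4_POLL_RETRY_MIN, grows with the time already spent on the attempt, and never extends past the give-up deadline. A worked userspace example of that clamp follows; the jiffy values are invented, and NFS4_POLL_RETRY_MIN (HZ/10 in the kernel) is approximated as 25 ticks assuming HZ=250.

```c
/* Worked example of the retry-delay clamp used in the hunk above:
 *   delay = max(NFS4_POLL_RETRY_MIN, min(giveup - now - 1, now - start)) */
#include <stdio.h>

#define POLL_RETRY_MIN 25UL

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

int main(void)
{
    unsigned long start = 1000, timeo = 1500;   /* request timestamp, rpc timeout */
    unsigned long giveup = start + timeo;
    unsigned long now;

    for (now = 1010; now < giveup; now += 200) {
        unsigned long delay = max_ul(POLL_RETRY_MIN,
                                     min_ul(giveup - now - 1, now - start));
        printf("now=%lu -> retry in %lu ticks\n", now, delay);
    }
    return 0;
}
```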
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 059c01b67a71..e5be72518bd7 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1071,7 +1071,7 @@ void nfs_free_seqid(struct nfs_seqid *seqid) | |||
1071 | /* | 1071 | /* |
1072 | * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or | 1072 | * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or |
1073 | * failed with a seqid incrementing error - | 1073 | * failed with a seqid incrementing error - |
1074 | * see comments nfs_fs.h:seqid_mutating_error() | 1074 | * see comments nfs4.h:seqid_mutating_error() |
1075 | */ | 1075 | */ |
1076 | static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) | 1076 | static void nfs_increment_seqid(int status, struct nfs_seqid *seqid) |
1077 | { | 1077 | { |
@@ -1116,7 +1116,7 @@ void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid) | |||
1116 | /* | 1116 | /* |
1117 | * Increment the seqid if the LOCK/LOCKU succeeded, or | 1117 | * Increment the seqid if the LOCK/LOCKU succeeded, or |
1118 | * failed with a seqid incrementing error - | 1118 | * failed with a seqid incrementing error - |
1119 | * see comments nfs_fs.h:seqid_mutating_error() | 1119 | * see comments nfs4.h:seqid_mutating_error() |
1120 | */ | 1120 | */ |
1121 | void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) | 1121 | void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid) |
1122 | { | 1122 | { |
diff --git a/fs/nfs/nfs4super.c b/fs/nfs/nfs4super.c
index 65ab0a0ca1c4..808f29574412 100644
--- a/fs/nfs/nfs4super.c
+++ b/fs/nfs/nfs4super.c
@@ -77,17 +77,9 @@ static int nfs4_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
77 | { | 77 | { |
78 | int ret = nfs_write_inode(inode, wbc); | 78 | int ret = nfs_write_inode(inode, wbc); |
79 | 79 | ||
80 | if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) { | 80 | if (ret == 0) |
81 | int status; | 81 | ret = pnfs_layoutcommit_inode(inode, |
82 | bool sync = true; | 82 | wbc->sync_mode == WB_SYNC_ALL); |
83 | |||
84 | if (wbc->sync_mode == WB_SYNC_NONE) | ||
85 | sync = false; | ||
86 | |||
87 | status = pnfs_layoutcommit_inode(inode, sync); | ||
88 | if (status < 0) | ||
89 | return status; | ||
90 | } | ||
91 | return ret; | 83 | return ret; |
92 | } | 84 | } |
93 | 85 | ||
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 5be2868c02f1..8c21d69a9dc1 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -3097,7 +3097,8 @@ out_overflow: | |||
3097 | return -EIO; | 3097 | return -EIO; |
3098 | } | 3098 | } |
3099 | 3099 | ||
3100 | static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) | 3100 | static bool __decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected, |
3101 | int *nfs_retval) | ||
3101 | { | 3102 | { |
3102 | __be32 *p; | 3103 | __be32 *p; |
3103 | uint32_t opnum; | 3104 | uint32_t opnum; |
@@ -3107,19 +3108,32 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) | |||
3107 | if (unlikely(!p)) | 3108 | if (unlikely(!p)) |
3108 | goto out_overflow; | 3109 | goto out_overflow; |
3109 | opnum = be32_to_cpup(p++); | 3110 | opnum = be32_to_cpup(p++); |
3110 | if (opnum != expected) { | 3111 | if (unlikely(opnum != expected)) |
3111 | dprintk("nfs: Server returned operation" | 3112 | goto out_bad_operation; |
3112 | " %d but we issued a request for %d\n", | ||
3113 | opnum, expected); | ||
3114 | return -EIO; | ||
3115 | } | ||
3116 | nfserr = be32_to_cpup(p); | 3113 | nfserr = be32_to_cpup(p); |
3117 | if (nfserr != NFS_OK) | 3114 | if (nfserr == NFS_OK) |
3118 | return nfs4_stat_to_errno(nfserr); | 3115 | *nfs_retval = 0; |
3119 | return 0; | 3116 | else |
3117 | *nfs_retval = nfs4_stat_to_errno(nfserr); | ||
3118 | return true; | ||
3119 | out_bad_operation: | ||
3120 | dprintk("nfs: Server returned operation" | ||
3121 | " %d but we issued a request for %d\n", | ||
3122 | opnum, expected); | ||
3123 | *nfs_retval = -EREMOTEIO; | ||
3124 | return false; | ||
3120 | out_overflow: | 3125 | out_overflow: |
3121 | print_overflow_msg(__func__, xdr); | 3126 | print_overflow_msg(__func__, xdr); |
3122 | return -EIO; | 3127 | *nfs_retval = -EIO; |
3128 | return false; | ||
3129 | } | ||
3130 | |||
3131 | static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected) | ||
3132 | { | ||
3133 | int retval; | ||
3134 | |||
3135 | __decode_op_hdr(xdr, expected, &retval); | ||
3136 | return retval; | ||
3123 | } | 3137 | } |
3124 | 3138 | ||
3125 | /* Dummy routine */ | 3139 | /* Dummy routine */ |
@@ -5001,11 +5015,12 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res) | |||
5001 | uint32_t savewords, bmlen, i; | 5015 | uint32_t savewords, bmlen, i; |
5002 | int status; | 5016 | int status; |
5003 | 5017 | ||
5004 | status = decode_op_hdr(xdr, OP_OPEN); | 5018 | if (!__decode_op_hdr(xdr, OP_OPEN, &status)) |
5005 | if (status != -EIO) | 5019 | return status; |
5006 | nfs_increment_open_seqid(status, res->seqid); | 5020 | nfs_increment_open_seqid(status, res->seqid); |
5007 | if (!status) | 5021 | if (status) |
5008 | status = decode_stateid(xdr, &res->stateid); | 5022 | return status; |
5023 | status = decode_stateid(xdr, &res->stateid); | ||
5009 | if (unlikely(status)) | 5024 | if (unlikely(status)) |
5010 | return status; | 5025 | return status; |
5011 | 5026 | ||
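Editor's note: the XDR change splits op-header decoding into __decode_op_hdr(), which reports separately whether the reply could be parsed at all and what status the server returned, so decode_open() can still increment the open seqid when the server returns an NFS error yet skip it (and return -EREMOTEIO) when the reply is undecodable. A hedged userspace sketch of that two-result pattern follows; names and values are invented for the demo.

```c
/* Sketch: the bool says whether the header was parseable, *op_status carries
 * the server's status for the operation. */
#include <stdbool.h>
#include <stdio.h>

static bool decode_hdr(int wire_opnum, int expected, int wire_status, int *op_status)
{
    if (wire_opnum != expected) {
        *op_status = -121;               /* -EREMOTEIO: reply is garbage */
        return false;
    }
    *op_status = wire_status;            /* 0 or a mapped NFS error */
    return true;
}

int main(void)
{
    int status;

    if (!decode_hdr(18 /* OP_OPEN */, 18, -10011 /* invented server status */, &status))
        return 1;                        /* undecodable: do NOT touch the seqid */

    printf("seqid may be incremented, op status %d\n", status);
    if (status)
        return 1;                        /* server error: stop before the stateid */
    /* ... decode the stateid and the rest of the OPEN reply here ... */
    return 0;
}
```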
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index d75d938d36cb..4755858e37a0 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -1790,6 +1790,15 @@ pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) | |||
1790 | } | 1790 | } |
1791 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages); | 1791 | EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages); |
1792 | 1792 | ||
1793 | static void pnfs_clear_layoutcommitting(struct inode *inode) | ||
1794 | { | ||
1795 | unsigned long *bitlock = &NFS_I(inode)->flags; | ||
1796 | |||
1797 | clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); | ||
1798 | smp_mb__after_clear_bit(); | ||
1799 | wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); | ||
1800 | } | ||
1801 | |||
1793 | /* | 1802 | /* |
1794 | * There can be multiple RW segments. | 1803 | * There can be multiple RW segments. |
1795 | */ | 1804 | */ |
@@ -1807,7 +1816,6 @@ static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp) | |||
1807 | static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp) | 1816 | static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp) |
1808 | { | 1817 | { |
1809 | struct pnfs_layout_segment *lseg, *tmp; | 1818 | struct pnfs_layout_segment *lseg, *tmp; |
1810 | unsigned long *bitlock = &NFS_I(inode)->flags; | ||
1811 | 1819 | ||
1812 | /* Matched by references in pnfs_set_layoutcommit */ | 1820 | /* Matched by references in pnfs_set_layoutcommit */ |
1813 | list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) { | 1821 | list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) { |
@@ -1815,9 +1823,7 @@ static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *lis | |||
1815 | pnfs_put_lseg(lseg); | 1823 | pnfs_put_lseg(lseg); |
1816 | } | 1824 | } |
1817 | 1825 | ||
1818 | clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock); | 1826 | pnfs_clear_layoutcommitting(inode); |
1819 | smp_mb__after_clear_bit(); | ||
1820 | wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING); | ||
1821 | } | 1827 | } |
1822 | 1828 | ||
1823 | void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) | 1829 | void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg) |
@@ -1881,43 +1887,37 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync) | |||
1881 | struct nfs4_layoutcommit_data *data; | 1887 | struct nfs4_layoutcommit_data *data; |
1882 | struct nfs_inode *nfsi = NFS_I(inode); | 1888 | struct nfs_inode *nfsi = NFS_I(inode); |
1883 | loff_t end_pos; | 1889 | loff_t end_pos; |
1884 | int status = 0; | 1890 | int status; |
1885 | 1891 | ||
1886 | dprintk("--> %s inode %lu\n", __func__, inode->i_ino); | 1892 | if (!pnfs_layoutcommit_outstanding(inode)) |
1887 | |||
1888 | if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) | ||
1889 | return 0; | 1893 | return 0; |
1890 | 1894 | ||
1891 | /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */ | 1895 | dprintk("--> %s inode %lu\n", __func__, inode->i_ino); |
1892 | data = kzalloc(sizeof(*data), GFP_NOFS); | ||
1893 | if (!data) { | ||
1894 | status = -ENOMEM; | ||
1895 | goto out; | ||
1896 | } | ||
1897 | |||
1898 | if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) | ||
1899 | goto out_free; | ||
1900 | 1896 | ||
1897 | status = -EAGAIN; | ||
1901 | if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) { | 1898 | if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) { |
1902 | if (!sync) { | 1899 | if (!sync) |
1903 | status = -EAGAIN; | 1900 | goto out; |
1904 | goto out_free; | 1901 | status = wait_on_bit_lock(&nfsi->flags, |
1905 | } | 1902 | NFS_INO_LAYOUTCOMMITTING, |
1906 | status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING, | 1903 | nfs_wait_bit_killable, |
1907 | nfs_wait_bit_killable, TASK_KILLABLE); | 1904 | TASK_KILLABLE); |
1908 | if (status) | 1905 | if (status) |
1909 | goto out_free; | 1906 | goto out; |
1910 | } | 1907 | } |
1911 | 1908 | ||
1912 | INIT_LIST_HEAD(&data->lseg_list); | 1909 | status = -ENOMEM; |
1910 | /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */ | ||
1911 | data = kzalloc(sizeof(*data), GFP_NOFS); | ||
1912 | if (!data) | ||
1913 | goto clear_layoutcommitting; | ||
1914 | |||
1915 | status = 0; | ||
1913 | spin_lock(&inode->i_lock); | 1916 | spin_lock(&inode->i_lock); |
1914 | if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { | 1917 | if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) |
1915 | clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags); | 1918 | goto out_unlock; |
1916 | spin_unlock(&inode->i_lock); | ||
1917 | wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING); | ||
1918 | goto out_free; | ||
1919 | } | ||
1920 | 1919 | ||
1920 | INIT_LIST_HEAD(&data->lseg_list); | ||
1921 | pnfs_list_write_lseg(inode, &data->lseg_list); | 1921 | pnfs_list_write_lseg(inode, &data->lseg_list); |
1922 | 1922 | ||
1923 | end_pos = nfsi->layout->plh_lwb; | 1923 | end_pos = nfsi->layout->plh_lwb; |
@@ -1940,8 +1940,11 @@ out: | |||
1940 | mark_inode_dirty_sync(inode); | 1940 | mark_inode_dirty_sync(inode); |
1941 | dprintk("<-- %s status %d\n", __func__, status); | 1941 | dprintk("<-- %s status %d\n", __func__, status); |
1942 | return status; | 1942 | return status; |
1943 | out_free: | 1943 | out_unlock: |
1944 | spin_unlock(&inode->i_lock); | ||
1944 | kfree(data); | 1945 | kfree(data); |
1946 | clear_layoutcommitting: | ||
1947 | pnfs_clear_layoutcommitting(inode); | ||
1945 | goto out; | 1948 | goto out; |
1946 | } | 1949 | } |
1947 | 1950 | ||
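The pnfs_layoutcommit_inode() rework treats NFS_INO_LAYOUTCOMMITTING as a bit-lock: take it (or wait for it, in the sync case) before allocating and building the layoutcommit, and release it through the new pnfs_clear_layoutcommitting() helper on every exit path, including the new clear_layoutcommitting error label. A condensed sketch of the take/release pattern the code above follows (not the exact kernel functions):

    /* Sketch: take the LAYOUTCOMMITTING bit, or wait for it when sync. */
    static int example_take_layoutcommitting(struct nfs_inode *nfsi, bool sync)
    {
            if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
                    if (!sync)
                            return -EAGAIN;         /* another commit is in flight */
                    return wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
                                            nfs_wait_bit_killable, TASK_KILLABLE);
            }
            return 0;
    }

    /* Sketch: release the bit and wake anyone sleeping in wait_on_bit_lock(). */
    static void example_drop_layoutcommitting(struct nfs_inode *nfsi)
    {
            clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
            smp_mb__after_clear_bit();
            wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
    }

The ordering matters: the bit is now taken before the kzalloc(), so an allocation failure must jump to clear_layoutcommitting rather than plain out, which is exactly what the new error label provides.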
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index a4f41810a7f4..023793909778 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -359,6 +359,15 @@ pnfs_ld_layoutret_on_setattr(struct inode *inode)
 		PNFS_LAYOUTRET_ON_SETATTR;
 }
 
+static inline bool
+pnfs_layoutcommit_outstanding(struct inode *inode)
+{
+	struct nfs_inode *nfsi = NFS_I(inode);
+
+	return test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags) != 0 ||
+		test_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags) != 0;
+}
+
 static inline int pnfs_return_layout(struct inode *ino)
 {
 	struct nfs_inode *nfsi = NFS_I(ino);
@@ -515,6 +524,13 @@ pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
 	return false;
 }
 
+static inline bool
+pnfs_layoutcommit_outstanding(struct inode *inode)
+{
+	return false;
+}
+
+
 static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
 {
 	return NULL;
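pnfs_layoutcommit_outstanding() gives callers outside pnfs.c a lock-free way to ask whether a LAYOUTCOMMIT is either pending (NFS_INO_LAYOUTCOMMIT set) or currently in flight (NFS_INO_LAYOUTCOMMITTING set); the second copy is the stub for configurations without pNFS, where the answer is always false. A hedged illustration of the kind of caller this enables (the function below is a sketch, not the kernel's nfs4_write_inode()):

    /* Illustrative caller: only start a layoutcommit when one is outstanding. */
    static int example_write_inode(struct inode *inode, bool for_sync)
    {
            if (!pnfs_layoutcommit_outstanding(inode))
                    return 0;
            return pnfs_layoutcommit_inode(inode, for_sync);
    }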
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 31db5c366b81..411aedda14bb 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -163,9 +163,9 @@ static void nfs_readpage_release(struct nfs_page *req)
 
 	unlock_page(req->wb_page);
 
-	dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
+	dprintk("NFS: read done (%s/%Lu %d@%Ld)\n",
 		req->wb_context->dentry->d_inode->i_sb->s_id,
-		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+		(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
 		req->wb_bytes,
 		(long long)req_offset(req));
 	nfs_release_request(req);
@@ -228,11 +228,11 @@ int nfs_initiate_read(struct rpc_clnt *clnt,
 	/* Set up the initial task struct. */
 	NFS_PROTO(inode)->read_setup(data, &msg);
 
-	dprintk("NFS: %5u initiated read call (req %s/%lld, %u bytes @ "
+	dprintk("NFS: %5u initiated read call (req %s/%llu, %u bytes @ "
 		"offset %llu)\n",
 		data->task.tk_pid,
 		inode->i_sb->s_id,
-		(long long)NFS_FILEID(inode),
+		(unsigned long long)NFS_FILEID(inode),
 		data->args.count,
 		(unsigned long long)data->args.offset);
 
@@ -630,9 +630,9 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
 	unsigned long npages;
 	int ret = -ESTALE;
 
-	dprintk("NFS: nfs_readpages (%s/%Ld %d)\n",
+	dprintk("NFS: nfs_readpages (%s/%Lu %d)\n",
 		inode->i_sb->s_id,
-		(long long)NFS_FILEID(inode),
+		(unsigned long long)NFS_FILEID(inode),
 		nr_pages);
 	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
 
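All of the dprintk() changes in read.c, and the matching ones in write.c below, are about signedness: NFS_FILEID() yields a 64-bit unsigned fileid, and printing it with %Ld/%lld makes large fileids appear as negative numbers in the log. A minimal stand-alone illustration of the difference:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t fileid = 0xfffffffffffffffeULL;        /* a valid, very large fileid */

            printf("%lld\n", (long long)fileid);            /* prints -2 */
            printf("%llu\n", (unsigned long long)fileid);   /* prints 18446744073709551614 */
            return 0;
    }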
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c1d548211c31..a44a87268a6e 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -922,19 +922,20 @@ out:
  * extend the write to cover the entire page in order to avoid fragmentation
  * inefficiencies.
  *
- * If the file is opened for synchronous writes or if we have a write delegation
- * from the server then we can just skip the rest of the checks.
+ * If the file is opened for synchronous writes then we can just skip the rest
+ * of the checks.
  */
 static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
 {
 	if (file->f_flags & O_DSYNC)
 		return 0;
+	if (!nfs_write_pageuptodate(page, inode))
+		return 0;
 	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
 		return 1;
-	if (nfs_write_pageuptodate(page, inode) && (inode->i_flock == NULL ||
-			(inode->i_flock->fl_start == 0 &&
+	if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 &&
 			inode->i_flock->fl_end == OFFSET_MAX &&
-			inode->i_flock->fl_type != F_RDLCK)))
+			inode->i_flock->fl_type != F_RDLCK))
 		return 1;
 	return 0;
 }
@@ -1013,10 +1014,10 @@ int nfs_initiate_write(struct rpc_clnt *clnt,
 	NFS_PROTO(inode)->write_setup(data, &msg);
 
 	dprintk("NFS: %5u initiated write call "
-		"(req %s/%lld, %u bytes @ offset %llu)\n",
+		"(req %s/%llu, %u bytes @ offset %llu)\n",
 		data->task.tk_pid,
 		inode->i_sb->s_id,
-		(long long)NFS_FILEID(inode),
+		(unsigned long long)NFS_FILEID(inode),
 		data->args.count,
 		(unsigned long long)data->args.offset);
 
@@ -1606,9 +1607,9 @@ static void nfs_commit_release_pages(struct nfs_commit_data *data)
 		nfs_list_remove_request(req);
 		nfs_clear_page_commit(req->wb_page);
 
-		dprintk("NFS: commit (%s/%lld %d@%lld)",
+		dprintk("NFS: commit (%s/%llu %d@%lld)",
 			req->wb_context->dentry->d_sb->s_id,
-			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
+			(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
 			req->wb_bytes,
 			(long long)req_offset(req));
 		if (status < 0) {
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index a353e0300b54..7f490bef9e99 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -84,7 +84,8 @@ enum {
 
 extern struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
 				       const unsigned char *dir_name);
-extern void rpc_pipefs_init_net(struct net *net);
+extern int rpc_pipefs_init_net(struct net *net);
+extern void rpc_pipefs_exit_net(struct net *net);
 extern struct super_block *rpc_get_sb_net(const struct net *net);
 extern void rpc_put_sb_net(const struct net *net);
 
@@ -130,5 +131,7 @@ extern int rpc_unlink(struct dentry *);
 extern int register_rpc_pipefs(void);
 extern void unregister_rpc_pipefs(void);
 
+extern bool gssd_running(struct net *net);
+
 #endif
 #endif
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index d51d16c7afd8..ddc179b7a105 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -301,6 +301,7 @@ DECLARE_EVENT_CLASS(xs_socket_event_done,
 
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_state_change);
 DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_connect);
+DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_error);
 DEFINE_RPC_SOCKET_EVENT_DONE(rpc_socket_reset_connection);
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_close);
 DEFINE_RPC_SOCKET_EVENT(rpc_socket_shutdown);
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 42fdfc634e56..0a2aee060f9f 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -536,8 +536,7 @@ static void warn_gssd(void)
 	unsigned long now = jiffies;
 
 	if (time_after(now, ratelimit)) {
-		printk(KERN_WARNING "RPC: AUTH_GSS upcall timed out.\n"
-				"Please check user daemon is running.\n");
+		pr_warn("RPC: AUTH_GSS upcall failed. Please check user daemon is running.\n");
 		ratelimit = now + 15*HZ;
 	}
 }
@@ -600,7 +599,6 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
 	struct rpc_pipe *pipe;
 	struct rpc_cred *cred = &gss_cred->gc_base;
 	struct gss_upcall_msg *gss_msg;
-	unsigned long timeout;
 	DEFINE_WAIT(wait);
 	int err;
 
@@ -608,17 +606,16 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
 		__func__, from_kuid(&init_user_ns, cred->cr_uid));
 retry:
 	err = 0;
-	/* Default timeout is 15s unless we know that gssd is not running */
-	timeout = 15 * HZ;
-	if (!sn->gssd_running)
-		timeout = HZ >> 2;
+	/* if gssd is down, just skip upcalling altogether */
+	if (!gssd_running(net)) {
+		warn_gssd();
+		return -EACCES;
+	}
 	gss_msg = gss_setup_upcall(gss_auth, cred);
 	if (PTR_ERR(gss_msg) == -EAGAIN) {
 		err = wait_event_interruptible_timeout(pipe_version_waitqueue,
-				sn->pipe_version >= 0, timeout);
+				sn->pipe_version >= 0, 15 * HZ);
 		if (sn->pipe_version < 0) {
-			if (err == 0)
-				sn->gssd_running = 0;
 			warn_gssd();
 			err = -EACCES;
 		}
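Two things change in gss_create_upcall(): the "is gssd there?" heuristic moves from a shortened wait timeout to an up-front gssd_running() check that fails fast with -EACCES, and the per-net gssd_running flag that used to be set from the pipe open path goes away. The deleted "if (err == 0)" branch only makes sense once you know the return convention of wait_event_interruptible_timeout(); a small sketch of that convention (a generic pattern, not the kernel's exact code):

    /* Sketch: interpreting wait_event_interruptible_timeout()'s return value. */
    static int example_wait_for_pipe_version(wait_queue_head_t *wq, int *version)
    {
            long t = wait_event_interruptible_timeout(*wq, *version >= 0, 15 * HZ);

            if (t > 0)
                    return 0;               /* condition became true, t jiffies left */
            if (t == 0)
                    return -ETIMEDOUT;      /* timed out, condition still false */
            return t;                       /* -ERESTARTSYS: interrupted by a signal */
    }

The old code used the t == 0 case as its indirect signal that gssd was absent; with gssd_running() that guess is no longer needed.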
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index f09b7db2c492..0edada973434 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1529,9 +1529,13 @@ call_refreshresult(struct rpc_task *task)
 	task->tk_action = call_refresh;
 	switch (status) {
 	case 0:
-		if (rpcauth_uptodatecred(task))
+		if (rpcauth_uptodatecred(task)) {
 			task->tk_action = call_allocate;
-		return;
+			return;
+		}
+		/* Use rate-limiting and a max number of retries if refresh
+		 * had status 0 but failed to update the cred.
+		 */
 	case -ETIMEDOUT:
 		rpc_delay(task, 3*HZ);
 	case -EAGAIN:
@@ -1729,6 +1733,7 @@ call_bind_status(struct rpc_task *task)
 		return;
 	case -ECONNREFUSED:	/* connection problems */
 	case -ECONNRESET:
+	case -ECONNABORTED:
 	case -ENOTCONN:
 	case -EHOSTDOWN:
 	case -EHOSTUNREACH:
@@ -1799,7 +1804,9 @@ call_connect_status(struct rpc_task *task)
 		return;
 	case -ECONNREFUSED:
 	case -ECONNRESET:
+	case -ECONNABORTED:
 	case -ENETUNREACH:
+	case -EHOSTUNREACH:
 		/* retry with existing socket, after a delay */
 		rpc_delay(task, 3*HZ);
 		if (RPC_IS_SOFTCONN(task))
@@ -1902,6 +1909,7 @@ call_transmit_status(struct rpc_task *task)
 			break;
 		}
 	case -ECONNRESET:
+	case -ECONNABORTED:
 	case -ENOTCONN:
 	case -EPIPE:
 		rpc_task_force_reencode(task);
@@ -2011,8 +2019,9 @@ call_status(struct rpc_task *task)
 		xprt_conditional_disconnect(req->rq_xprt,
 			req->rq_connect_cookie);
 		break;
-	case -ECONNRESET:
 	case -ECONNREFUSED:
+	case -ECONNRESET:
+	case -ECONNABORTED:
 		rpc_force_rebind(clnt);
 		rpc_delay(task, 3*HZ);
 	case -EPIPE:
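The common thread through call_bind_status(), call_connect_status(), call_transmit_status() and call_status() is that ECONNABORTED (plus EHOSTUNREACH on connect) now falls into the same bucket as the other transient connection errors, so the task rebinds or reconnects after a short delay instead of dying. Written out as a hypothetical predicate (this helper does not exist in clnt.c; it only summarizes the case labels above, which differ slightly per call site):

    /* Hypothetical summary of the errno values treated as connection errors. */
    static bool example_is_connection_error(int err)
    {
            switch (err) {
            case -ECONNREFUSED:
            case -ECONNRESET:
            case -ECONNABORTED:
            case -ENOTCONN:
            case -ENETUNREACH:
            case -EHOSTUNREACH:
                    return true;
            default:
                    return false;
            }
    }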
diff --git a/net/sunrpc/netns.h b/net/sunrpc/netns.h
index 779742cfc1ff..94e506f9d72b 100644
--- a/net/sunrpc/netns.h
+++ b/net/sunrpc/netns.h
@@ -14,6 +14,7 @@ struct sunrpc_net {
 	struct cache_detail *rsi_cache;
 
 	struct super_block *pipefs_sb;
+	struct rpc_pipe *gssd_dummy;
 	struct mutex pipefs_sb_lock;
 
 	struct list_head all_clients;
@@ -32,8 +33,6 @@ struct sunrpc_net {
 	int pipe_version;
 	atomic_t pipe_users;
 	struct proc_dir_entry *use_gssp_proc;
-
-	unsigned int gssd_running;
 };
 
 extern int sunrpc_net_id;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index bf04b30a788a..b18554898562 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -17,6 +17,7 @@
 #include <linux/fsnotify.h>
 #include <linux/kernel.h>
 #include <linux/rcupdate.h>
+#include <linux/utsname.h>
 
 #include <asm/ioctls.h>
 #include <linux/poll.h>
@@ -38,7 +39,7 @@
 #define NET_NAME(net) ((net == &init_net) ? " (init_net)" : "")
 
 static struct file_system_type rpc_pipe_fs_type;
-
+static const struct rpc_pipe_ops gssd_dummy_pipe_ops;
 
 static struct kmem_cache *rpc_inode_cachep __read_mostly;
 
@@ -216,14 +217,11 @@ rpc_destroy_inode(struct inode *inode)
 static int
 rpc_pipe_open(struct inode *inode, struct file *filp)
 {
-	struct net *net = inode->i_sb->s_fs_info;
-	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 	struct rpc_pipe *pipe;
 	int first_open;
 	int res = -ENXIO;
 
 	mutex_lock(&inode->i_mutex);
-	sn->gssd_running = 1;
 	pipe = RPC_I(inode)->pipe;
 	if (pipe == NULL)
 		goto out;
@@ -1159,6 +1157,7 @@ enum {
 	RPCAUTH_nfsd4_cb,
 	RPCAUTH_cache,
 	RPCAUTH_nfsd,
+	RPCAUTH_gssd,
 	RPCAUTH_RootEOF
 };
 
@@ -1195,6 +1194,10 @@ static const struct rpc_filelist files[] = {
 		.name = "nfsd",
 		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
 	},
+	[RPCAUTH_gssd] = {
+		.name = "gssd",
+		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
+	},
 };
 
 /*
@@ -1208,13 +1211,24 @@ struct dentry *rpc_d_lookup_sb(const struct super_block *sb,
 }
 EXPORT_SYMBOL_GPL(rpc_d_lookup_sb);
 
-void rpc_pipefs_init_net(struct net *net)
+int rpc_pipefs_init_net(struct net *net)
 {
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 
+	sn->gssd_dummy = rpc_mkpipe_data(&gssd_dummy_pipe_ops, 0);
+	if (IS_ERR(sn->gssd_dummy))
+		return PTR_ERR(sn->gssd_dummy);
+
 	mutex_init(&sn->pipefs_sb_lock);
-	sn->gssd_running = 1;
 	sn->pipe_version = -1;
+	return 0;
+}
+
+void rpc_pipefs_exit_net(struct net *net)
+{
+	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+	rpc_destroy_pipe_data(sn->gssd_dummy);
 }
 
 /*
@@ -1244,11 +1258,134 @@ void rpc_put_sb_net(const struct net *net)
 }
 EXPORT_SYMBOL_GPL(rpc_put_sb_net);
 
+static const struct rpc_filelist gssd_dummy_clnt_dir[] = {
+	[0] = {
+		.name = "clntXX",
+		.mode = S_IFDIR | S_IRUGO | S_IXUGO,
+	},
+};
+
+static ssize_t
+dummy_downcall(struct file *filp, const char __user *src, size_t len)
+{
+	return -EINVAL;
+}
+
+static const struct rpc_pipe_ops gssd_dummy_pipe_ops = {
+	.upcall = rpc_pipe_generic_upcall,
+	.downcall = dummy_downcall,
+};
+
+/*
+ * Here we present a bogus "info" file to keep rpc.gssd happy. We don't expect
+ * that it will ever use this info to handle an upcall, but rpc.gssd expects
+ * that this file will be there and have a certain format.
+ */
+static int
+rpc_show_dummy_info(struct seq_file *m, void *v)
+{
+	seq_printf(m, "RPC server: %s\n", utsname()->nodename);
+	seq_printf(m, "service: foo (1) version 0\n");
+	seq_printf(m, "address: 127.0.0.1\n");
+	seq_printf(m, "protocol: tcp\n");
+	seq_printf(m, "port: 0\n");
+	return 0;
+}
+
+static int
+rpc_dummy_info_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rpc_show_dummy_info, NULL);
+}
+
+static const struct file_operations rpc_dummy_info_operations = {
+	.owner = THIS_MODULE,
+	.open = rpc_dummy_info_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static const struct rpc_filelist gssd_dummy_info_file[] = {
+	[0] = {
+		.name = "info",
+		.i_fop = &rpc_dummy_info_operations,
+		.mode = S_IFREG | S_IRUSR,
+	},
+};
+
+/**
+ * rpc_gssd_dummy_populate - create a dummy gssd pipe
+ * @root: root of the rpc_pipefs filesystem
+ * @pipe_data: pipe data created when netns is initialized
+ *
+ * Create a dummy set of directories and a pipe that gssd can hold open to
+ * indicate that it is up and running.
+ */
+static struct dentry *
+rpc_gssd_dummy_populate(struct dentry *root, struct rpc_pipe *pipe_data)
+{
+	int ret = 0;
+	struct dentry *gssd_dentry;
+	struct dentry *clnt_dentry = NULL;
+	struct dentry *pipe_dentry = NULL;
+	struct qstr q = QSTR_INIT(files[RPCAUTH_gssd].name,
+				  strlen(files[RPCAUTH_gssd].name));
+
+	/* We should never get this far if "gssd" doesn't exist */
+	gssd_dentry = d_hash_and_lookup(root, &q);
+	if (!gssd_dentry)
+		return ERR_PTR(-ENOENT);
+
+	ret = rpc_populate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1, NULL);
+	if (ret) {
+		pipe_dentry = ERR_PTR(ret);
+		goto out;
+	}
+
+	q.name = gssd_dummy_clnt_dir[0].name;
+	q.len = strlen(gssd_dummy_clnt_dir[0].name);
+	clnt_dentry = d_hash_and_lookup(gssd_dentry, &q);
+	if (!clnt_dentry) {
+		pipe_dentry = ERR_PTR(-ENOENT);
+		goto out;
+	}
+
+	ret = rpc_populate(clnt_dentry, gssd_dummy_info_file, 0, 1, NULL);
+	if (ret) {
+		__rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
+		pipe_dentry = ERR_PTR(ret);
+		goto out;
+	}
+
+	pipe_dentry = rpc_mkpipe_dentry(clnt_dentry, "gssd", NULL, pipe_data);
+	if (IS_ERR(pipe_dentry)) {
+		__rpc_depopulate(clnt_dentry, gssd_dummy_info_file, 0, 1);
+		__rpc_depopulate(gssd_dentry, gssd_dummy_clnt_dir, 0, 1);
+	}
+out:
+	dput(clnt_dentry);
+	dput(gssd_dentry);
+	return pipe_dentry;
+}
+
+static void
+rpc_gssd_dummy_depopulate(struct dentry *pipe_dentry)
+{
+	struct dentry *clnt_dir = pipe_dentry->d_parent;
+	struct dentry *gssd_dir = clnt_dir->d_parent;
+
+	__rpc_rmpipe(clnt_dir->d_inode, pipe_dentry);
+	__rpc_depopulate(clnt_dir, gssd_dummy_info_file, 0, 1);
+	__rpc_depopulate(gssd_dir, gssd_dummy_clnt_dir, 0, 1);
+	dput(pipe_dentry);
+}
+
 static int
 rpc_fill_super(struct super_block *sb, void *data, int silent)
 {
 	struct inode *inode;
-	struct dentry *root;
+	struct dentry *root, *gssd_dentry;
 	struct net *net = data;
 	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
 	int err;
@@ -1266,6 +1403,13 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
 		return -ENOMEM;
 	if (rpc_populate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF, NULL))
 		return -ENOMEM;
+
+	gssd_dentry = rpc_gssd_dummy_populate(root, sn->gssd_dummy);
+	if (IS_ERR(gssd_dentry)) {
+		__rpc_depopulate(root, files, RPCAUTH_lockd, RPCAUTH_RootEOF);
+		return PTR_ERR(gssd_dentry);
+	}
+
 	dprintk("RPC: sending pipefs MOUNT notification for net %p%s\n",
 		net, NET_NAME(net));
 	mutex_lock(&sn->pipefs_sb_lock);
@@ -1280,6 +1424,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
 	return 0;
 
 err_depopulate:
+	rpc_gssd_dummy_depopulate(gssd_dentry);
 	blocking_notifier_call_chain(&rpc_pipefs_notifier_list,
 					   RPC_PIPEFS_UMOUNT,
 					   sb);
@@ -1289,6 +1434,16 @@ err_depopulate:
 	return err;
 }
 
+bool
+gssd_running(struct net *net)
+{
+	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+	struct rpc_pipe *pipe = sn->gssd_dummy;
+
+	return pipe->nreaders || pipe->nwriters;
+}
+EXPORT_SYMBOL_GPL(gssd_running);
+
 static struct dentry *
 rpc_mount(struct file_system_type *fs_type,
 	  int flags, const char *dev_name, void *data)
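gssd_running() works because rpc.gssd keeps the dummy pipe under gssd/clntXX/ open for as long as it is alive, and the pipefs open/release paths keep per-pipe reader and writer counts, so "is gssd running" reduces to "does anyone hold that pipe open". A simplified sketch of the accounting being relied on (the real rpc_pipe_open()/rpc_pipe_release() do considerably more; only the counting is shown and the function names here are illustrative):

    /* Simplified sketch of the reader/writer counting behind gssd_running(). */
    static int example_pipe_open(struct rpc_pipe *pipe, struct file *filp)
    {
            if (filp->f_mode & FMODE_READ)
                    pipe->nreaders++;
            if (filp->f_mode & FMODE_WRITE)
                    pipe->nwriters++;
            return 0;
    }

    static void example_pipe_release(struct rpc_pipe *pipe, struct file *filp)
    {
            if (filp->f_mode & FMODE_READ)
                    pipe->nreaders--;
            if (filp->f_mode & FMODE_WRITE)
                    pipe->nwriters--;
    }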
diff --git a/net/sunrpc/sunrpc_syms.c b/net/sunrpc/sunrpc_syms.c
index 3d6498af9adc..cd30120de9e4 100644
--- a/net/sunrpc/sunrpc_syms.c
+++ b/net/sunrpc/sunrpc_syms.c
@@ -44,12 +44,17 @@ static __net_init int sunrpc_init_net(struct net *net)
 	if (err)
 		goto err_unixgid;
 
-	rpc_pipefs_init_net(net);
+	err = rpc_pipefs_init_net(net);
+	if (err)
+		goto err_pipefs;
+
 	INIT_LIST_HEAD(&sn->all_clients);
 	spin_lock_init(&sn->rpc_client_lock);
 	spin_lock_init(&sn->rpcb_clnt_lock);
 	return 0;
 
+err_pipefs:
+	unix_gid_cache_destroy(net);
 err_unixgid:
 	ip_map_cache_destroy(net);
 err_ipmap:
@@ -60,6 +65,7 @@ err_proc:
 
 static __net_exit void sunrpc_exit_net(struct net *net)
 {
+	rpc_pipefs_exit_net(net);
 	unix_gid_cache_destroy(net);
 	ip_map_cache_destroy(net);
 	rpc_proc_exit(net);
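sunrpc_init_net() and sunrpc_exit_net() are the per-network-namespace constructor and destructor, which is why a rpc_pipefs_init_net() failure has to unwind everything registered before it (hence the new err_pipefs label) and why rpc_pipefs_exit_net() runs first on teardown, mirroring init in reverse. They are hooked up through pernet_operations; the registration below shows the standard pattern and is believed to match sunrpc_syms.c, but treat the exact initializer as illustrative:

    /* Standard pernet registration pattern (initializer shown is illustrative). */
    static struct pernet_operations sunrpc_net_ops = {
            .init = sunrpc_init_net,
            .exit = sunrpc_exit_net,
            .id   = &sunrpc_net_id,
            .size = sizeof(struct sunrpc_net),
    };

    /* Registered once at module init, e.g.: err = register_pernet_subsys(&sunrpc_net_ops); */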
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 1750048130a7..7d4df99f761f 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -749,6 +749,11 @@ static void xprt_connect_status(struct rpc_task *task)
 	}
 
 	switch (task->tk_status) {
+	case -ECONNREFUSED:
+	case -ECONNRESET:
+	case -ECONNABORTED:
+	case -ENETUNREACH:
+	case -EHOSTUNREACH:
 	case -EAGAIN:
 		dprintk("RPC: %5u xprt_connect_status: retrying\n", task->tk_pid);
 		break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 75b045e1cd50..2a7ca8ffe83a 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -257,6 +257,7 @@ struct sock_xprt {
 	void (*old_data_ready)(struct sock *, int);
 	void (*old_state_change)(struct sock *);
 	void (*old_write_space)(struct sock *);
+	void (*old_error_report)(struct sock *);
 };
 
 /*
@@ -274,6 +275,11 @@ struct sock_xprt {
  */
 #define TCP_RPC_REPLY (1UL << 6)
 
+static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
+{
+	return (struct rpc_xprt *) sk->sk_user_data;
+}
+
 static inline struct sockaddr *xs_addr(struct rpc_xprt *xprt)
 {
 	return (struct sockaddr *) &xprt->addr;
@@ -799,6 +805,7 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk)
 	transport->old_data_ready = sk->sk_data_ready;
 	transport->old_state_change = sk->sk_state_change;
 	transport->old_write_space = sk->sk_write_space;
+	transport->old_error_report = sk->sk_error_report;
 }
 
 static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
@@ -806,6 +813,34 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk)
 	sk->sk_data_ready = transport->old_data_ready;
 	sk->sk_state_change = transport->old_state_change;
 	sk->sk_write_space = transport->old_write_space;
+	sk->sk_error_report = transport->old_error_report;
+}
+
+/**
+ * xs_error_report - callback to handle TCP socket state errors
+ * @sk: socket
+ *
+ * Note: we don't call sock_error() since there may be a rpc_task
+ * using the socket, and so we don't want to clear sk->sk_err.
+ */
+static void xs_error_report(struct sock *sk)
+{
+	struct rpc_xprt *xprt;
+	int err;
+
+	read_lock_bh(&sk->sk_callback_lock);
+	if (!(xprt = xprt_from_sock(sk)))
+		goto out;
+
+	err = -sk->sk_err;
+	if (err == 0)
+		goto out;
+	dprintk("RPC: xs_error_report client %p, error=%d...\n",
+			xprt, -err);
+	trace_rpc_socket_error(xprt, sk->sk_socket, err);
+	xprt_wake_pending_tasks(xprt, err);
+out:
+	read_unlock_bh(&sk->sk_callback_lock);
 }
 
 static void xs_reset_transport(struct sock_xprt *transport)
@@ -885,11 +920,6 @@ static void xs_destroy(struct rpc_xprt *xprt)
 	module_put(THIS_MODULE);
 }
 
-static inline struct rpc_xprt *xprt_from_sock(struct sock *sk)
-{
-	return (struct rpc_xprt *) sk->sk_user_data;
-}
-
 static int xs_local_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
 {
 	struct xdr_skb_reader desc = {
@@ -1869,6 +1899,7 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
 		sk->sk_user_data = xprt;
 		sk->sk_data_ready = xs_local_data_ready;
 		sk->sk_write_space = xs_udp_write_space;
+		sk->sk_error_report = xs_error_report;
 		sk->sk_allocation = GFP_ATOMIC;
 
 		xprt_clear_connected(xprt);
@@ -2146,6 +2177,7 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
 		sk->sk_data_ready = xs_tcp_data_ready;
 		sk->sk_state_change = xs_tcp_state_change;
 		sk->sk_write_space = xs_tcp_write_space;
+		sk->sk_error_report = xs_error_report;
 		sk->sk_allocation = GFP_ATOMIC;
 
 		/* socket options */
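xs_error_report() slots into the transport's usual treatment of socket callbacks: when a socket is attached, the original sk_* callbacks are saved and the transport's own are installed under sk_callback_lock, and teardown restores the originals; sk_error_report simply joins the set that is saved, installed and restored. A condensed sketch of that pattern (illustrative; the real xs_local_finish_connecting()/xs_tcp_finish_connecting() do this inline along with other socket setup):

    /* Condensed sketch of the callback save/install/restore pattern. */
    static void example_install_callbacks(struct sock_xprt *transport, struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            xs_save_old_callbacks(transport, sk);   /* remember the originals */
            sk->sk_user_data = &transport->xprt;    /* lets xprt_from_sock() work */
            sk->sk_data_ready = xs_tcp_data_ready;
            sk->sk_state_change = xs_tcp_state_change;
            sk->sk_write_space = xs_tcp_write_space;
            sk->sk_error_report = xs_error_report;
            write_unlock_bh(&sk->sk_callback_lock);
    }

    static void example_remove_callbacks(struct sock_xprt *transport, struct sock *sk)
    {
            write_lock_bh(&sk->sk_callback_lock);
            sk->sk_user_data = NULL;
            xs_restore_old_callbacks(transport, sk);        /* put the originals back */
            write_unlock_bh(&sk->sk_callback_lock);
    }

Because xs_error_report() only wakes pending tasks with -sk->sk_err and deliberately does not call sock_error(), the error stays visible on the socket for any rpc_task that later consumes it.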