Diffstat (limited to 'fs')
 fs/buffer.c         |  10
 fs/cifs/Kconfig     |   1
 fs/cifs/cifsfs.c    |  93
 fs/cifs/cifsglob.h  |  21
 fs/cifs/cifsproto.h |  11
 fs/cifs/cifssmb.c   |   1
 fs/cifs/connect.c   |   1
 fs/cifs/dir.c       |  64
 fs/cifs/file.c      | 137
 fs/cifs/misc.c      |  34
 fs/cifs/readdir.c   |   4
 fs/cifs/transport.c |  50
 fs/fs-writeback.c   | 165
 13 files changed, 280 insertions(+), 312 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 24afd7422ae8..6fa530256bfd 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -280,7 +280,7 @@ void invalidate_bdev(struct block_device *bdev)
 EXPORT_SYMBOL(invalidate_bdev);
 
 /*
- * Kick pdflush then try to free up some ZONE_NORMAL memory.
+ * Kick the writeback threads then try to free up some ZONE_NORMAL memory.
  */
 static void free_more_memory(void)
 {
@@ -1709,9 +1709,9 @@ static int __block_write_full_page(struct inode *inode, struct page *page,
 		/*
 		 * If it's a fully non-blocking write attempt and we cannot
 		 * lock the buffer then redirty the page. Note that this can
-		 * potentially cause a busy-wait loop from pdflush and kswapd
-		 * activity, but those code paths have their own higher-level
-		 * throttling.
+		 * potentially cause a busy-wait loop from writeback threads
+		 * and kswapd activity, but those code paths have their own
+		 * higher-level throttling.
 		 */
 		if (wbc->sync_mode != WB_SYNC_NONE || !wbc->nonblocking) {
 			lock_buffer(bh);
@@ -3208,7 +3208,7 @@ EXPORT_SYMBOL(block_sync_page);
 * still running obsolete flush daemons, so we terminate them here.
 *
 * Use of bdflush() is deprecated and will be removed in a future kernel.
- * The `pdflush' kernel threads fully replace bdflush daemons and this call.
+ * The `flush-X' kernel threads fully replace bdflush daemons and this call.
 */
 SYSCALL_DEFINE2(bdflush, int, func, long, data)
 {
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 6994a0f54f02..80f352596807 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -2,6 +2,7 @@ config CIFS
 	tristate "CIFS support (advanced network filesystem, SMBFS successor)"
 	depends on INET
 	select NLS
+	select SLOW_WORK
 	help
 	  This is the client VFS module for the Common Internet File System
 	  (CIFS) protocol which is the successor to the Server Message Block
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 90c5b39f0313..9a5e4f5f3122 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -64,9 +64,6 @@ unsigned int multiuser_mount = 0;
 unsigned int extended_security = CIFSSEC_DEF;
 /* unsigned int ntlmv2_support = 0; */
 unsigned int sign_CIFS_PDUs = 1;
-extern struct task_struct *oplockThread; /* remove sparse warning */
-struct task_struct *oplockThread = NULL;
-/* extern struct task_struct * dnotifyThread; remove sparse warning */
 static const struct super_operations cifs_super_ops;
 unsigned int CIFSMaxBufSize = CIFS_MAX_MSGSIZE;
 module_param(CIFSMaxBufSize, int, 0);
@@ -972,89 +969,12 @@ cifs_destroy_mids(void)
 	kmem_cache_destroy(cifs_oplock_cachep);
 }
 
-static int cifs_oplock_thread(void *dummyarg)
-{
-	struct oplock_q_entry *oplock_item;
-	struct cifsTconInfo *pTcon;
-	struct inode *inode;
-	__u16 netfid;
-	int rc, waitrc = 0;
-
-	set_freezable();
-	do {
-		if (try_to_freeze())
-			continue;
-
-		spin_lock(&cifs_oplock_lock);
-		if (list_empty(&cifs_oplock_list)) {
-			spin_unlock(&cifs_oplock_lock);
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(39*HZ);
-		} else {
-			oplock_item = list_entry(cifs_oplock_list.next,
-						 struct oplock_q_entry, qhead);
-			cFYI(1, ("found oplock item to write out"));
-			pTcon = oplock_item->tcon;
-			inode = oplock_item->pinode;
-			netfid = oplock_item->netfid;
-			spin_unlock(&cifs_oplock_lock);
-			DeleteOplockQEntry(oplock_item);
-			/* can not grab inode sem here since it would
-			   deadlock when oplock received on delete
-			   since vfs_unlink holds the i_mutex across
-			   the call */
-			/* mutex_lock(&inode->i_mutex);*/
-			if (S_ISREG(inode->i_mode)) {
-#ifdef CONFIG_CIFS_EXPERIMENTAL
-				if (CIFS_I(inode)->clientCanCacheAll == 0)
-					break_lease(inode, FMODE_READ);
-				else if (CIFS_I(inode)->clientCanCacheRead == 0)
-					break_lease(inode, FMODE_WRITE);
-#endif
-				rc = filemap_fdatawrite(inode->i_mapping);
-				if (CIFS_I(inode)->clientCanCacheRead == 0) {
-					waitrc = filemap_fdatawait(
-							inode->i_mapping);
-					invalidate_remote_inode(inode);
-				}
-				if (rc == 0)
-					rc = waitrc;
-			} else
-				rc = 0;
-			/* mutex_unlock(&inode->i_mutex);*/
-			if (rc)
-				CIFS_I(inode)->write_behind_rc = rc;
-			cFYI(1, ("Oplock flush inode %p rc %d",
-				 inode, rc));
-
-			/* releasing stale oplock after recent reconnect
-			   of smb session using a now incorrect file
-			   handle is not a data integrity issue but do
-			   not bother sending an oplock release if session
-			   to server still is disconnected since oplock
-			   already released by the server in that case */
-			if (!pTcon->need_reconnect) {
-				rc = CIFSSMBLock(0, pTcon, netfid,
-					0 /* len */ , 0 /* offset */, 0,
-					0, LOCKING_ANDX_OPLOCK_RELEASE,
-					false /* wait flag */);
-				cFYI(1, ("Oplock release rc = %d", rc));
-			}
-			set_current_state(TASK_INTERRUPTIBLE);
-			schedule_timeout(1); /* yield in case q were corrupt */
-		}
-	} while (!kthread_should_stop());
-
-	return 0;
-}
-
 static int __init
 init_cifs(void)
 {
 	int rc = 0;
 	cifs_proc_init();
 	INIT_LIST_HEAD(&cifs_tcp_ses_list);
-	INIT_LIST_HEAD(&cifs_oplock_list);
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	INIT_LIST_HEAD(&GlobalDnotifyReqList);
 	INIT_LIST_HEAD(&GlobalDnotifyRsp_Q);
@@ -1083,7 +1003,6 @@ init_cifs(void)
 	rwlock_init(&GlobalSMBSeslock);
 	rwlock_init(&cifs_tcp_ses_lock);
 	spin_lock_init(&GlobalMid_Lock);
-	spin_lock_init(&cifs_oplock_lock);
 
 	if (cifs_max_pending < 2) {
 		cifs_max_pending = 2;
@@ -1118,16 +1037,13 @@ init_cifs(void)
 	if (rc)
 		goto out_unregister_key_type;
 #endif
-	oplockThread = kthread_run(cifs_oplock_thread, NULL, "cifsoplockd");
-	if (IS_ERR(oplockThread)) {
-		rc = PTR_ERR(oplockThread);
-		cERROR(1, ("error %d create oplock thread", rc));
-		goto out_unregister_dfs_key_type;
-	}
+	rc = slow_work_register_user();
+	if (rc)
+		goto out_unregister_resolver_key;
 
 	return 0;
 
-out_unregister_dfs_key_type:
+out_unregister_resolver_key:
 #ifdef CONFIG_CIFS_DFS_UPCALL
 	unregister_key_type(&key_type_dns_resolver);
 out_unregister_key_type:
@@ -1164,7 +1080,6 @@ exit_cifs(void)
 	cifs_destroy_inodecache();
 	cifs_destroy_mids();
 	cifs_destroy_request_bufs();
-	kthread_stop(oplockThread);
 }
 
 MODULE_AUTHOR("Steve French <sfrench@us.ibm.com>");
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6cfc81a32703..5d0fde18039c 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -18,6 +18,7 @@
 */
 #include <linux/in.h>
 #include <linux/in6.h>
+#include <linux/slow-work.h>
 #include "cifs_fs_sb.h"
 #include "cifsacl.h"
 /*
@@ -346,14 +347,16 @@ struct cifsFileInfo {
 		/* lock scope id (0 if none) */
 	struct file *pfile; /* needed for writepage */
 	struct inode *pInode; /* needed for oplock break */
+	struct vfsmount *mnt;
 	struct mutex lock_mutex;
 	struct list_head llist; /* list of byte range locks we have. */
 	bool closePend:1; /* file is marked to close */
 	bool invalidHandle:1; /* file closed via session abend */
-	bool messageMode:1; /* for pipes: message vs byte mode */
+	bool oplock_break_cancelled:1;
 	atomic_t count; /* reference count */
 	struct mutex fh_mutex; /* prevents reopen race after dead ses*/
 	struct cifs_search_info srch_inf;
+	struct slow_work oplock_break; /* slow_work job for oplock breaks */
 };
 
 /* Take a reference on the file private data */
@@ -365,8 +368,10 @@ static inline void cifsFileInfo_get(struct cifsFileInfo *cifs_file)
 /* Release a reference on the file private data */
 static inline void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
-	if (atomic_dec_and_test(&cifs_file->count))
+	if (atomic_dec_and_test(&cifs_file->count)) {
+		iput(cifs_file->pInode);
 		kfree(cifs_file);
+	}
 }
 
 /*
@@ -382,7 +387,6 @@ struct cifsInodeInfo {
 	unsigned long time; /* jiffies of last update/check of inode */
 	bool clientCanCacheRead:1; /* read oplock */
 	bool clientCanCacheAll:1; /* read and writebehind oplock */
-	bool oplockPending:1;
 	bool delete_pending:1; /* DELETE_ON_CLOSE is set */
 	u64 server_eof; /* current file size on server */
 	u64 uniqueid; /* server inode number */
@@ -585,9 +589,9 @@ require use of the stronger protocol */
 #define CIFSSEC_MUST_LANMAN	0x10010
 #define CIFSSEC_MUST_PLNTXT	0x20020
 #ifdef CONFIG_CIFS_UPCALL
-#define CIFSSEC_MASK	0xAF0AF /* allows weak security but also krb5 */
+#define CIFSSEC_MASK	0xBF0BF /* allows weak security but also krb5 */
 #else
-#define CIFSSEC_MASK	0xA70A7 /* current flags supported if weak */
+#define CIFSSEC_MASK	0xB70B7 /* current flags supported if weak */
 #endif /* UPCALL */
 #else /* do not allow weak pw hash */
 #ifdef CONFIG_CIFS_UPCALL
@@ -669,12 +673,6 @@ GLOBAL_EXTERN rwlock_t cifs_tcp_ses_lock;
 */
 GLOBAL_EXTERN rwlock_t GlobalSMBSeslock;
 
-/* Global list of oplocks */
-GLOBAL_EXTERN struct list_head cifs_oplock_list;
-
-/* Protects the cifs_oplock_list */
-GLOBAL_EXTERN spinlock_t cifs_oplock_lock;
-
 /* Outstanding dir notify requests */
 GLOBAL_EXTERN struct list_head GlobalDnotifyReqList;
 /* DirNotify response queue */
@@ -725,3 +723,4 @@ GLOBAL_EXTERN unsigned int cifs_min_rcv; /* min size of big ntwrk buf pool */
 GLOBAL_EXTERN unsigned int cifs_min_small; /* min size of small buf pool */
 GLOBAL_EXTERN unsigned int cifs_max_pending; /* MAX requests at once to server*/
 
+extern const struct slow_work_ops cifs_oplock_break_ops;
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index da8fbf565991..6928c24d1d42 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -86,18 +86,17 @@ extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses,
 			const int stage,
 			const struct nls_table *nls_cp);
 extern __u16 GetNextMid(struct TCP_Server_Info *server);
-extern struct oplock_q_entry *AllocOplockQEntry(struct inode *, u16,
-					 struct cifsTconInfo *);
-extern void DeleteOplockQEntry(struct oplock_q_entry *);
-extern void DeleteTconOplockQEntries(struct cifsTconInfo *);
 extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
 				      int offset);
 
+extern struct cifsFileInfo *cifs_new_fileinfo(struct inode *newinode,
+				__u16 fileHandle, struct file *file,
+				struct vfsmount *mnt, unsigned int oflags);
 extern int cifs_posix_open(char *full_path, struct inode **pinode,
-			   struct super_block *sb, int mode, int oflags,
-			   int *poplock, __u16 *pnetfid, int xid);
+			   struct vfsmount *mnt, int mode, int oflags,
+			   __u32 *poplock, __u16 *pnetfid, int xid);
 extern void cifs_unix_basic_to_fattr(struct cifs_fattr *fattr,
 				     FILE_UNIX_BASIC_INFO *info,
 				     struct cifs_sb_info *cifs_sb);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 301e307e1279..941441d3e386 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -94,6 +94,7 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon)
 	list_for_each_safe(tmp, tmp1, &pTcon->openFileList) {
 		open_file = list_entry(tmp, struct cifsFileInfo, tlist);
 		open_file->invalidHandle = true;
+		open_file->oplock_break_cancelled = true;
 	}
 	write_unlock(&GlobalSMBSeslock);
 	/* BB Add call to invalidate_inodes(sb) for all superblocks mounted
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index d49682433c20..43003e0bef18 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1670,7 +1670,6 @@ cifs_put_tcon(struct cifsTconInfo *tcon)
 	CIFSSMBTDis(xid, tcon);
 	_FreeXid(xid);
 
-	DeleteTconOplockQEntries(tcon);
 	tconInfoFree(tcon);
 	cifs_put_smb_ses(ses);
 }
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index a6424cfc0121..627a60a6c1b1 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -24,6 +24,7 @@
 #include <linux/stat.h>
 #include <linux/slab.h>
 #include <linux/namei.h>
+#include <linux/mount.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #include "cifsglob.h"
@@ -129,44 +130,45 @@ cifs_bp_rename_retry:
 	return full_path;
 }
 
-static void
-cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle,
-		   struct cifsTconInfo *tcon, bool write_only)
+struct cifsFileInfo *
+cifs_new_fileinfo(struct inode *newinode, __u16 fileHandle,
+		  struct file *file, struct vfsmount *mnt, unsigned int oflags)
 {
 	int oplock = 0;
 	struct cifsFileInfo *pCifsFile;
 	struct cifsInodeInfo *pCifsInode;
+	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
 
 	pCifsFile = kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
-
 	if (pCifsFile == NULL)
-		return;
+		return pCifsFile;
 
 	if (oplockEnabled)
 		oplock = REQ_OPLOCK;
 
 	pCifsFile->netfid = fileHandle;
 	pCifsFile->pid = current->tgid;
-	pCifsFile->pInode = newinode;
+	pCifsFile->pInode = igrab(newinode);
+	pCifsFile->mnt = mnt;
+	pCifsFile->pfile = file;
 	pCifsFile->invalidHandle = false;
 	pCifsFile->closePend = false;
 	mutex_init(&pCifsFile->fh_mutex);
 	mutex_init(&pCifsFile->lock_mutex);
 	INIT_LIST_HEAD(&pCifsFile->llist);
 	atomic_set(&pCifsFile->count, 1);
+	slow_work_init(&pCifsFile->oplock_break, &cifs_oplock_break_ops);
 
-	/* set the following in open now
-			pCifsFile->pfile = file; */
 	write_lock(&GlobalSMBSeslock);
-	list_add(&pCifsFile->tlist, &tcon->openFileList);
+	list_add(&pCifsFile->tlist, &cifs_sb->tcon->openFileList);
 	pCifsInode = CIFS_I(newinode);
 	if (pCifsInode) {
 		/* if readable file instance put first in list*/
-		if (write_only)
+		if (oflags & FMODE_READ)
+			list_add(&pCifsFile->flist, &pCifsInode->openFileList);
+		else
 			list_add_tail(&pCifsFile->flist,
 				      &pCifsInode->openFileList);
-		else
-			list_add(&pCifsFile->flist, &pCifsInode->openFileList);
 
 		if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
 			pCifsInode->clientCanCacheAll = true;
@@ -176,18 +178,18 @@ cifs_fill_fileinfo(struct inode *newinode, __u16 fileHandle,
 			pCifsInode->clientCanCacheRead = true;
 	}
 	write_unlock(&GlobalSMBSeslock);
+
+	return pCifsFile;
 }
 
 int cifs_posix_open(char *full_path, struct inode **pinode,
-		    struct super_block *sb, int mode, int oflags,
-		    int *poplock, __u16 *pnetfid, int xid)
+		    struct vfsmount *mnt, int mode, int oflags,
+		    __u32 *poplock, __u16 *pnetfid, int xid)
 {
 	int rc;
-	__u32 oplock;
-	bool write_only = false;
 	FILE_UNIX_BASIC_INFO *presp_data;
 	__u32 posix_flags = 0;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(mnt->mnt_sb);
 	struct cifs_fattr fattr;
 
 	cFYI(1, ("posix open %s", full_path));
@@ -223,12 +225,9 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
 	if (oflags & O_DIRECT)
 		posix_flags |= SMB_O_DIRECT;
 
-	if (!(oflags & FMODE_READ))
-		write_only = true;
-
 	mode &= ~current_umask();
 	rc = CIFSPOSIXCreate(xid, cifs_sb->tcon, posix_flags, mode,
-			pnetfid, presp_data, &oplock, full_path,
+			pnetfid, presp_data, poplock, full_path,
 			cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
 					CIFS_MOUNT_MAP_SPECIAL_CHR);
 	if (rc)
@@ -244,7 +243,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
 
 	/* get new inode and set it up */
 	if (*pinode == NULL) {
-		*pinode = cifs_iget(sb, &fattr);
+		*pinode = cifs_iget(mnt->mnt_sb, &fattr);
 		if (!*pinode) {
 			rc = -ENOMEM;
 			goto posix_open_ret;
@@ -253,7 +252,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
 		cifs_fattr_to_inode(*pinode, &fattr);
 	}
 
-	cifs_fill_fileinfo(*pinode, *pnetfid, cifs_sb->tcon, write_only);
+	cifs_new_fileinfo(*pinode, *pnetfid, NULL, mnt, oflags);
 
 posix_open_ret:
 	kfree(presp_data);
@@ -280,7 +279,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 	int rc = -ENOENT;
 	int xid;
 	int create_options = CREATE_NOT_DIR;
-	int oplock = 0;
+	__u32 oplock = 0;
 	int oflags;
 	bool posix_create = false;
 	/*
@@ -298,7 +297,6 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 	FILE_ALL_INFO *buf = NULL;
 	struct inode *newinode = NULL;
 	int disposition = FILE_OVERWRITE_IF;
-	bool write_only = false;
 
 	xid = GetXid();
 
@@ -323,7 +321,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 	if (tcon->unix_ext && (tcon->ses->capabilities & CAP_UNIX) &&
 	    (CIFS_UNIX_POSIX_PATH_OPS_CAP &
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
-		rc = cifs_posix_open(full_path, &newinode, inode->i_sb,
+		rc = cifs_posix_open(full_path, &newinode, nd->path.mnt,
 				     mode, oflags, &oplock, &fileHandle, xid);
 		/* EIO could indicate that (posix open) operation is not
 		   supported, despite what server claimed in capability
@@ -351,11 +349,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
 	desiredAccess = 0;
 	if (oflags & FMODE_READ)
 		desiredAccess |= GENERIC_READ; /* is this too little? */
-	if (oflags & FMODE_WRITE) {
+	if (oflags & FMODE_WRITE)
 		desiredAccess |= GENERIC_WRITE;
-		if (!(oflags & FMODE_READ))
-			write_only = true;
-	}
 
 	if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
 		disposition = FILE_CREATE;
@@ -470,8 +465,8 @@ cifs_create_set_dentry:
 		/* mknod case - do not leave file open */
 		CIFSSMBClose(xid, tcon, fileHandle);
 	} else if (!(posix_create) && (newinode)) {
-			cifs_fill_fileinfo(newinode, fileHandle,
-					cifs_sb->tcon, write_only);
+			cifs_new_fileinfo(newinode, fileHandle, NULL,
+					nd->path.mnt, oflags);
 	}
 cifs_create_out:
 	kfree(buf);
@@ -611,7 +606,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 {
 	int xid;
 	int rc = 0; /* to get around spurious gcc warning, set to zero here */
-	int oplock = 0;
+	__u32 oplock = 0;
 	__u16 fileHandle = 0;
 	bool posix_open = false;
 	struct cifs_sb_info *cifs_sb;
@@ -683,8 +678,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
 	if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
 	    (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
 	    (nd->intent.open.flags & O_CREAT)) {
-		rc = cifs_posix_open(full_path, &newInode,
-				parent_dir_inode->i_sb,
+		rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
 				nd->intent.open.create_mode,
 				nd->intent.open.flags, &oplock,
 				&fileHandle, xid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index fa7beac8b80e..429337eb7afe 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -30,6 +30,7 @@
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/delay.h>
+#include <linux/mount.h>
 #include <asm/div64.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
@@ -39,27 +40,6 @@
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"
 
-static inline struct cifsFileInfo *cifs_init_private(
-	struct cifsFileInfo *private_data, struct inode *inode,
-	struct file *file, __u16 netfid)
-{
-	memset(private_data, 0, sizeof(struct cifsFileInfo));
-	private_data->netfid = netfid;
-	private_data->pid = current->tgid;
-	mutex_init(&private_data->fh_mutex);
-	mutex_init(&private_data->lock_mutex);
-	INIT_LIST_HEAD(&private_data->llist);
-	private_data->pfile = file; /* needed for writepage */
-	private_data->pInode = inode;
-	private_data->invalidHandle = false;
-	private_data->closePend = false;
-	/* Initialize reference count to one. The private data is
-	freed on the release of the last reference */
-	atomic_set(&private_data->count, 1);
-
-	return private_data;
-}
-
 static inline int cifs_convert_flags(unsigned int flags)
 {
 	if ((flags & O_ACCMODE) == O_RDONLY)
@@ -123,9 +103,11 @@ static inline int cifs_get_disposition(unsigned int flags)
 }
 
 /* all arguments to this function must be checked for validity in caller */
-static inline int cifs_posix_open_inode_helper(struct inode *inode,
-	struct file *file, struct cifsInodeInfo *pCifsInode,
-	struct cifsFileInfo *pCifsFile, int oplock, u16 netfid)
+static inline int
+cifs_posix_open_inode_helper(struct inode *inode, struct file *file,
+			     struct cifsInodeInfo *pCifsInode,
+			     struct cifsFileInfo *pCifsFile, __u32 oplock,
+			     u16 netfid)
 {
 
 	write_lock(&GlobalSMBSeslock);
@@ -219,17 +201,6 @@ static inline int cifs_open_inode_helper(struct inode *inode, struct file *file,
 	struct timespec temp;
 	int rc;
 
-	/* want handles we can use to read with first
-	   in the list so we do not have to walk the
-	   list to search for one in write_begin */
-	if ((file->f_flags & O_ACCMODE) == O_WRONLY) {
-		list_add_tail(&pCifsFile->flist,
-			      &pCifsInode->openFileList);
-	} else {
-		list_add(&pCifsFile->flist,
-			 &pCifsInode->openFileList);
-	}
-	write_unlock(&GlobalSMBSeslock);
 	if (pCifsInode->clientCanCacheRead) {
 		/* we have the inode open somewhere else
 		   no need to discard cache data */
@@ -279,7 +250,8 @@ client_can_cache:
 int cifs_open(struct inode *inode, struct file *file)
 {
 	int rc = -EACCES;
-	int xid, oplock;
+	int xid;
+	__u32 oplock;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *pCifsFile;
@@ -324,7 +296,7 @@ int cifs_open(struct inode *inode, struct file *file)
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
 		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
 		/* can not refresh inode info since size could be stale */
-		rc = cifs_posix_open(full_path, &inode, inode->i_sb,
+		rc = cifs_posix_open(full_path, &inode, file->f_path.mnt,
 				cifs_sb->mnt_file_mode /* ignored */,
 				oflags, &oplock, &netfid, xid);
 		if (rc == 0) {
@@ -414,24 +386,17 @@ int cifs_open(struct inode *inode, struct file *file)
 		cFYI(1, ("cifs_open returned 0x%x", rc));
 		goto out;
 	}
-	file->private_data =
-		kmalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
+
+	pCifsFile = cifs_new_fileinfo(inode, netfid, file, file->f_path.mnt,
+					file->f_flags);
+	file->private_data = pCifsFile;
 	if (file->private_data == NULL) {
 		rc = -ENOMEM;
 		goto out;
 	}
-	pCifsFile = cifs_init_private(file->private_data, inode, file, netfid);
-	write_lock(&GlobalSMBSeslock);
-	list_add(&pCifsFile->tlist, &tcon->openFileList);
 
-	pCifsInode = CIFS_I(file->f_path.dentry->d_inode);
-	if (pCifsInode) {
-		rc = cifs_open_inode_helper(inode, file, pCifsInode,
-					    pCifsFile, tcon,
-					    &oplock, buf, full_path, xid);
-	} else {
-		write_unlock(&GlobalSMBSeslock);
-	}
+	rc = cifs_open_inode_helper(inode, file, pCifsInode, pCifsFile, tcon,
+				    &oplock, buf, full_path, xid);
 
 	if (oplock & CIFS_CREATE_ACTION) {
 		/* time to set mode which we can not set earlier due to
@@ -474,7 +439,8 @@ static int cifs_relock_file(struct cifsFileInfo *cifsFile)
 static int cifs_reopen_file(struct file *file, bool can_flush)
 {
 	int rc = -EACCES;
-	int xid, oplock;
+	int xid;
+	__u32 oplock;
 	struct cifs_sb_info *cifs_sb;
 	struct cifsTconInfo *tcon;
 	struct cifsFileInfo *pCifsFile;
@@ -543,7 +509,7 @@ reopen_error_exit:
 			le64_to_cpu(tcon->fsUnixInfo.Capability))) {
 		int oflags = (int) cifs_posix_convert_flags(file->f_flags);
 		/* can not refresh inode info since size could be stale */
-		rc = cifs_posix_open(full_path, NULL, inode->i_sb,
+		rc = cifs_posix_open(full_path, NULL, file->f_path.mnt,
 				cifs_sb->mnt_file_mode /* ignored */,
 				oflags, &oplock, &netfid, xid);
 		if (rc == 0) {
@@ -2308,6 +2274,73 @@ out:
 	return rc;
 }
 
+static void
+cifs_oplock_break(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	struct inode *inode = cfile->pInode;
+	struct cifsInodeInfo *cinode = CIFS_I(inode);
+	struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->mnt->mnt_sb);
+	int rc, waitrc = 0;
+
+	if (inode && S_ISREG(inode->i_mode)) {
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+		if (cinode->clientCanCacheAll == 0)
+			break_lease(inode, FMODE_READ);
+		else if (cinode->clientCanCacheRead == 0)
+			break_lease(inode, FMODE_WRITE);
+#endif
+		rc = filemap_fdatawrite(inode->i_mapping);
+		if (cinode->clientCanCacheRead == 0) {
+			waitrc = filemap_fdatawait(inode->i_mapping);
+			invalidate_remote_inode(inode);
+		}
+		if (!rc)
+			rc = waitrc;
+		if (rc)
+			cinode->write_behind_rc = rc;
+		cFYI(1, ("Oplock flush inode %p rc %d", inode, rc));
+	}
+
+	/*
+	 * releasing stale oplock after recent reconnect of smb session using
+	 * a now incorrect file handle is not a data integrity issue but do
+	 * not bother sending an oplock release if session to server still is
+	 * disconnected since oplock already released by the server
+	 */
+	if (!cfile->closePend && !cfile->oplock_break_cancelled) {
+		rc = CIFSSMBLock(0, cifs_sb->tcon, cfile->netfid, 0, 0, 0, 0,
+				 LOCKING_ANDX_OPLOCK_RELEASE, false);
+		cFYI(1, ("Oplock release rc = %d", rc));
+	}
+}
+
+static int
+cifs_oplock_break_get(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	mntget(cfile->mnt);
+	cifsFileInfo_get(cfile);
+	return 0;
+}
+
+static void
+cifs_oplock_break_put(struct slow_work *work)
+{
+	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
+						  oplock_break);
+	mntput(cfile->mnt);
+	cifsFileInfo_put(cfile);
+}
+
+const struct slow_work_ops cifs_oplock_break_ops = {
+	.get_ref = cifs_oplock_break_get,
+	.put_ref = cifs_oplock_break_put,
+	.execute = cifs_oplock_break,
+};
+
 const struct address_space_operations cifs_addr_ops = {
 	.readpage = cifs_readpage,
 	.readpages = cifs_readpages,
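The CIFS hunks above replace the dedicated cifsoplockd kthread with the slow-work infrastructure: a struct slow_work is embedded in cifsFileInfo, initialised with slow_work_init() against cifs_oplock_break_ops, queued from the oplock-break handler with slow_work_enqueue(), and enabled once at module init via slow_work_register_user(); the get_ref/put_ref callbacks pin the file info (and its vfsmount) so the object cannot be freed while a job is pending. Below is a rough, self-contained sketch of that pattern using only the slow-work calls visible in this diff; the object and function names are illustrative, not from the patch.

/*
 * Illustrative sketch only -- not part of the patch. Shows the slow-work
 * usage the CIFS conversion relies on: embed a struct slow_work in an
 * object, supply get_ref/put_ref/execute ops, and enqueue the job.
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <asm/atomic.h>
#include <linux/slow-work.h>

struct demo_object {				/* hypothetical object */
	atomic_t count;
	struct slow_work work;			/* like cifsFileInfo.oplock_break */
};

static int demo_work_get_ref(struct slow_work *work)
{
	struct demo_object *obj = container_of(work, struct demo_object, work);

	/* pin the object for as long as the job is queued or running */
	atomic_inc(&obj->count);
	return 0;
}

static void demo_work_put_ref(struct slow_work *work)
{
	struct demo_object *obj = container_of(work, struct demo_object, work);

	/* drop the reference taken in get_ref; free on last put */
	if (atomic_dec_and_test(&obj->count))
		kfree(obj);
}

static void demo_work_execute(struct slow_work *work)
{
	/* runs later, in process context, on a slow-work thread */
}

static const struct slow_work_ops demo_work_ops = {
	.get_ref = demo_work_get_ref,
	.put_ref = demo_work_put_ref,
	.execute = demo_work_execute,
};

/* module init:  slow_work_register_user();          (as in init_cifs above) */
/* object setup: slow_work_init(&obj->work, &demo_work_ops);                 */
/* event path:   slow_work_enqueue(&obj->work);      (may return an error)   */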
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e079a9190ec4..0241b25ac33f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,7 +32,6 @@
 
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
-extern struct task_struct *oplockThread;
 
 /* The xid serves as a useful identifier for each incoming vfs request,
    in a similar way to the mid which is useful to track each sent smb,
@@ -500,6 +499,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 	struct cifsTconInfo *tcon;
 	struct cifsInodeInfo *pCifsInode;
 	struct cifsFileInfo *netfile;
+	int rc;
 
 	cFYI(1, ("Checking for oplock break or dnotify response"));
 	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
@@ -562,30 +562,40 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 				continue;
 
 			cifs_stats_inc(&tcon->num_oplock_brks);
-			write_lock(&GlobalSMBSeslock);
+			read_lock(&GlobalSMBSeslock);
 			list_for_each(tmp2, &tcon->openFileList) {
 				netfile = list_entry(tmp2, struct cifsFileInfo,
 						     tlist);
 				if (pSMB->Fid != netfile->netfid)
 					continue;
 
-				write_unlock(&GlobalSMBSeslock);
-				read_unlock(&cifs_tcp_ses_lock);
+				/*
+				 * don't do anything if file is about to be
+				 * closed anyway.
+				 */
+				if (netfile->closePend) {
+					read_unlock(&GlobalSMBSeslock);
+					read_unlock(&cifs_tcp_ses_lock);
+					return true;
+				}
+
 				cFYI(1, ("file id match, oplock break"));
 				pCifsInode = CIFS_I(netfile->pInode);
 				pCifsInode->clientCanCacheAll = false;
 				if (pSMB->OplockLevel == 0)
 					pCifsInode->clientCanCacheRead = false;
-				pCifsInode->oplockPending = true;
-				AllocOplockQEntry(netfile->pInode,
-						  netfile->netfid, tcon);
-				cFYI(1, ("about to wake up oplock thread"));
-				if (oplockThread)
-					wake_up_process(oplockThread);
-
+				rc = slow_work_enqueue(&netfile->oplock_break);
+				if (rc) {
+					cERROR(1, ("failed to enqueue oplock "
+						   "break: %d\n", rc));
+				} else {
+					netfile->oplock_break_cancelled = false;
+				}
+				read_unlock(&GlobalSMBSeslock);
+				read_unlock(&cifs_tcp_ses_lock);
 				return true;
 			}
-			write_unlock(&GlobalSMBSeslock);
+			read_unlock(&GlobalSMBSeslock);
 			read_unlock(&cifs_tcp_ses_lock);
 			cFYI(1, ("No matching file for oplock break"));
 			return true;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f823a4a208a7..1f098ca71636 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -146,7 +146,7 @@ cifs_fill_common_info(struct cifs_fattr *fattr, struct cifs_sb_info *cifs_sb)
 	}
 }
 
-void
+static void
 cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
 		       struct cifs_sb_info *cifs_sb)
 {
@@ -161,7 +161,7 @@ cifs_dir_info_to_fattr(struct cifs_fattr *fattr, FILE_DIRECTORY_INFO *info,
 	cifs_fill_common_info(fattr, cifs_sb);
 }
 
-void
+static void
 cifs_std_info_to_fattr(struct cifs_fattr *fattr, FIND_FILE_STANDARD_INFO *info,
 		       struct cifs_sb_info *cifs_sb)
 {
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1da4ab250eae..07b8e71544ee 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -103,56 +103,6 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
-struct oplock_q_entry *
-AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-	if ((pinode == NULL) || (tcon == NULL)) {
-		cERROR(1, ("Null parms passed to AllocOplockQEntry"));
-		return NULL;
-	}
-	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-							  GFP_KERNEL);
-	if (temp == NULL)
-		return temp;
-	else {
-		temp->pinode = pinode;
-		temp->tcon = tcon;
-		temp->netfid = fid;
-		spin_lock(&cifs_oplock_lock);
-		list_add_tail(&temp->qhead, &cifs_oplock_list);
-		spin_unlock(&cifs_oplock_lock);
-	}
-	return temp;
-}
-
-void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
-{
-	spin_lock(&cifs_oplock_lock);
-	/* should we check if list empty first? */
-	list_del(&oplockEntry->qhead);
-	spin_unlock(&cifs_oplock_lock);
-	kmem_cache_free(cifs_oplock_cachep, oplockEntry);
-}
-
-
-void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-
-	if (tcon == NULL)
-		return;
-
-	spin_lock(&cifs_oplock_lock);
-	list_for_each_entry(temp, &cifs_oplock_list, qhead) {
-		if ((temp->tcon) && (temp->tcon == tcon)) {
-			list_del(&temp->qhead);
-			kmem_cache_free(cifs_oplock_cachep, temp);
-		}
-	}
-	spin_unlock(&cifs_oplock_lock);
-}
-
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index 8e1e5e19d21e..9d5360c4c2af 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c | |||
| @@ -41,8 +41,9 @@ struct wb_writeback_args { | |||
| 41 | long nr_pages; | 41 | long nr_pages; |
| 42 | struct super_block *sb; | 42 | struct super_block *sb; |
| 43 | enum writeback_sync_modes sync_mode; | 43 | enum writeback_sync_modes sync_mode; |
| 44 | int for_kupdate; | 44 | int for_kupdate:1; |
| 45 | int range_cyclic; | 45 | int range_cyclic:1; |
| 46 | int for_background:1; | ||
| 46 | }; | 47 | }; |
| 47 | 48 | ||
| 48 | /* | 49 | /* |
| @@ -249,14 +250,25 @@ static void bdi_sync_writeback(struct backing_dev_info *bdi, | |||
| 249 | * completion. Caller need not hold sb s_umount semaphore. | 250 | * completion. Caller need not hold sb s_umount semaphore. |
| 250 | * | 251 | * |
| 251 | */ | 252 | */ |
| 252 | void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages) | 253 | void bdi_start_writeback(struct backing_dev_info *bdi, struct super_block *sb, |
| 254 | long nr_pages) | ||
| 253 | { | 255 | { |
| 254 | struct wb_writeback_args args = { | 256 | struct wb_writeback_args args = { |
| 257 | .sb = sb, | ||
| 255 | .sync_mode = WB_SYNC_NONE, | 258 | .sync_mode = WB_SYNC_NONE, |
| 256 | .nr_pages = nr_pages, | 259 | .nr_pages = nr_pages, |
| 257 | .range_cyclic = 1, | 260 | .range_cyclic = 1, |
| 258 | }; | 261 | }; |
| 259 | 262 | ||
| 263 | /* | ||
| 264 | * We treat @nr_pages=0 as the special case to do background writeback, | ||
| 265 | * ie. to sync pages until the background dirty threshold is reached. | ||
| 266 | */ | ||
| 267 | if (!nr_pages) { | ||
| 268 | args.nr_pages = LONG_MAX; | ||
| 269 | args.for_background = 1; | ||
| 270 | } | ||
| 271 | |||
| 260 | bdi_alloc_queue_work(bdi, &args); | 272 | bdi_alloc_queue_work(bdi, &args); |
| 261 | } | 273 | } |
| 262 | 274 | ||
| @@ -310,7 +322,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t) | |||
| 310 | * For inodes being constantly redirtied, dirtied_when can get stuck. | 322 | * For inodes being constantly redirtied, dirtied_when can get stuck. |
| 311 | * It _appears_ to be in the future, but is actually in distant past. | 323 | * It _appears_ to be in the future, but is actually in distant past. |
| 312 | * This test is necessary to prevent such wrapped-around relative times | 324 | * This test is necessary to prevent such wrapped-around relative times |
| 313 | * from permanently stopping the whole pdflush writeback. | 325 | * from permanently stopping the whole bdi writeback. |
| 314 | */ | 326 | */ |
| 315 | ret = ret && time_before_eq(inode->dirtied_when, jiffies); | 327 | ret = ret && time_before_eq(inode->dirtied_when, jiffies); |
| 316 | #endif | 328 | #endif |
| @@ -324,13 +336,38 @@ static void move_expired_inodes(struct list_head *delaying_queue, | |||
| 324 | struct list_head *dispatch_queue, | 336 | struct list_head *dispatch_queue, |
| 325 | unsigned long *older_than_this) | 337 | unsigned long *older_than_this) |
| 326 | { | 338 | { |
| 339 | LIST_HEAD(tmp); | ||
| 340 | struct list_head *pos, *node; | ||
| 341 | struct super_block *sb = NULL; | ||
| 342 | struct inode *inode; | ||
| 343 | int do_sb_sort = 0; | ||
| 344 | |||
| 327 | while (!list_empty(delaying_queue)) { | 345 | while (!list_empty(delaying_queue)) { |
| 328 | struct inode *inode = list_entry(delaying_queue->prev, | 346 | inode = list_entry(delaying_queue->prev, struct inode, i_list); |
| 329 | struct inode, i_list); | ||
| 330 | if (older_than_this && | 347 | if (older_than_this && |
| 331 | inode_dirtied_after(inode, *older_than_this)) | 348 | inode_dirtied_after(inode, *older_than_this)) |
| 332 | break; | 349 | break; |
| 333 | list_move(&inode->i_list, dispatch_queue); | 350 | if (sb && sb != inode->i_sb) |
| 351 | do_sb_sort = 1; | ||
| 352 | sb = inode->i_sb; | ||
| 353 | list_move(&inode->i_list, &tmp); | ||
| 354 | } | ||
| 355 | |||
| 356 | /* just one sb in list, splice to dispatch_queue and we're done */ | ||
| 357 | if (!do_sb_sort) { | ||
| 358 | list_splice(&tmp, dispatch_queue); | ||
| 359 | return; | ||
| 360 | } | ||
| 361 | |||
| 362 | /* Move inodes from one superblock together */ | ||
| 363 | while (!list_empty(&tmp)) { | ||
| 364 | inode = list_entry(tmp.prev, struct inode, i_list); | ||
| 365 | sb = inode->i_sb; | ||
| 366 | list_for_each_prev_safe(pos, node, &tmp) { | ||
| 367 | inode = list_entry(pos, struct inode, i_list); | ||
| 368 | if (inode->i_sb == sb) | ||
| 369 | list_move(&inode->i_list, dispatch_queue); | ||
| 370 | } | ||
| 334 | } | 371 | } |
| 335 | } | 372 | } |
| 336 | 373 | ||
| @@ -439,8 +476,18 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 439 | spin_lock(&inode_lock); | 476 | spin_lock(&inode_lock); |
| 440 | inode->i_state &= ~I_SYNC; | 477 | inode->i_state &= ~I_SYNC; |
| 441 | if (!(inode->i_state & (I_FREEING | I_CLEAR))) { | 478 | if (!(inode->i_state & (I_FREEING | I_CLEAR))) { |
| 442 | if (!(inode->i_state & I_DIRTY) && | 479 | if ((inode->i_state & I_DIRTY_PAGES) && wbc->for_kupdate) { |
| 443 | mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { | 480 | /* |
| 481 | * More pages get dirtied by a fast dirtier. | ||
| 482 | */ | ||
| 483 | goto select_queue; | ||
| 484 | } else if (inode->i_state & I_DIRTY) { | ||
| 485 | /* | ||
| 486 | * At least XFS will redirty the inode during the | ||
| 487 | * writeback (delalloc) and on io completion (isize). | ||
| 488 | */ | ||
| 489 | redirty_tail(inode); | ||
| 490 | } else if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { | ||
| 444 | /* | 491 | /* |
| 445 | * We didn't write back all the pages. nfs_writepages() | 492 | * We didn't write back all the pages. nfs_writepages() |
| 446 | * sometimes bales out without doing anything. Redirty | 493 | * sometimes bales out without doing anything. Redirty |
| @@ -462,6 +509,7 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 462 | * soon as the queue becomes uncongested. | 509 | * soon as the queue becomes uncongested. |
| 463 | */ | 510 | */ |
| 464 | inode->i_state |= I_DIRTY_PAGES; | 511 | inode->i_state |= I_DIRTY_PAGES; |
| 512 | select_queue: | ||
| 465 | if (wbc->nr_to_write <= 0) { | 513 | if (wbc->nr_to_write <= 0) { |
| 466 | /* | 514 | /* |
| 467 | * slice used up: queue for next turn | 515 | * slice used up: queue for next turn |
| @@ -484,12 +532,6 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 484 | inode->i_state |= I_DIRTY_PAGES; | 532 | inode->i_state |= I_DIRTY_PAGES; |
| 485 | redirty_tail(inode); | 533 | redirty_tail(inode); |
| 486 | } | 534 | } |
| 487 | } else if (inode->i_state & I_DIRTY) { | ||
| 488 | /* | ||
| 489 | * Someone redirtied the inode while we were writing back | ||
| 490 | * the pages. | ||
| 491 | */ | ||
| 492 | redirty_tail(inode); | ||
| 493 | } else if (atomic_read(&inode->i_count)) { | 535 | } else if (atomic_read(&inode->i_count)) { |
| 494 | /* | 536 | /* |
| 495 | * The inode is clean, inuse | 537 | * The inode is clean, inuse |
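Taken together, the three writeback_single_inode() hunks above change what happens to an inode after I_SYNC is cleared: a kupdate pass whose inode keeps collecting dirty pages jumps to select_queue (requeue_io() when the slice is spent, redirty_tail() otherwise), an inode that was itself redirtied (e.g. by XFS during delalloc writeback or on i_size update) goes straight to redirty_tail(), and only then does the old skipped-pages handling apply. A condensed, compilable sketch of that ordering follows; the flag parameters and enum are illustrative, and the unchanged branches are paraphrased from the surrounding context.

```c
#include <stdio.h>

/* Illustrative outcomes; the real code calls requeue_io(), redirty_tail()
 * or moves the inode to inode_in_use/inode_unused. */
enum requeue_action {
	REQUEUE_IO,	/* b_more_io: flusher will revisit this inode soon */
	REDIRTY_TAIL,	/* back to the tail of the dirty list              */
	MOVE_IN_USE,	/* clean and still referenced                      */
	MOVE_UNUSED,	/* clean and unreferenced                          */
};

static enum requeue_action
requeue_after_sync(int dirty_pages, int dirty_inode, int for_kupdate,
		   int mapping_still_tagged, int in_use, long nr_to_write)
{
	if (dirty_pages && for_kupdate)
		/* A fast dirtier keeps adding pages during a kupdate pass:
		 * requeue if the slice is used up, otherwise redirty. */
		return nr_to_write <= 0 ? REQUEUE_IO : REDIRTY_TAIL;
	if (dirty_inode || dirty_pages)
		/* The inode itself was redirtied, e.g. by XFS during
		 * delalloc writeback or on i_size update at I/O completion. */
		return REDIRTY_TAIL;
	if (mapping_still_tagged)
		/* Pages were skipped (congestion, nfs_writepages() bailing
		 * out); kupdate passes use the slice-based choice, others
		 * fully redirty the inode so its siblings get writeout too. */
		return (for_kupdate && nr_to_write <= 0) ? REQUEUE_IO
							 : REDIRTY_TAIL;
	return in_use ? MOVE_IN_USE : MOVE_UNUSED;
}

int main(void)
{
	/* kupdate pass, slice exhausted, pages still dirty -> requeue_io() */
	printf("%d\n", requeue_after_sync(1, 0, 1, 1, 1, 0) == REQUEUE_IO);
	/* metadata-only redirty (XFS style) -> redirty_tail() */
	printf("%d\n", requeue_after_sync(0, 1, 0, 0, 1, 8) == REDIRTY_TAIL);
	return 0;
}
```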
| @@ -506,6 +548,17 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 506 | return ret; | 548 | return ret; |
| 507 | } | 549 | } |
| 508 | 550 | ||
| 551 | static void unpin_sb_for_writeback(struct super_block **psb) | ||
| 552 | { | ||
| 553 | struct super_block *sb = *psb; | ||
| 554 | |||
| 555 | if (sb) { | ||
| 556 | up_read(&sb->s_umount); | ||
| 557 | put_super(sb); | ||
| 558 | *psb = NULL; | ||
| 559 | } | ||
| 560 | } | ||
| 561 | |||
| 509 | /* | 562 | /* |
| 510 | * For WB_SYNC_NONE writeback, the caller does not have the sb pinned | 563 | * For WB_SYNC_NONE writeback, the caller does not have the sb pinned |
| 511 | * before calling writeback. So make sure that we do pin it, so it doesn't | 564 | * before calling writeback. So make sure that we do pin it, so it doesn't |
| @@ -515,11 +568,20 @@ writeback_single_inode(struct inode *inode, struct writeback_control *wbc) | |||
| 515 | * 1 if we failed. | 568 | * 1 if we failed. |
| 516 | */ | 569 | */ |
| 517 | static int pin_sb_for_writeback(struct writeback_control *wbc, | 570 | static int pin_sb_for_writeback(struct writeback_control *wbc, |
| 518 | struct inode *inode) | 571 | struct inode *inode, struct super_block **psb) |
| 519 | { | 572 | { |
| 520 | struct super_block *sb = inode->i_sb; | 573 | struct super_block *sb = inode->i_sb; |
| 521 | 574 | ||
| 522 | /* | 575 | /* |
| 576 | * If this sb is already pinned, nothing more to do. If not and | ||
| 577 | * *psb is non-NULL, unpin the old one first | ||
| 578 | */ | ||
| 579 | if (sb == *psb) | ||
| 580 | return 0; | ||
| 581 | else if (*psb) | ||
| 582 | unpin_sb_for_writeback(psb); | ||
| 583 | |||
| 584 | /* | ||
| 523 | * Caller must already hold the ref for this | 585 | * Caller must already hold the ref for this |
| 524 | */ | 586 | */ |
| 525 | if (wbc->sync_mode == WB_SYNC_ALL) { | 587 | if (wbc->sync_mode == WB_SYNC_ALL) { |
| @@ -532,7 +594,7 @@ static int pin_sb_for_writeback(struct writeback_control *wbc, | |||
| 532 | if (down_read_trylock(&sb->s_umount)) { | 594 | if (down_read_trylock(&sb->s_umount)) { |
| 533 | if (sb->s_root) { | 595 | if (sb->s_root) { |
| 534 | spin_unlock(&sb_lock); | 596 | spin_unlock(&sb_lock); |
| 535 | return 0; | 597 | goto pinned; |
| 536 | } | 598 | } |
| 537 | /* | 599 | /* |
| 538 | * umounted, drop rwsem again and fall through to failure | 600 | * umounted, drop rwsem again and fall through to failure |
| @@ -543,24 +605,15 @@ static int pin_sb_for_writeback(struct writeback_control *wbc, | |||
| 543 | sb->s_count--; | 605 | sb->s_count--; |
| 544 | spin_unlock(&sb_lock); | 606 | spin_unlock(&sb_lock); |
| 545 | return 1; | 607 | return 1; |
| 546 | } | 608 | pinned: |
| 547 | 609 | *psb = sb; | |
| 548 | static void unpin_sb_for_writeback(struct writeback_control *wbc, | 610 | return 0; |
| 549 | struct inode *inode) | ||
| 550 | { | ||
| 551 | struct super_block *sb = inode->i_sb; | ||
| 552 | |||
| 553 | if (wbc->sync_mode == WB_SYNC_ALL) | ||
| 554 | return; | ||
| 555 | |||
| 556 | up_read(&sb->s_umount); | ||
| 557 | put_super(sb); | ||
| 558 | } | 611 | } |
| 559 | 612 | ||
| 560 | static void writeback_inodes_wb(struct bdi_writeback *wb, | 613 | static void writeback_inodes_wb(struct bdi_writeback *wb, |
| 561 | struct writeback_control *wbc) | 614 | struct writeback_control *wbc) |
| 562 | { | 615 | { |
| 563 | struct super_block *sb = wbc->sb; | 616 | struct super_block *sb = wbc->sb, *pin_sb = NULL; |
| 564 | const int is_blkdev_sb = sb_is_blkdev_sb(sb); | 617 | const int is_blkdev_sb = sb_is_blkdev_sb(sb); |
| 565 | const unsigned long start = jiffies; /* livelock avoidance */ | 618 | const unsigned long start = jiffies; /* livelock avoidance */ |
| 566 | 619 | ||
| @@ -619,7 +672,7 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, | |||
| 619 | if (inode_dirtied_after(inode, start)) | 672 | if (inode_dirtied_after(inode, start)) |
| 620 | break; | 673 | break; |
| 621 | 674 | ||
| 622 | if (pin_sb_for_writeback(wbc, inode)) { | 675 | if (pin_sb_for_writeback(wbc, inode, &pin_sb)) { |
| 623 | requeue_io(inode); | 676 | requeue_io(inode); |
| 624 | continue; | 677 | continue; |
| 625 | } | 678 | } |
| @@ -628,7 +681,6 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, | |||
| 628 | __iget(inode); | 681 | __iget(inode); |
| 629 | pages_skipped = wbc->pages_skipped; | 682 | pages_skipped = wbc->pages_skipped; |
| 630 | writeback_single_inode(inode, wbc); | 683 | writeback_single_inode(inode, wbc); |
| 631 | unpin_sb_for_writeback(wbc, inode); | ||
| 632 | if (wbc->pages_skipped != pages_skipped) { | 684 | if (wbc->pages_skipped != pages_skipped) { |
| 633 | /* | 685 | /* |
| 634 | * writeback is not making progress due to locked | 686 | * writeback is not making progress due to locked |
| @@ -648,6 +700,8 @@ static void writeback_inodes_wb(struct bdi_writeback *wb, | |||
| 648 | wbc->more_io = 1; | 700 | wbc->more_io = 1; |
| 649 | } | 701 | } |
| 650 | 702 | ||
| 703 | unpin_sb_for_writeback(&pin_sb); | ||
| 704 | |||
| 651 | spin_unlock(&inode_lock); | 705 | spin_unlock(&inode_lock); |
| 652 | /* Leave any unwritten inodes on b_io */ | 706 | /* Leave any unwritten inodes on b_io */ |
| 653 | } | 707 | } |
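The pin_sb_for_writeback()/unpin_sb_for_writeback() hunks above replace the per-inode pin/unpin of the superblock with a cached pin: writeback_inodes_wb() keeps the last pinned sb in pin_sb, only drops and retakes the pin when the next inode belongs to a different superblock, and releases it once at the end of the pass. A userspace sketch of that caching pattern, with a fake_sb counter standing in for the s_umount/s_count pinning:

```c
#include <stdio.h>

struct fake_sb {
	const char *name;
	int pin_count;		/* stand-in for s_umount + s_count */
};

static int pin_sb(struct fake_sb *sb, struct fake_sb **cached)
{
	if (sb == *cached)
		return 0;		/* already pinned, nothing to do */
	if (*cached) {			/* unpin the previously cached sb */
		(*cached)->pin_count--;
		*cached = NULL;
	}
	sb->pin_count++;		/* the "trylock" always succeeds here */
	*cached = sb;
	return 0;
}

static void unpin_sb(struct fake_sb **cached)
{
	if (*cached) {
		(*cached)->pin_count--;
		*cached = NULL;
	}
}

int main(void)
{
	struct fake_sb a = { "sb-a", 0 }, b = { "sb-b", 0 };
	struct fake_sb *inode_sbs[] = { &a, &a, &a, &b, &b, &a };
	struct fake_sb *cached = NULL;
	int transitions = 0;

	for (unsigned i = 0; i < 6; i++) {
		struct fake_sb *before = cached;

		pin_sb(inode_sbs[i], &cached);
		if (cached != before)
			transitions++;
	}
	unpin_sb(&cached);

	/* Only 3 pin/unpin transitions for 6 inodes instead of 6. */
	printf("pin transitions: %d (a=%d, b=%d pins outstanding)\n",
	       transitions, a.pin_count, b.pin_count);
	return 0;
}
```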
| @@ -706,6 +760,7 @@ static long wb_writeback(struct bdi_writeback *wb, | |||
| 706 | }; | 760 | }; |
| 707 | unsigned long oldest_jif; | 761 | unsigned long oldest_jif; |
| 708 | long wrote = 0; | 762 | long wrote = 0; |
| 763 | struct inode *inode; | ||
| 709 | 764 | ||
| 710 | if (wbc.for_kupdate) { | 765 | if (wbc.for_kupdate) { |
| 711 | wbc.older_than_this = &oldest_jif; | 766 | wbc.older_than_this = &oldest_jif; |
| @@ -719,20 +774,16 @@ static long wb_writeback(struct bdi_writeback *wb, | |||
| 719 | 774 | ||
| 720 | for (;;) { | 775 | for (;;) { |
| 721 | /* | 776 | /* |
| 722 | * Don't flush anything for non-integrity writeback where | 777 | * Stop writeback when nr_pages has been consumed |
| 723 | * no nr_pages was given | ||
| 724 | */ | 778 | */ |
| 725 | if (!args->for_kupdate && args->nr_pages <= 0 && | 779 | if (args->nr_pages <= 0) |
| 726 | args->sync_mode == WB_SYNC_NONE) | ||
| 727 | break; | 780 | break; |
| 728 | 781 | ||
| 729 | /* | 782 | /* |
| 730 | * If no specific pages were given and this is just a | 783 | * For background writeout, stop when we are below the |
| 731 | * periodic background writeout and we are below the | 784 | * background dirty threshold |
| 732 | * background dirty threshold, don't do anything | ||
| 733 | */ | 785 | */ |
| 734 | if (args->for_kupdate && args->nr_pages <= 0 && | 786 | if (args->for_background && !over_bground_thresh()) |
| 735 | !over_bground_thresh()) | ||
| 736 | break; | 787 | break; |
| 737 | 788 | ||
| 738 | wbc.more_io = 0; | 789 | wbc.more_io = 0; |
| @@ -744,13 +795,32 @@ static long wb_writeback(struct bdi_writeback *wb, | |||
| 744 | wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; | 795 | wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; |
| 745 | 796 | ||
| 746 | /* | 797 | /* |
| 747 | * If we ran out of stuff to write, bail unless more_io got set | 798 | * If we consumed everything, see if we have more |
| 748 | */ | 799 | */ |
| 749 | if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) { | 800 | if (wbc.nr_to_write <= 0) |
| 750 | if (wbc.more_io && !wbc.for_kupdate) | 801 | continue; |
| 751 | continue; | 802 | /* |
| 803 | * Didn't write everything and we don't have more IO, bail | ||
| 804 | */ | ||
| 805 | if (!wbc.more_io) | ||
| 752 | break; | 806 | break; |
| 807 | /* | ||
| 808 | * Did we write something? Try for more | ||
| 809 | */ | ||
| 810 | if (wbc.nr_to_write < MAX_WRITEBACK_PAGES) | ||
| 811 | continue; | ||
| 812 | /* | ||
| 813 | * Nothing written. Wait for some inode to | ||
| 814 | * become available for writeback. Otherwise | ||
| 815 | * we'll just busyloop. | ||
| 816 | */ | ||
| 817 | spin_lock(&inode_lock); | ||
| 818 | if (!list_empty(&wb->b_more_io)) { | ||
| 819 | inode = list_entry(wb->b_more_io.prev, | ||
| 820 | struct inode, i_list); | ||
| 821 | inode_wait_for_writeback(inode); | ||
| 753 | } | 822 | } |
| 823 | spin_unlock(&inode_lock); | ||
| 754 | } | 824 | } |
| 755 | 825 | ||
| 756 | return wrote; | 826 | return wrote; |
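The two wb_writeback() hunks above simplify the loop's exit conditions (stop once nr_pages is consumed, or, for background writeout, once we drop below the dirty threshold) and add a no-progress path: when a pass writes nothing but more_io is set, the flusher now sleeps on an inode from b_more_io via inode_wait_for_writeback() instead of busy-looping. A stand-alone sketch of that control flow; do_one_chunk() and wait_for_inode() are made-up stand-ins for the real per-chunk writeback and the inode wait.

```c
#include <stdio.h>

#define MAX_WRITEBACK_PAGES 1024

struct chunk_result {
	long written;	/* pages written in this pass           */
	int more_io;	/* some inode still has I/O outstanding */
};

/* Stub "write one chunk" that pretends the queue drains over 3 passes. */
static struct chunk_result do_one_chunk(void)
{
	static int pass;
	struct chunk_result r = { 0, 0 };

	if (pass == 0)
		r = (struct chunk_result){ MAX_WRITEBACK_PAGES, 1 };
	else if (pass == 1)
		r = (struct chunk_result){ 100, 1 };
	/* pass 2: nothing written, nothing pending -> caller should stop */
	pass++;
	return r;
}

static void wait_for_inode(void)
{
	/* the real code sleeps in inode_wait_for_writeback() here */
}

static long wb_writeback_sketch(long nr_pages, int for_background,
				int over_bg_thresh)
{
	long wrote = 0;

	for (;;) {
		if (nr_pages <= 0)			/* work consumed */
			break;
		if (for_background && !over_bg_thresh)	/* below bg limit */
			break;

		struct chunk_result r = do_one_chunk();

		nr_pages -= r.written;
		wrote += r.written;

		if (r.written >= MAX_WRITEBACK_PAGES)
			continue;	/* used the whole slice: go again  */
		if (!r.more_io)
			break;		/* nothing left anywhere: done     */
		if (r.written > 0)
			continue;	/* partial progress: try for more  */
		wait_for_inode();	/* no progress but more_io is set: */
					/* block instead of busy-looping   */
	}
	return wrote;
}

int main(void)
{
	printf("wrote %ld pages\n", wb_writeback_sketch(4096, 0, 0));
	return 0;
}
```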
| @@ -1060,9 +1130,6 @@ EXPORT_SYMBOL(__mark_inode_dirty); | |||
| 1060 | * If older_than_this is non-NULL, then only write out inodes which | 1130 | * If older_than_this is non-NULL, then only write out inodes which |
| 1061 | * had their first dirtying at a time earlier than *older_than_this. | 1131 | * had their first dirtying at a time earlier than *older_than_this. |
| 1062 | * | 1132 | * |
| 1063 | * If we're a pdflush thread, then implement pdflush collision avoidance | ||
| 1064 | * against the entire list. | ||
| 1065 | * | ||
| 1066 | * If `bdi' is non-zero then we're being asked to writeback a specific queue. | 1133 | * If `bdi' is non-zero then we're being asked to writeback a specific queue. |
| 1067 | * This function assumes that the blockdev superblock's inodes are backed by | 1134 | * This function assumes that the blockdev superblock's inodes are backed by |
| 1068 | * a variety of queues, so all inodes are searched. For other superblocks, | 1135 | * a variety of queues, so all inodes are searched. For other superblocks, |
| @@ -1141,7 +1208,7 @@ void writeback_inodes_sb(struct super_block *sb) | |||
| 1141 | nr_to_write = nr_dirty + nr_unstable + | 1208 | nr_to_write = nr_dirty + nr_unstable + |
| 1142 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); | 1209 | (inodes_stat.nr_inodes - inodes_stat.nr_unused); |
| 1143 | 1210 | ||
| 1144 | bdi_writeback_all(sb, nr_to_write); | 1211 | bdi_start_writeback(sb->s_bdi, sb, nr_to_write); |
| 1145 | } | 1212 | } |
| 1146 | EXPORT_SYMBOL(writeback_inodes_sb); | 1213 | EXPORT_SYMBOL(writeback_inodes_sb); |
| 1147 | 1214 | ||
