author		Jeff Layton <jlayton@redhat.com>	2009-09-21 06:47:50 -0400
committer	Steve French <sfrench@us.ibm.com>	2009-09-24 14:33:18 -0400
commit		3bc303c254335dbd7c7012cc1760b12f1d5514d3
tree		7da17fbfd697216d9ed0ccd64ea9c03aaf3d52c1	/fs/cifs/transport.c
parent		48541bd3dd4739b4d574b44ea47660c88d833677
cifs: convert oplock breaks to use slow_work facility (try #4)
This is the fourth respin of the patch to convert oplock breaks to
use the slow_work facility.
A customer of ours was testing a backport of one of the earlier
patchsets, and hit a "Busy inodes after umount..." problem. An oplock
break job had raced with a umount, and the superblock got torn down and
its memory reused. When the oplock break job tried to dereference the
inode->i_sb, the kernel oopsed.
This patchset has the oplock break job hold an inode and vfsmount
reference until the oplock break completes. With this, there should be
no need to take a tcon reference (the vfsmount implicitly holds one
already).
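As a rough sketch, the pinning can live in the job's slow_work get_ref
and put_ref hooks, so the references are taken when the job is queued
and dropped only after it has run. The helper and field names below
(cfile->mnt, cifsFileInfo_get/put) are illustrative assumptions, not
necessarily the exact symbols in the patch:

/*
 * Sketch only: pin everything the job dereferences before it runs.
 * cfile->mnt and cifsFileInfo_get/put are assumed names.
 */
static int cifs_oplock_break_get(struct slow_work *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);

	mntget(cfile->mnt);		/* vfsmount ref keeps the sb (and tcon) alive */
	cifsFileInfo_get(cfile);	/* open-file ref also pins the inode */
	return 0;
}

static void cifs_oplock_break_put(struct slow_work *work)
{
	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
						  oplock_break);

	mntput(cfile->mnt);
	cifsFileInfo_put(cfile);
}

Because each queued job pins its own vfsmount and inode this way, an
oplock break can no longer outlive the mount, which is what closes the
"Busy inodes" race described above.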
Currently, when an oplock break comes in there's a chance that the
oplock break job won't occur if the allocation of the oplock_q_entry
fails. There are also some rather nasty races in the allocation and
handling of these structs.
Rather than allocating oplock queue entries when an oplock break comes
in, add a few extra fields to the cifsFileInfo struct. Get rid of the
dedicated cifs_oplock_thread as well and queue the oplock break job to
the slow_work thread pool.
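Concretely, the shape is roughly the following sketch. The embedded
field, the ops table, and the queueing helper are assumptions for
illustration; slow_work_init/slow_work_enqueue and the
get_ref/put_ref/execute ops are the stock slow_work API:

/* Sketch: per-open-file break state embedded in cifsFileInfo,
 * replacing the dynamically allocated oplock_q_entry. */
struct cifsFileInfo {
	/* ... existing fields ... */
	struct slow_work oplock_break;	/* oplock break job */
};

static void cifs_oplock_break(struct slow_work *work);	/* does the break handling */

static const struct slow_work_ops cifs_oplock_break_ops = {
	.get_ref	= cifs_oplock_break_get,
	.put_ref	= cifs_oplock_break_put,
	.execute	= cifs_oplock_break,
};

/* When a break arrives for cfile, queue the job -- no allocation that
 * can fail under memory pressure. (In practice the init would happen
 * once, when the file is opened.) */
static void cifs_queue_oplock_break(struct cifsFileInfo *cfile)
{
	slow_work_init(&cfile->oplock_break, &cifs_oplock_break_ops);
	slow_work_enqueue(&cfile->oplock_break);
}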
This approach also has the advantage that the oplock break jobs can
potentially run in parallel rather than being serialized as they are
today.
Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
Diffstat (limited to 'fs/cifs/transport.c')
-rw-r--r--	fs/cifs/transport.c	50
1 file changed, 0 insertions, 50 deletions
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1da4ab250eae..07b8e71544ee 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -103,56 +103,6 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
 	mempool_free(midEntry, cifs_mid_poolp);
 }
 
-struct oplock_q_entry *
-AllocOplockQEntry(struct inode *pinode, __u16 fid, struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-	if ((pinode == NULL) || (tcon == NULL)) {
-		cERROR(1, ("Null parms passed to AllocOplockQEntry"));
-		return NULL;
-	}
-	temp = (struct oplock_q_entry *) kmem_cache_alloc(cifs_oplock_cachep,
-						       GFP_KERNEL);
-	if (temp == NULL)
-		return temp;
-	else {
-		temp->pinode = pinode;
-		temp->tcon = tcon;
-		temp->netfid = fid;
-		spin_lock(&cifs_oplock_lock);
-		list_add_tail(&temp->qhead, &cifs_oplock_list);
-		spin_unlock(&cifs_oplock_lock);
-	}
-	return temp;
-}
-
-void DeleteOplockQEntry(struct oplock_q_entry *oplockEntry)
-{
-	spin_lock(&cifs_oplock_lock);
-	/* should we check if list empty first? */
-	list_del(&oplockEntry->qhead);
-	spin_unlock(&cifs_oplock_lock);
-	kmem_cache_free(cifs_oplock_cachep, oplockEntry);
-}
-
-
-void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
-{
-	struct oplock_q_entry *temp;
-
-	if (tcon == NULL)
-		return;
-
-	spin_lock(&cifs_oplock_lock);
-	list_for_each_entry(temp, &cifs_oplock_list, qhead) {
-		if ((temp->tcon) && (temp->tcon == tcon)) {
-			list_del(&temp->qhead);
-			kmem_cache_free(cifs_oplock_cachep, temp);
-		}
-	}
-	spin_unlock(&cifs_oplock_lock);
-}
-
 static int
 smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
 {