author    Jeff Layton <jlayton@redhat.com>    2009-09-21 06:47:50 -0400
committer Steve French <sfrench@us.ibm.com>   2009-09-24 14:33:18 -0400
commit    3bc303c254335dbd7c7012cc1760b12f1d5514d3 (patch)
tree      7da17fbfd697216d9ed0ccd64ea9c03aaf3d52c1 /fs/cifs/misc.c
parent    48541bd3dd4739b4d574b44ea47660c88d833677 (diff)
cifs: convert oplock breaks to use slow_work facility (try #4)
This is the fourth respin of the patch to convert oplock breaks to use
the slow_work facility.

A customer of ours was testing a backport of one of the earlier
patchsets, and hit a "Busy inodes after umount..." problem. An oplock
break job had raced with a umount, and the superblock got torn down and
its memory reused. When the oplock break job tried to dereference the
inode->i_sb, the kernel oopsed.

This patchset has the oplock break job hold an inode and vfsmount
reference until the oplock break completes. With this, there should be
no need to take a tcon reference (the vfsmount implicitly holds one
already).

Currently, when an oplock break comes in there's a chance that the
oplock break job won't occur if the allocation of the oplock_q_entry
fails. There are also some rather nasty races in the allocation and
handling of these structs.

Rather than allocating oplock queue entries when an oplock break comes
in, add a few extra fields to the cifsFileInfo struct. Get rid of the
dedicated cifs_oplock_thread as well and queue the oplock break job to
the slow_work thread pool. This approach also has the advantage that
the oplock break jobs can potentially run in parallel rather than be
serialized like they are today.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
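For readers unfamiliar with the slow_work facility, the sketch below shows roughly how a slow_work item is embedded in a longer-lived object and queued. The slow_work types and calls (struct slow_work, slow_work_init(), slow_work_enqueue(), the get_ref/put_ref/execute ops) are the 2.6.31-era interface this series builds on; the series itself embeds the item in struct cifsFileInfo as the oplock_break and oplock_break_cancelled fields visible in the hunk below. Everything else here (the example_* names and the empty callback bodies) is illustrative only, not taken from the patch, and a real user of the facility must also register with it at init time via slow_work_register_user() (whose signature changed in later kernels).

/*
 * Rough sketch of the slow_work pattern this series adopts, NOT the
 * patch itself.  The example_* names and empty callback bodies are
 * stand-ins for the cifs-specific code in the full series.
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slow-work.h>

struct example_file {				/* stand-in for cifsFileInfo */
	struct slow_work oplock_break;		/* embedded work item */
	bool oplock_break_cancelled;
};

static int example_oplock_get_ref(struct slow_work *work)
{
	/*
	 * take inode/vfsmount references here so the job cannot outlive
	 * the mount (the race this respin fixes)
	 */
	return 0;
}

static void example_oplock_put_ref(struct slow_work *work)
{
	/* drop the references taken in get_ref */
}

static void example_oplock_execute(struct slow_work *work)
{
	struct example_file *file =
		container_of(work, struct example_file, oplock_break);

	/* flush cached data and acknowledge the oplock break here */
	(void)file;
}

static const struct slow_work_ops example_oplock_ops = {
	.get_ref = example_oplock_get_ref,
	.put_ref = example_oplock_put_ref,
	.execute = example_oplock_execute,
};

/* done once per open file, e.g. when the file info struct is allocated */
static void example_open(struct example_file *file)
{
	slow_work_init(&file->oplock_break, &example_oplock_ops);
	file->oplock_break_cancelled = false;
}

/* done when an oplock break arrives; mirrors the hunk below */
static int example_oplock_break(struct example_file *file)
{
	return slow_work_enqueue(&file->oplock_break);
}

Keeping the reference counting in get_ref/put_ref is what lets a queued job either pin or cleanly drop the objects it touches across a racing umount, which is the point of the respin described above.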
Diffstat (limited to 'fs/cifs/misc.c')
-rw-r--r--  fs/cifs/misc.c | 29
1 file changed, 20 insertions(+), 9 deletions(-)
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 191e6220bc76..0241b25ac33f 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -32,7 +32,6 @@
 
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
-extern struct task_struct *oplockThread;
 
 /* The xid serves as a useful identifier for each incoming vfs request,
    in a similar way to the mid which is useful to track each sent smb,
@@ -500,6 +499,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 	struct cifsTconInfo *tcon;
 	struct cifsInodeInfo *pCifsInode;
 	struct cifsFileInfo *netfile;
+	int rc;
 
 	cFYI(1, ("Checking for oplock break or dnotify response"));
 	if ((pSMB->hdr.Command == SMB_COM_NT_TRANSACT) &&
@@ -569,19 +569,30 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
 				if (pSMB->Fid != netfile->netfid)
 					continue;
 
-				read_unlock(&GlobalSMBSeslock);
-				read_unlock(&cifs_tcp_ses_lock);
+				/*
+				 * don't do anything if file is about to be
+				 * closed anyway.
+				 */
+				if (netfile->closePend) {
+					read_unlock(&GlobalSMBSeslock);
+					read_unlock(&cifs_tcp_ses_lock);
+					return true;
+				}
+
 				cFYI(1, ("file id match, oplock break"));
 				pCifsInode = CIFS_I(netfile->pInode);
 				pCifsInode->clientCanCacheAll = false;
 				if (pSMB->OplockLevel == 0)
 					pCifsInode->clientCanCacheRead = false;
-				AllocOplockQEntry(netfile->pInode,
-						  netfile->netfid, tcon);
-				cFYI(1, ("about to wake up oplock thread"));
-				if (oplockThread)
-					wake_up_process(oplockThread);
-
+				rc = slow_work_enqueue(&netfile->oplock_break);
+				if (rc) {
+					cERROR(1, ("failed to enqueue oplock "
+						   "break: %d\n", rc));
+				} else {
+					netfile->oplock_break_cancelled = false;
+				}
+				read_unlock(&GlobalSMBSeslock);
+				read_unlock(&cifs_tcp_ses_lock);
 				return true;
 			}
 			read_unlock(&GlobalSMBSeslock);