Diffstat (limited to 'fs/cifs/cifsfs.c')
-rw-r--r--  fs/cifs/cifsfs.c  158
1 files changed, 106 insertions, 52 deletions
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 682b0235ad9a..79eeccd0437f 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -32,6 +32,7 @@
 #include <linux/seq_file.h>
 #include <linux/vfs.h>
 #include <linux/mempool.h>
+#include <linux/delay.h>
 #include "cifsfs.h"
 #include "cifspdu.h"
 #define DECLARE_GLOBALS_HERE
@@ -429,6 +430,11 @@ static void cifs_umount_begin(struct super_block * sblock)
 {
 	cFYI(1,("wake up tasks now - umount begin not complete"));
 	wake_up_all(&tcon->ses->server->request_q);
+	wake_up_all(&tcon->ses->server->response_q);
+	msleep(1); /* yield */
+	/* we have to kick the requests once more */
+	wake_up_all(&tcon->ses->server->response_q);
+	msleep(1);
 }
 /* BB FIXME - finish add checks for tidStatus BB */
 
@@ -483,56 +489,40 @@ cifs_get_sb(struct file_system_type *fs_type,
 	return sb;
 }
 
-static ssize_t
-cifs_read_wrapper(struct file * file, char __user *read_data, size_t read_size,
-		  loff_t * poffset)
+static ssize_t cifs_file_writev(struct file *file, const struct iovec *iov,
+				unsigned long nr_segs, loff_t *ppos)
 {
-	if(file->f_dentry == NULL)
-		return -EIO;
-	else if(file->f_dentry->d_inode == NULL)
-		return -EIO;
-
-	cFYI(1,("In read_wrapper size %zd at %lld",read_size,*poffset));
+	struct inode *inode = file->f_dentry->d_inode;
+	ssize_t written;
 
-	if(CIFS_I(file->f_dentry->d_inode)->clientCanCacheRead) {
-		return generic_file_read(file,read_data,read_size,poffset);
-	} else {
-		/* BB do we need to lock inode from here until after invalidate? */
-/*		if(file->f_dentry->d_inode->i_mapping) {
-			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
-			filemap_fdatawait(file->f_dentry->d_inode->i_mapping);
-		}*/
-/*		cifs_revalidate(file->f_dentry);*/ /* BB fixme */
-
-		/* BB we should make timer configurable - perhaps
-		   by simply calling cifs_revalidate here */
-		/* invalidate_remote_inode(file->f_dentry->d_inode);*/
-		return generic_file_read(file,read_data,read_size,poffset);
-	}
+	written = generic_file_writev(file, iov, nr_segs, ppos);
+	if (!CIFS_I(inode)->clientCanCacheAll)
+		filemap_fdatawrite(inode->i_mapping);
+	return written;
 }
 
-static ssize_t
-cifs_write_wrapper(struct file * file, const char __user *write_data,
-		   size_t write_size, loff_t * poffset)
+static ssize_t cifs_file_aio_write(struct kiocb *iocb, const char __user *buf,
+				   size_t count, loff_t pos)
 {
+	struct inode *inode = iocb->ki_filp->f_dentry->d_inode;
 	ssize_t written;
 
-	if(file->f_dentry == NULL)
-		return -EIO;
-	else if(file->f_dentry->d_inode == NULL)
-		return -EIO;
-
-	cFYI(1,("In write_wrapper size %zd at %lld",write_size,*poffset));
-
-	written = generic_file_write(file,write_data,write_size,poffset);
-	if(!CIFS_I(file->f_dentry->d_inode)->clientCanCacheAll) {
-		if(file->f_dentry->d_inode->i_mapping) {
-			filemap_fdatawrite(file->f_dentry->d_inode->i_mapping);
-		}
-	}
+	written = generic_file_aio_write(iocb, buf, count, pos);
+	if (!CIFS_I(inode)->clientCanCacheAll)
+		filemap_fdatawrite(inode->i_mapping);
 	return written;
 }
 
+static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
+{
+	/* origin == SEEK_END => we must revalidate the cached file length */
+	if (origin == 2) {
+		int retval = cifs_revalidate(file->f_dentry);
+		if (retval < 0)
+			return (loff_t)retval;
+	}
+	return remote_llseek(file, offset, origin);
+}
 
 static struct file_system_type cifs_fs_type = {
 	.owner = THIS_MODULE,
@@ -594,8 +584,12 @@ struct inode_operations cifs_symlink_inode_ops = {
 };
 
 struct file_operations cifs_file_ops = {
-	.read = cifs_read_wrapper,
-	.write = cifs_write_wrapper,
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.readv = generic_file_readv,
+	.writev = cifs_file_writev,
+	.aio_read = generic_file_aio_read,
+	.aio_write = cifs_file_aio_write,
 	.open = cifs_open,
 	.release = cifs_close,
 	.lock = cifs_lock,
@@ -603,15 +597,12 @@ struct file_operations cifs_file_ops = {
 	.flush = cifs_flush,
 	.mmap = cifs_file_mmap,
 	.sendfile = generic_file_sendfile,
+	.llseek = cifs_llseek,
 #ifdef CONFIG_CIFS_POSIX
 	.ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
 
 #ifdef CONFIG_CIFS_EXPERIMENTAL
-	.readv = generic_file_readv,
-	.writev = generic_file_writev,
-	.aio_read = generic_file_aio_read,
-	.aio_write = generic_file_aio_write,
 	.dir_notify = cifs_dir_notify,
 #endif /* CONFIG_CIFS_EXPERIMENTAL */
 };
@@ -630,12 +621,53 @@ struct file_operations cifs_file_direct_ops = {
 #ifdef CONFIG_CIFS_POSIX
 	.ioctl = cifs_ioctl,
 #endif /* CONFIG_CIFS_POSIX */
+	.llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+	.dir_notify = cifs_dir_notify,
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
+};
+struct file_operations cifs_file_nobrl_ops = {
+	.read = do_sync_read,
+	.write = do_sync_write,
+	.readv = generic_file_readv,
+	.writev = cifs_file_writev,
+	.aio_read = generic_file_aio_read,
+	.aio_write = cifs_file_aio_write,
+	.open = cifs_open,
+	.release = cifs_close,
+	.fsync = cifs_fsync,
+	.flush = cifs_flush,
+	.mmap = cifs_file_mmap,
+	.sendfile = generic_file_sendfile,
+	.llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_POSIX
+	.ioctl = cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
 
 #ifdef CONFIG_CIFS_EXPERIMENTAL
 	.dir_notify = cifs_dir_notify,
 #endif /* CONFIG_CIFS_EXPERIMENTAL */
 };
 
+struct file_operations cifs_file_direct_nobrl_ops = {
+	/* no mmap, no aio, no readv -
+	   BB reevaluate whether they can be done with directio, no cache */
+	.read = cifs_user_read,
+	.write = cifs_user_write,
+	.open = cifs_open,
+	.release = cifs_close,
+	.fsync = cifs_fsync,
+	.flush = cifs_flush,
+	.sendfile = generic_file_sendfile, /* BB removeme BB */
+#ifdef CONFIG_CIFS_POSIX
+	.ioctl = cifs_ioctl,
+#endif /* CONFIG_CIFS_POSIX */
+	.llseek = cifs_llseek,
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+	.dir_notify = cifs_dir_notify,
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
+};
+
 struct file_operations cifs_dir_ops = {
 	.readdir = cifs_readdir,
 	.release = cifs_closedir,
@@ -714,7 +746,7 @@ cifs_init_request_bufs(void)
 		kmem_cache_destroy(cifs_req_cachep);
 		return -ENOMEM;
 	}
-	/* 256 (MAX_CIFS_HDR_SIZE bytes is enough for most SMB responses and
+	/* MAX_CIFS_SMALL_BUFFER_SIZE bytes is enough for most SMB responses and
 	   almost all handle based requests (but not write response, nor is it
 	   sufficient for path based requests). A smaller size would have
 	   been more efficient (compacting multiple slab items on one 4k page)
@@ -723,7 +755,8 @@ cifs_init_request_bufs(void)
 	   efficient to alloc 1 per page off the slab compared to 17K (5page)
 	   alloc of large cifs buffers even when page debugging is on */
 	cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
-			MAX_CIFS_HDR_SIZE, 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+			MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
+			NULL, NULL);
 	if (cifs_sm_req_cachep == NULL) {
 		mempool_destroy(cifs_req_poolp);
 		kmem_cache_destroy(cifs_req_cachep);
@@ -841,9 +874,9 @@ static int cifs_oplock_thread(void * dummyarg)
 	DeleteOplockQEntry(oplock_item);
 	/* can not grab inode sem here since it would
 	   deadlock when oplock received on delete
-	   since vfs_unlink holds the i_sem across
+	   since vfs_unlink holds the i_mutex across
 	   the call */
-	/* down(&inode->i_sem);*/
+	/* mutex_lock(&inode->i_mutex);*/
 	if (S_ISREG(inode->i_mode)) {
 		rc = filemap_fdatawrite(inode->i_mapping);
 		if(CIFS_I(inode)->clientCanCacheRead == 0) {
@@ -852,7 +885,7 @@ static int cifs_oplock_thread(void * dummyarg)
 		}
 	} else
 		rc = 0;
-	/* up(&inode->i_sem);*/
+	/* mutex_unlock(&inode->i_mutex);*/
 	if (rc)
 		CIFS_I(inode)->write_behind_rc = rc;
 	cFYI(1,("Oplock flush inode %p rc %d",inode,rc));
@@ -882,6 +915,9 @@
 
 static int cifs_dnotify_thread(void * dummyarg)
 {
+	struct list_head *tmp;
+	struct cifsSesInfo *ses;
+
 	daemonize("cifsdnotifyd");
 	allow_signal(SIGTERM);
 
@@ -890,7 +926,19 @@ static int cifs_dnotify_thread(void * dummyarg)
 		if(try_to_freeze())
 			continue;
 		set_current_state(TASK_INTERRUPTIBLE);
-		schedule_timeout(39*HZ);
+		schedule_timeout(15*HZ);
+		read_lock(&GlobalSMBSeslock);
+		/* check if any stuck requests that need
+		   to be woken up and wakeq so the
+		   thread can wake up and error out */
+		list_for_each(tmp, &GlobalSMBSessionList) {
+			ses = list_entry(tmp, struct cifsSesInfo,
+					 cifsSessionList);
+			if(ses && ses->server &&
+			   atomic_read(&ses->server->inFlight))
+				wake_up_all(&ses->server->response_q);
+		}
+		read_unlock(&GlobalSMBSeslock);
 	} while(!signal_pending(current));
 	complete_and_exit (&cifs_dnotify_exited, 0);
 }
@@ -920,6 +968,12 @@ init_cifs(void)
 	atomic_set(&tconInfoReconnectCount, 0);
 
 	atomic_set(&bufAllocCount, 0);
+	atomic_set(&smBufAllocCount, 0);
+#ifdef CONFIG_CIFS_STATS2
+	atomic_set(&totBufAllocCount, 0);
+	atomic_set(&totSmBufAllocCount, 0);
+#endif /* CONFIG_CIFS_STATS2 */
+
 	atomic_set(&midCount, 0);
 	GlobalCurrentXid = 0;
 	GlobalTotalActiveXid = 0;