author    Linus Torvalds <torvalds@linux-foundation.org>   2011-01-13 13:45:01 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>   2011-01-13 13:45:01 -0500
commit    275220f0fcff1adf28a717076e00f575edf05fda
tree      d249bccc80c64443dab211639050c4fb14332648 /fs
parent    fe3c560b8a22cb28e54fe8950abef38e88d75831
parent    81c5e2ae33c4b19e53966b427e33646bf6811830
Merge branch 'for-2.6.38/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.38/core' of git://git.kernel.dk/linux-2.6-block: (43 commits)
block: ensure that completion error gets properly traced
blktrace: add missing probe argument to block_bio_complete
block cfq: don't use atomic_t for cfq_group
block cfq: don't use atomic_t for cfq_queue
block: trace event block fix unassigned field
block: add internal hd part table references
block: fix accounting bug on cross partition merges
kref: add kref_test_and_get
bio-integrity: mark kintegrityd_wq highpri and CPU intensive
block: make kblockd_workqueue smarter
Revert "sd: implement sd_check_events()"
block: Clean up exit_io_context() source code.
Fix compile warnings due to missing removal of a 'ret' variable
fs/block: type signature of major_to_index(int) to major_to_index(unsigned)
block: convert !IS_ERR(p) && p to !IS_ERR_OR_NULL(p)
cfq-iosched: don't check cfqg in choose_service_tree()
fs/splice: Pull buf->ops->confirm() from splice_from_pipe actors
cdrom: export cdrom_check_events()
sd: implement sd_check_events()
sr: implement sr_check_events()
...
Diffstat (limited to 'fs')
 fs/bio-integrity.c           |   7
 fs/block_dev.c               | 741
 fs/btrfs/volumes.c           |  28
 fs/btrfs/volumes.h           |   2
 fs/char_dev.c                |   2
 fs/ext3/super.c              |  12
 fs/ext4/super.c              |  12
 fs/gfs2/ops_fstype.c         |   8
 fs/jfs/jfs_logmgr.c          |  17
 fs/logfs/dev_bdev.c          |   7
 fs/nfsd/vfs.c                |   5
 fs/nilfs2/super.c            |   8
 fs/ocfs2/cluster/heartbeat.c |   2
 fs/partitions/check.c        | 106
 fs/reiserfs/journal.c        |  21
 fs/splice.c                  |  43
 fs/super.c                   |  19
 fs/xfs/linux-2.6/xfs_super.c |   5
18 files changed, 347 insertions(+), 698 deletions(-)
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 4d0ff5ee27b8..e49cce234c65 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -782,7 +782,12 @@ void __init bio_integrity_init(void) | |||
782 | { | 782 | { |
783 | unsigned int i; | 783 | unsigned int i; |
784 | 784 | ||
785 | kintegrityd_wq = create_workqueue("kintegrityd"); | 785 | /* |
786 | * kintegrityd won't block much but may burn a lot of CPU cycles. | ||
787 | * Make it highpri CPU intensive wq with max concurrency of 1. | ||
788 | */ | ||
789 | kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM | | ||
790 | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); | ||
786 | if (!kintegrityd_wq) | 791 | if (!kintegrityd_wq) |
787 | panic("Failed to create kintegrityd\n"); | 792 | panic("Failed to create kintegrityd\n"); |
788 | 793 | ||
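The hunk above replaces create_workqueue() with alloc_workqueue() so that the flags (WQ_MEM_RECLAIM, WQ_HIGHPRI, WQ_CPU_INTENSIVE) and the max_active limit of 1 can be stated explicitly. As a rough sketch of the same pattern outside this patch — the my_wq/my_work/my_work_fn names are invented for illustration — a module might set up such a queue like this:

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/* short, CPU-bound work runs here */
}

static DECLARE_WORK(my_work, my_work_fn);

static int __init my_init(void)
{
	/* highpri, CPU-intensive queue limited to one in-flight work item */
	my_wq = alloc_workqueue("my_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI |
				WQ_CPU_INTENSIVE, 1);
	if (!my_wq)
		return -ENOMEM;
	queue_work(my_wq, &my_work);
	return 0;
}

static void __exit my_exit(void)
{
	destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");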
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 88da70355aa3..fe3f59c14a02 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -432,9 +432,6 @@ static void init_once(void *foo) | |||
432 | mutex_init(&bdev->bd_mutex); | 432 | mutex_init(&bdev->bd_mutex); |
433 | INIT_LIST_HEAD(&bdev->bd_inodes); | 433 | INIT_LIST_HEAD(&bdev->bd_inodes); |
434 | INIT_LIST_HEAD(&bdev->bd_list); | 434 | INIT_LIST_HEAD(&bdev->bd_list); |
435 | #ifdef CONFIG_SYSFS | ||
436 | INIT_LIST_HEAD(&bdev->bd_holder_list); | ||
437 | #endif | ||
438 | inode_init_once(&ei->vfs_inode); | 435 | inode_init_once(&ei->vfs_inode); |
439 | /* Initialize mutex for freeze. */ | 436 | /* Initialize mutex for freeze. */ |
440 | mutex_init(&bdev->bd_fsfreeze_mutex); | 437 | mutex_init(&bdev->bd_fsfreeze_mutex); |
@@ -669,7 +666,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole, | |||
669 | else if (bdev->bd_contains == bdev) | 666 | else if (bdev->bd_contains == bdev) |
670 | return true; /* is a whole device which isn't held */ | 667 | return true; /* is a whole device which isn't held */ |
671 | 668 | ||
672 | else if (whole->bd_holder == bd_claim) | 669 | else if (whole->bd_holder == bd_may_claim) |
673 | return true; /* is a partition of a device that is being partitioned */ | 670 | return true; /* is a partition of a device that is being partitioned */ |
674 | else if (whole->bd_holder != NULL) | 671 | else if (whole->bd_holder != NULL) |
675 | return false; /* is a partition of a held device */ | 672 | return false; /* is a partition of a held device */ |
@@ -781,439 +778,87 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
781 | } | 778 | } |
782 | } | 779 | } |
783 | 780 | ||
784 | /* releases bdev_lock */ | ||
785 | static void __bd_abort_claiming(struct block_device *whole, void *holder) | ||
786 | { | ||
787 | BUG_ON(whole->bd_claiming != holder); | ||
788 | whole->bd_claiming = NULL; | ||
789 | wake_up_bit(&whole->bd_claiming, 0); | ||
790 | |||
791 | spin_unlock(&bdev_lock); | ||
792 | bdput(whole); | ||
793 | } | ||
794 | |||
795 | /** | ||
796 | * bd_abort_claiming - abort claiming a block device | ||
797 | * @whole: whole block device returned by bd_start_claiming() | ||
798 | * @holder: holder trying to claim @bdev | ||
799 | * | ||
800 | * Abort a claiming block started by bd_start_claiming(). Note that | ||
801 | * @whole is not the block device to be claimed but the whole device | ||
802 | * returned by bd_start_claiming(). | ||
803 | * | ||
804 | * CONTEXT: | ||
805 | * Grabs and releases bdev_lock. | ||
806 | */ | ||
807 | static void bd_abort_claiming(struct block_device *whole, void *holder) | ||
808 | { | ||
809 | spin_lock(&bdev_lock); | ||
810 | __bd_abort_claiming(whole, holder); /* releases bdev_lock */ | ||
811 | } | ||
812 | |||
813 | /* increment holders when we have a legitimate claim. requires bdev_lock */ | ||
814 | static void __bd_claim(struct block_device *bdev, struct block_device *whole, | ||
815 | void *holder) | ||
816 | { | ||
817 | /* note that for a whole device bd_holders | ||
818 | * will be incremented twice, and bd_holder will | ||
819 | * be set to bd_claim before being set to holder | ||
820 | */ | ||
821 | whole->bd_holders++; | ||
822 | whole->bd_holder = bd_claim; | ||
823 | bdev->bd_holders++; | ||
824 | bdev->bd_holder = holder; | ||
825 | } | ||
826 | |||
827 | /** | ||
828 | * bd_finish_claiming - finish claiming a block device | ||
829 | * @bdev: block device of interest (passed to bd_start_claiming()) | ||
830 | * @whole: whole block device returned by bd_start_claiming() | ||
831 | * @holder: holder trying to claim @bdev | ||
832 | * | ||
833 | * Finish a claiming block started by bd_start_claiming(). | ||
834 | * | ||
835 | * CONTEXT: | ||
836 | * Grabs and releases bdev_lock. | ||
837 | */ | ||
838 | static void bd_finish_claiming(struct block_device *bdev, | ||
839 | struct block_device *whole, void *holder) | ||
840 | { | ||
841 | spin_lock(&bdev_lock); | ||
842 | BUG_ON(!bd_may_claim(bdev, whole, holder)); | ||
843 | __bd_claim(bdev, whole, holder); | ||
844 | __bd_abort_claiming(whole, holder); /* not actually an abort */ | ||
845 | } | ||
846 | |||
847 | /** | ||
848 | * bd_claim - claim a block device | ||
849 | * @bdev: block device to claim | ||
850 | * @holder: holder trying to claim @bdev | ||
851 | * | ||
852 | * Try to claim @bdev which must have been opened successfully. | ||
853 | * | ||
854 | * CONTEXT: | ||
855 | * Might sleep. | ||
856 | * | ||
857 | * RETURNS: | ||
858 | * 0 if successful, -EBUSY if @bdev is already claimed. | ||
859 | */ | ||
860 | int bd_claim(struct block_device *bdev, void *holder) | ||
861 | { | ||
862 | struct block_device *whole = bdev->bd_contains; | ||
863 | int res; | ||
864 | |||
865 | might_sleep(); | ||
866 | |||
867 | spin_lock(&bdev_lock); | ||
868 | res = bd_prepare_to_claim(bdev, whole, holder); | ||
869 | if (res == 0) | ||
870 | __bd_claim(bdev, whole, holder); | ||
871 | spin_unlock(&bdev_lock); | ||
872 | |||
873 | return res; | ||
874 | } | ||
875 | EXPORT_SYMBOL(bd_claim); | ||
876 | |||
877 | void bd_release(struct block_device *bdev) | ||
878 | { | ||
879 | spin_lock(&bdev_lock); | ||
880 | if (!--bdev->bd_contains->bd_holders) | ||
881 | bdev->bd_contains->bd_holder = NULL; | ||
882 | if (!--bdev->bd_holders) | ||
883 | bdev->bd_holder = NULL; | ||
884 | spin_unlock(&bdev_lock); | ||
885 | } | ||
886 | |||
887 | EXPORT_SYMBOL(bd_release); | ||
888 | |||
889 | #ifdef CONFIG_SYSFS | 781 | #ifdef CONFIG_SYSFS |
890 | /* | ||
891 | * Functions for bd_claim_by_kobject / bd_release_from_kobject | ||
892 | * | ||
893 | * If a kobject is passed to bd_claim_by_kobject() | ||
894 | * and the kobject has a parent directory, | ||
895 | * following symlinks are created: | ||
896 | * o from the kobject to the claimed bdev | ||
897 | * o from "holders" directory of the bdev to the parent of the kobject | ||
898 | * bd_release_from_kobject() removes these symlinks. | ||
899 | * | ||
900 | * Example: | ||
901 | * If /dev/dm-0 maps to /dev/sda, kobject corresponding to | ||
902 | * /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then: | ||
903 | * /sys/block/dm-0/slaves/sda --> /sys/block/sda | ||
904 | * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 | ||
905 | */ | ||
906 | |||
907 | static int add_symlink(struct kobject *from, struct kobject *to) | 782 | static int add_symlink(struct kobject *from, struct kobject *to) |
908 | { | 783 | { |
909 | if (!from || !to) | ||
910 | return 0; | ||
911 | return sysfs_create_link(from, to, kobject_name(to)); | 784 | return sysfs_create_link(from, to, kobject_name(to)); |
912 | } | 785 | } |
913 | 786 | ||
914 | static void del_symlink(struct kobject *from, struct kobject *to) | 787 | static void del_symlink(struct kobject *from, struct kobject *to) |
915 | { | 788 | { |
916 | if (!from || !to) | ||
917 | return; | ||
918 | sysfs_remove_link(from, kobject_name(to)); | 789 | sysfs_remove_link(from, kobject_name(to)); |
919 | } | 790 | } |
920 | 791 | ||
921 | /* | ||
922 | * 'struct bd_holder' contains pointers to kobjects symlinked by | ||
923 | * bd_claim_by_kobject. | ||
924 | * It's connected to bd_holder_list which is protected by bdev->bd_sem. | ||
925 | */ | ||
926 | struct bd_holder { | ||
927 | struct list_head list; /* chain of holders of the bdev */ | ||
928 | int count; /* references from the holder */ | ||
929 | struct kobject *sdir; /* holder object, e.g. "/block/dm-0/slaves" */ | ||
930 | struct kobject *hdev; /* e.g. "/block/dm-0" */ | ||
931 | struct kobject *hdir; /* e.g. "/block/sda/holders" */ | ||
932 | struct kobject *sdev; /* e.g. "/block/sda" */ | ||
933 | }; | ||
934 | |||
935 | /* | ||
936 | * Get references of related kobjects at once. | ||
937 | * Returns 1 on success. 0 on failure. | ||
938 | * | ||
939 | * Should call bd_holder_release_dirs() after successful use. | ||
940 | */ | ||
941 | static int bd_holder_grab_dirs(struct block_device *bdev, | ||
942 | struct bd_holder *bo) | ||
943 | { | ||
944 | if (!bdev || !bo) | ||
945 | return 0; | ||
946 | |||
947 | bo->sdir = kobject_get(bo->sdir); | ||
948 | if (!bo->sdir) | ||
949 | return 0; | ||
950 | |||
951 | bo->hdev = kobject_get(bo->sdir->parent); | ||
952 | if (!bo->hdev) | ||
953 | goto fail_put_sdir; | ||
954 | |||
955 | bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj); | ||
956 | if (!bo->sdev) | ||
957 | goto fail_put_hdev; | ||
958 | |||
959 | bo->hdir = kobject_get(bdev->bd_part->holder_dir); | ||
960 | if (!bo->hdir) | ||
961 | goto fail_put_sdev; | ||
962 | |||
963 | return 1; | ||
964 | |||
965 | fail_put_sdev: | ||
966 | kobject_put(bo->sdev); | ||
967 | fail_put_hdev: | ||
968 | kobject_put(bo->hdev); | ||
969 | fail_put_sdir: | ||
970 | kobject_put(bo->sdir); | ||
971 | |||
972 | return 0; | ||
973 | } | ||
974 | |||
975 | /* Put references of related kobjects at once. */ | ||
976 | static void bd_holder_release_dirs(struct bd_holder *bo) | ||
977 | { | ||
978 | kobject_put(bo->hdir); | ||
979 | kobject_put(bo->sdev); | ||
980 | kobject_put(bo->hdev); | ||
981 | kobject_put(bo->sdir); | ||
982 | } | ||
983 | |||
984 | static struct bd_holder *alloc_bd_holder(struct kobject *kobj) | ||
985 | { | ||
986 | struct bd_holder *bo; | ||
987 | |||
988 | bo = kzalloc(sizeof(*bo), GFP_KERNEL); | ||
989 | if (!bo) | ||
990 | return NULL; | ||
991 | |||
992 | bo->count = 1; | ||
993 | bo->sdir = kobj; | ||
994 | |||
995 | return bo; | ||
996 | } | ||
997 | |||
998 | static void free_bd_holder(struct bd_holder *bo) | ||
999 | { | ||
1000 | kfree(bo); | ||
1001 | } | ||
1002 | |||
1003 | /** | 792 | /** |
1004 | * find_bd_holder - find matching struct bd_holder from the block device | 793 | * bd_link_disk_holder - create symlinks between holding disk and slave bdev |
794 | * @bdev: the claimed slave bdev | ||
795 | * @disk: the holding disk | ||
1005 | * | 796 | * |
1006 | * @bdev: struct block device to be searched | 797 | * This functions creates the following sysfs symlinks. |
1007 | * @bo: target struct bd_holder | ||
1008 | * | ||
1009 | * Returns matching entry with @bo in @bdev->bd_holder_list. | ||
1010 | * If found, increment the reference count and return the pointer. | ||
1011 | * If not found, returns NULL. | ||
1012 | */ | ||
1013 | static struct bd_holder *find_bd_holder(struct block_device *bdev, | ||
1014 | struct bd_holder *bo) | ||
1015 | { | ||
1016 | struct bd_holder *tmp; | ||
1017 | |||
1018 | list_for_each_entry(tmp, &bdev->bd_holder_list, list) | ||
1019 | if (tmp->sdir == bo->sdir) { | ||
1020 | tmp->count++; | ||
1021 | return tmp; | ||
1022 | } | ||
1023 | |||
1024 | return NULL; | ||
1025 | } | ||
1026 | |||
1027 | /** | ||
1028 | * add_bd_holder - create sysfs symlinks for bd_claim() relationship | ||
1029 | * | ||
1030 | * @bdev: block device to be bd_claimed | ||
1031 | * @bo: preallocated and initialized by alloc_bd_holder() | ||
1032 | * | ||
1033 | * Add @bo to @bdev->bd_holder_list, create symlinks. | ||
1034 | * | ||
1035 | * Returns 0 if symlinks are created. | ||
1036 | * Returns -ve if something fails. | ||
1037 | */ | ||
1038 | static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo) | ||
1039 | { | ||
1040 | int err; | ||
1041 | |||
1042 | if (!bo) | ||
1043 | return -EINVAL; | ||
1044 | |||
1045 | if (!bd_holder_grab_dirs(bdev, bo)) | ||
1046 | return -EBUSY; | ||
1047 | |||
1048 | err = add_symlink(bo->sdir, bo->sdev); | ||
1049 | if (err) | ||
1050 | return err; | ||
1051 | |||
1052 | err = add_symlink(bo->hdir, bo->hdev); | ||
1053 | if (err) { | ||
1054 | del_symlink(bo->sdir, bo->sdev); | ||
1055 | return err; | ||
1056 | } | ||
1057 | |||
1058 | list_add_tail(&bo->list, &bdev->bd_holder_list); | ||
1059 | return 0; | ||
1060 | } | ||
1061 | |||
1062 | /** | ||
1063 | * del_bd_holder - delete sysfs symlinks for bd_claim() relationship | ||
1064 | * | 798 | * |
1065 | * @bdev: block device to be bd_claimed | 799 | * - from "slaves" directory of the holder @disk to the claimed @bdev |
1066 | * @kobj: holder's kobject | 800 | * - from "holders" directory of the @bdev to the holder @disk |
1067 | * | 801 | * |
1068 | * If there is matching entry with @kobj in @bdev->bd_holder_list | 802 | * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is |
1069 | * and no other bd_claim() from the same kobject, | 803 | * passed to bd_link_disk_holder(), then: |
1070 | * remove the struct bd_holder from the list, delete symlinks for it. | ||
1071 | * | 804 | * |
1072 | * Returns a pointer to the struct bd_holder when it's removed from the list | 805 | * /sys/block/dm-0/slaves/sda --> /sys/block/sda |
1073 | * and ready to be freed. | 806 | * /sys/block/sda/holders/dm-0 --> /sys/block/dm-0 |
1074 | * Returns NULL if matching claim isn't found or there is other bd_claim() | ||
1075 | * by the same kobject. | ||
1076 | */ | ||
1077 | static struct bd_holder *del_bd_holder(struct block_device *bdev, | ||
1078 | struct kobject *kobj) | ||
1079 | { | ||
1080 | struct bd_holder *bo; | ||
1081 | |||
1082 | list_for_each_entry(bo, &bdev->bd_holder_list, list) { | ||
1083 | if (bo->sdir == kobj) { | ||
1084 | bo->count--; | ||
1085 | BUG_ON(bo->count < 0); | ||
1086 | if (!bo->count) { | ||
1087 | list_del(&bo->list); | ||
1088 | del_symlink(bo->sdir, bo->sdev); | ||
1089 | del_symlink(bo->hdir, bo->hdev); | ||
1090 | bd_holder_release_dirs(bo); | ||
1091 | return bo; | ||
1092 | } | ||
1093 | break; | ||
1094 | } | ||
1095 | } | ||
1096 | |||
1097 | return NULL; | ||
1098 | } | ||
1099 | |||
1100 | /** | ||
1101 | * bd_claim_by_kobject - bd_claim() with additional kobject signature | ||
1102 | * | 807 | * |
1103 | * @bdev: block device to be claimed | 808 | * The caller must have claimed @bdev before calling this function and |
1104 | * @holder: holder's signature | 809 | * ensure that both @bdev and @disk are valid during the creation and |
1105 | * @kobj: holder's kobject | 810 | * lifetime of these symlinks. |
1106 | * | 811 | * |
1107 | * Do bd_claim() and if it succeeds, create sysfs symlinks between | 812 | * CONTEXT: |
1108 | * the bdev and the holder's kobject. | 813 | * Might sleep. |
1109 | * Use bd_release_from_kobject() when relesing the claimed bdev. | ||
1110 | * | 814 | * |
1111 | * Returns 0 on success. (same as bd_claim()) | 815 | * RETURNS: |
1112 | * Returns errno on failure. | 816 | * 0 on success, -errno on failure. |
1113 | */ | 817 | */ |
1114 | static int bd_claim_by_kobject(struct block_device *bdev, void *holder, | 818 | int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk) |
1115 | struct kobject *kobj) | ||
1116 | { | 819 | { |
1117 | int err; | 820 | int ret = 0; |
1118 | struct bd_holder *bo, *found; | ||
1119 | |||
1120 | if (!kobj) | ||
1121 | return -EINVAL; | ||
1122 | |||
1123 | bo = alloc_bd_holder(kobj); | ||
1124 | if (!bo) | ||
1125 | return -ENOMEM; | ||
1126 | 821 | ||
1127 | mutex_lock(&bdev->bd_mutex); | 822 | mutex_lock(&bdev->bd_mutex); |
1128 | 823 | ||
1129 | err = bd_claim(bdev, holder); | 824 | WARN_ON_ONCE(!bdev->bd_holder || bdev->bd_holder_disk); |
1130 | if (err) | ||
1131 | goto fail; | ||
1132 | 825 | ||
1133 | found = find_bd_holder(bdev, bo); | 826 | /* FIXME: remove the following once add_disk() handles errors */ |
1134 | if (found) | 827 | if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir)) |
1135 | goto fail; | 828 | goto out_unlock; |
1136 | 829 | ||
1137 | err = add_bd_holder(bdev, bo); | 830 | ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
1138 | if (err) | 831 | if (ret) |
1139 | bd_release(bdev); | 832 | goto out_unlock; |
1140 | else | ||
1141 | bo = NULL; | ||
1142 | fail: | ||
1143 | mutex_unlock(&bdev->bd_mutex); | ||
1144 | free_bd_holder(bo); | ||
1145 | return err; | ||
1146 | } | ||
1147 | 833 | ||
1148 | /** | 834 | ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); |
1149 | * bd_release_from_kobject - bd_release() with additional kobject signature | 835 | if (ret) { |
1150 | * | 836 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
1151 | * @bdev: block device to be released | 837 | goto out_unlock; |
1152 | * @kobj: holder's kobject | 838 | } |
1153 | * | ||
1154 | * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject(). | ||
1155 | */ | ||
1156 | static void bd_release_from_kobject(struct block_device *bdev, | ||
1157 | struct kobject *kobj) | ||
1158 | { | ||
1159 | if (!kobj) | ||
1160 | return; | ||
1161 | 839 | ||
1162 | mutex_lock(&bdev->bd_mutex); | 840 | bdev->bd_holder_disk = disk; |
1163 | bd_release(bdev); | 841 | out_unlock: |
1164 | free_bd_holder(del_bd_holder(bdev, kobj)); | ||
1165 | mutex_unlock(&bdev->bd_mutex); | 842 | mutex_unlock(&bdev->bd_mutex); |
843 | return ret; | ||
1166 | } | 844 | } |
845 | EXPORT_SYMBOL_GPL(bd_link_disk_holder); | ||
1167 | 846 | ||
1168 | /** | 847 | static void bd_unlink_disk_holder(struct block_device *bdev) |
1169 | * bd_claim_by_disk - wrapper function for bd_claim_by_kobject() | ||
1170 | * | ||
1171 | * @bdev: block device to be claimed | ||
1172 | * @holder: holder's signature | ||
1173 | * @disk: holder's gendisk | ||
1174 | * | ||
1175 | * Call bd_claim_by_kobject() with getting @disk->slave_dir. | ||
1176 | */ | ||
1177 | int bd_claim_by_disk(struct block_device *bdev, void *holder, | ||
1178 | struct gendisk *disk) | ||
1179 | { | 848 | { |
1180 | return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir)); | 849 | struct gendisk *disk = bdev->bd_holder_disk; |
1181 | } | ||
1182 | EXPORT_SYMBOL_GPL(bd_claim_by_disk); | ||
1183 | 850 | ||
1184 | /** | 851 | bdev->bd_holder_disk = NULL; |
1185 | * bd_release_from_disk - wrapper function for bd_release_from_kobject() | 852 | if (!disk) |
1186 | * | 853 | return; |
1187 | * @bdev: block device to be claimed | ||
1188 | * @disk: holder's gendisk | ||
1189 | * | ||
1190 | * Call bd_release_from_kobject() and put @disk->slave_dir. | ||
1191 | */ | ||
1192 | void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk) | ||
1193 | { | ||
1194 | bd_release_from_kobject(bdev, disk->slave_dir); | ||
1195 | kobject_put(disk->slave_dir); | ||
1196 | } | ||
1197 | EXPORT_SYMBOL_GPL(bd_release_from_disk); | ||
1198 | #endif | ||
1199 | 854 | ||
1200 | /* | 855 | del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj); |
1201 | * Tries to open block device by device number. Use it ONLY if you | 856 | del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj); |
1202 | * really do not have anything better - i.e. when you are behind a | ||
1203 | * truly sucky interface and all you are given is a device number. _Never_ | ||
1204 | * to be used for internal purposes. If you ever need it - reconsider | ||
1205 | * your API. | ||
1206 | */ | ||
1207 | struct block_device *open_by_devnum(dev_t dev, fmode_t mode) | ||
1208 | { | ||
1209 | struct block_device *bdev = bdget(dev); | ||
1210 | int err = -ENOMEM; | ||
1211 | if (bdev) | ||
1212 | err = blkdev_get(bdev, mode); | ||
1213 | return err ? ERR_PTR(err) : bdev; | ||
1214 | } | 857 | } |
1215 | 858 | #else | |
1216 | EXPORT_SYMBOL(open_by_devnum); | 859 | static inline void bd_unlink_disk_holder(struct block_device *bdev) |
860 | { } | ||
861 | #endif | ||
1217 | 862 | ||
1218 | /** | 863 | /** |
1219 | * flush_disk - invalidates all buffer-cache entries on a disk | 864 | * flush_disk - invalidates all buffer-cache entries on a disk |
@@ -1309,10 +954,11 @@ int check_disk_change(struct block_device *bdev) | |||
1309 | { | 954 | { |
1310 | struct gendisk *disk = bdev->bd_disk; | 955 | struct gendisk *disk = bdev->bd_disk; |
1311 | const struct block_device_operations *bdops = disk->fops; | 956 | const struct block_device_operations *bdops = disk->fops; |
957 | unsigned int events; | ||
1312 | 958 | ||
1313 | if (!bdops->media_changed) | 959 | events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE | |
1314 | return 0; | 960 | DISK_EVENT_EJECT_REQUEST); |
1315 | if (!bdops->media_changed(bdev->bd_disk)) | 961 | if (!(events & DISK_EVENT_MEDIA_CHANGE)) |
1316 | return 0; | 962 | return 0; |
1317 | 963 | ||
1318 | flush_disk(bdev); | 964 | flush_disk(bdev); |
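check_disk_change() above now asks the new disk-events code whether a media change was seen instead of calling the driver's ->media_changed() hook directly. A hedged sketch of that polling idiom, assuming the caller already has a valid struct gendisk (demo_media_changed is an invented name):

#include <linux/genhd.h>

/* Returns true if a media-change event was pending for @disk. */
static bool demo_media_changed(struct gendisk *disk)
{
	unsigned int events;

	events = disk_clear_events(disk, DISK_EVENT_MEDIA_CHANGE |
					 DISK_EVENT_EJECT_REQUEST);
	return events & DISK_EVENT_MEDIA_CHANGE;
}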
@@ -1475,17 +1121,171 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
1475 | return ret; | 1121 | return ret; |
1476 | } | 1122 | } |
1477 | 1123 | ||
1478 | int blkdev_get(struct block_device *bdev, fmode_t mode) | 1124 | /** |
1125 | * blkdev_get - open a block device | ||
1126 | * @bdev: block_device to open | ||
1127 | * @mode: FMODE_* mask | ||
1128 | * @holder: exclusive holder identifier | ||
1129 | * | ||
1130 | * Open @bdev with @mode. If @mode includes %FMODE_EXCL, @bdev is | ||
1131 | * open with exclusive access. Specifying %FMODE_EXCL with %NULL | ||
1132 | * @holder is invalid. Exclusive opens may nest for the same @holder. | ||
1133 | * | ||
1134 | * On success, the reference count of @bdev is unchanged. On failure, | ||
1135 | * @bdev is put. | ||
1136 | * | ||
1137 | * CONTEXT: | ||
1138 | * Might sleep. | ||
1139 | * | ||
1140 | * RETURNS: | ||
1141 | * 0 on success, -errno on failure. | ||
1142 | */ | ||
1143 | int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder) | ||
1479 | { | 1144 | { |
1480 | return __blkdev_get(bdev, mode, 0); | 1145 | struct block_device *whole = NULL; |
1146 | int res; | ||
1147 | |||
1148 | WARN_ON_ONCE((mode & FMODE_EXCL) && !holder); | ||
1149 | |||
1150 | if ((mode & FMODE_EXCL) && holder) { | ||
1151 | whole = bd_start_claiming(bdev, holder); | ||
1152 | if (IS_ERR(whole)) { | ||
1153 | bdput(bdev); | ||
1154 | return PTR_ERR(whole); | ||
1155 | } | ||
1156 | } | ||
1157 | |||
1158 | res = __blkdev_get(bdev, mode, 0); | ||
1159 | |||
1160 | /* __blkdev_get() may alter read only status, check it afterwards */ | ||
1161 | if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) { | ||
1162 | __blkdev_put(bdev, mode, 0); | ||
1163 | res = -EACCES; | ||
1164 | } | ||
1165 | |||
1166 | if (whole) { | ||
1167 | /* finish claiming */ | ||
1168 | mutex_lock(&bdev->bd_mutex); | ||
1169 | spin_lock(&bdev_lock); | ||
1170 | |||
1171 | if (!res) { | ||
1172 | BUG_ON(!bd_may_claim(bdev, whole, holder)); | ||
1173 | /* | ||
1174 | * Note that for a whole device bd_holders | ||
1175 | * will be incremented twice, and bd_holder | ||
1176 | * will be set to bd_may_claim before being | ||
1177 | * set to holder | ||
1178 | */ | ||
1179 | whole->bd_holders++; | ||
1180 | whole->bd_holder = bd_may_claim; | ||
1181 | bdev->bd_holders++; | ||
1182 | bdev->bd_holder = holder; | ||
1183 | } | ||
1184 | |||
1185 | /* tell others that we're done */ | ||
1186 | BUG_ON(whole->bd_claiming != holder); | ||
1187 | whole->bd_claiming = NULL; | ||
1188 | wake_up_bit(&whole->bd_claiming, 0); | ||
1189 | |||
1190 | spin_unlock(&bdev_lock); | ||
1191 | |||
1192 | /* | ||
1193 | * Block event polling for write claims. Any write | ||
1194 | * holder makes the write_holder state stick until all | ||
1195 | * are released. This is good enough and tracking | ||
1196 | * individual writeable reference is too fragile given | ||
1197 | * the way @mode is used in blkdev_get/put(). | ||
1198 | */ | ||
1199 | if (!res && (mode & FMODE_WRITE) && !bdev->bd_write_holder) { | ||
1200 | bdev->bd_write_holder = true; | ||
1201 | disk_block_events(bdev->bd_disk); | ||
1202 | } | ||
1203 | |||
1204 | mutex_unlock(&bdev->bd_mutex); | ||
1205 | bdput(whole); | ||
1206 | } | ||
1207 | |||
1208 | return res; | ||
1481 | } | 1209 | } |
1482 | EXPORT_SYMBOL(blkdev_get); | 1210 | EXPORT_SYMBOL(blkdev_get); |
1483 | 1211 | ||
1212 | /** | ||
1213 | * blkdev_get_by_path - open a block device by name | ||
1214 | * @path: path to the block device to open | ||
1215 | * @mode: FMODE_* mask | ||
1216 | * @holder: exclusive holder identifier | ||
1217 | * | ||
1218 | * Open the blockdevice described by the device file at @path. @mode | ||
1219 | * and @holder are identical to blkdev_get(). | ||
1220 | * | ||
1221 | * On success, the returned block_device has reference count of one. | ||
1222 | * | ||
1223 | * CONTEXT: | ||
1224 | * Might sleep. | ||
1225 | * | ||
1226 | * RETURNS: | ||
1227 | * Pointer to block_device on success, ERR_PTR(-errno) on failure. | ||
1228 | */ | ||
1229 | struct block_device *blkdev_get_by_path(const char *path, fmode_t mode, | ||
1230 | void *holder) | ||
1231 | { | ||
1232 | struct block_device *bdev; | ||
1233 | int err; | ||
1234 | |||
1235 | bdev = lookup_bdev(path); | ||
1236 | if (IS_ERR(bdev)) | ||
1237 | return bdev; | ||
1238 | |||
1239 | err = blkdev_get(bdev, mode, holder); | ||
1240 | if (err) | ||
1241 | return ERR_PTR(err); | ||
1242 | |||
1243 | return bdev; | ||
1244 | } | ||
1245 | EXPORT_SYMBOL(blkdev_get_by_path); | ||
1246 | |||
1247 | /** | ||
1248 | * blkdev_get_by_dev - open a block device by device number | ||
1249 | * @dev: device number of block device to open | ||
1250 | * @mode: FMODE_* mask | ||
1251 | * @holder: exclusive holder identifier | ||
1252 | * | ||
1253 | * Open the blockdevice described by device number @dev. @mode and | ||
1254 | * @holder are identical to blkdev_get(). | ||
1255 | * | ||
1256 | * Use it ONLY if you really do not have anything better - i.e. when | ||
1257 | * you are behind a truly sucky interface and all you are given is a | ||
1258 | * device number. _Never_ to be used for internal purposes. If you | ||
1259 | * ever need it - reconsider your API. | ||
1260 | * | ||
1261 | * On success, the returned block_device has reference count of one. | ||
1262 | * | ||
1263 | * CONTEXT: | ||
1264 | * Might sleep. | ||
1265 | * | ||
1266 | * RETURNS: | ||
1267 | * Pointer to block_device on success, ERR_PTR(-errno) on failure. | ||
1268 | */ | ||
1269 | struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder) | ||
1270 | { | ||
1271 | struct block_device *bdev; | ||
1272 | int err; | ||
1273 | |||
1274 | bdev = bdget(dev); | ||
1275 | if (!bdev) | ||
1276 | return ERR_PTR(-ENOMEM); | ||
1277 | |||
1278 | err = blkdev_get(bdev, mode, holder); | ||
1279 | if (err) | ||
1280 | return ERR_PTR(err); | ||
1281 | |||
1282 | return bdev; | ||
1283 | } | ||
1284 | EXPORT_SYMBOL(blkdev_get_by_dev); | ||
1285 | |||
1484 | static int blkdev_open(struct inode * inode, struct file * filp) | 1286 | static int blkdev_open(struct inode * inode, struct file * filp) |
1485 | { | 1287 | { |
1486 | struct block_device *whole = NULL; | ||
1487 | struct block_device *bdev; | 1288 | struct block_device *bdev; |
1488 | int res; | ||
1489 | 1289 | ||
1490 | /* | 1290 | /* |
1491 | * Preserve backwards compatibility and allow large file access | 1291 | * Preserve backwards compatibility and allow large file access |
@@ -1506,26 +1306,9 @@ static int blkdev_open(struct inode * inode, struct file * filp) | |||
1506 | if (bdev == NULL) | 1306 | if (bdev == NULL) |
1507 | return -ENOMEM; | 1307 | return -ENOMEM; |
1508 | 1308 | ||
1509 | if (filp->f_mode & FMODE_EXCL) { | ||
1510 | whole = bd_start_claiming(bdev, filp); | ||
1511 | if (IS_ERR(whole)) { | ||
1512 | bdput(bdev); | ||
1513 | return PTR_ERR(whole); | ||
1514 | } | ||
1515 | } | ||
1516 | |||
1517 | filp->f_mapping = bdev->bd_inode->i_mapping; | 1309 | filp->f_mapping = bdev->bd_inode->i_mapping; |
1518 | 1310 | ||
1519 | res = blkdev_get(bdev, filp->f_mode); | 1311 | return blkdev_get(bdev, filp->f_mode, filp); |
1520 | |||
1521 | if (whole) { | ||
1522 | if (res == 0) | ||
1523 | bd_finish_claiming(bdev, whole, filp); | ||
1524 | else | ||
1525 | bd_abort_claiming(whole, filp); | ||
1526 | } | ||
1527 | |||
1528 | return res; | ||
1529 | } | 1312 | } |
1530 | 1313 | ||
1531 | static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | 1314 | static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) |
@@ -1539,6 +1322,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | |||
1539 | bdev->bd_part_count--; | 1322 | bdev->bd_part_count--; |
1540 | 1323 | ||
1541 | if (!--bdev->bd_openers) { | 1324 | if (!--bdev->bd_openers) { |
1325 | WARN_ON_ONCE(bdev->bd_holders); | ||
1542 | sync_blockdev(bdev); | 1326 | sync_blockdev(bdev); |
1543 | kill_bdev(bdev); | 1327 | kill_bdev(bdev); |
1544 | } | 1328 | } |
@@ -1569,6 +1353,45 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | |||
1569 | 1353 | ||
1570 | int blkdev_put(struct block_device *bdev, fmode_t mode) | 1354 | int blkdev_put(struct block_device *bdev, fmode_t mode) |
1571 | { | 1355 | { |
1356 | if (mode & FMODE_EXCL) { | ||
1357 | bool bdev_free; | ||
1358 | |||
1359 | /* | ||
1360 | * Release a claim on the device. The holder fields | ||
1361 | * are protected with bdev_lock. bd_mutex is to | ||
1362 | * synchronize disk_holder unlinking. | ||
1363 | */ | ||
1364 | mutex_lock(&bdev->bd_mutex); | ||
1365 | spin_lock(&bdev_lock); | ||
1366 | |||
1367 | WARN_ON_ONCE(--bdev->bd_holders < 0); | ||
1368 | WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0); | ||
1369 | |||
1370 | /* bd_contains might point to self, check in a separate step */ | ||
1371 | if ((bdev_free = !bdev->bd_holders)) | ||
1372 | bdev->bd_holder = NULL; | ||
1373 | if (!bdev->bd_contains->bd_holders) | ||
1374 | bdev->bd_contains->bd_holder = NULL; | ||
1375 | |||
1376 | spin_unlock(&bdev_lock); | ||
1377 | |||
1378 | /* | ||
1379 | * If this was the last claim, remove holder link and | ||
1380 | * unblock evpoll if it was a write holder. | ||
1381 | */ | ||
1382 | if (bdev_free) { | ||
1383 | bd_unlink_disk_holder(bdev); | ||
1384 | if (bdev->bd_write_holder) { | ||
1385 | disk_unblock_events(bdev->bd_disk); | ||
1386 | bdev->bd_write_holder = false; | ||
1387 | } else | ||
1388 | disk_check_events(bdev->bd_disk); | ||
1389 | } | ||
1390 | |||
1391 | mutex_unlock(&bdev->bd_mutex); | ||
1392 | } else | ||
1393 | disk_check_events(bdev->bd_disk); | ||
1394 | |||
1572 | return __blkdev_put(bdev, mode, 0); | 1395 | return __blkdev_put(bdev, mode, 0); |
1573 | } | 1396 | } |
1574 | EXPORT_SYMBOL(blkdev_put); | 1397 | EXPORT_SYMBOL(blkdev_put); |
@@ -1576,8 +1399,7 @@ EXPORT_SYMBOL(blkdev_put); | |||
1576 | static int blkdev_close(struct inode * inode, struct file * filp) | 1399 | static int blkdev_close(struct inode * inode, struct file * filp) |
1577 | { | 1400 | { |
1578 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); | 1401 | struct block_device *bdev = I_BDEV(filp->f_mapping->host); |
1579 | if (bdev->bd_holder == filp) | 1402 | |
1580 | bd_release(bdev); | ||
1581 | return blkdev_put(bdev, filp->f_mode); | 1403 | return blkdev_put(bdev, filp->f_mode); |
1582 | } | 1404 | } |
1583 | 1405 | ||
@@ -1722,67 +1544,6 @@ fail: | |||
1722 | } | 1544 | } |
1723 | EXPORT_SYMBOL(lookup_bdev); | 1545 | EXPORT_SYMBOL(lookup_bdev); |
1724 | 1546 | ||
1725 | /** | ||
1726 | * open_bdev_exclusive - open a block device by name and set it up for use | ||
1727 | * | ||
1728 | * @path: special file representing the block device | ||
1729 | * @mode: FMODE_... combination to pass be used | ||
1730 | * @holder: owner for exclusion | ||
1731 | * | ||
1732 | * Open the blockdevice described by the special file at @path, claim it | ||
1733 | * for the @holder. | ||
1734 | */ | ||
1735 | struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder) | ||
1736 | { | ||
1737 | struct block_device *bdev, *whole; | ||
1738 | int error; | ||
1739 | |||
1740 | bdev = lookup_bdev(path); | ||
1741 | if (IS_ERR(bdev)) | ||
1742 | return bdev; | ||
1743 | |||
1744 | whole = bd_start_claiming(bdev, holder); | ||
1745 | if (IS_ERR(whole)) { | ||
1746 | bdput(bdev); | ||
1747 | return whole; | ||
1748 | } | ||
1749 | |||
1750 | error = blkdev_get(bdev, mode); | ||
1751 | if (error) | ||
1752 | goto out_abort_claiming; | ||
1753 | |||
1754 | error = -EACCES; | ||
1755 | if ((mode & FMODE_WRITE) && bdev_read_only(bdev)) | ||
1756 | goto out_blkdev_put; | ||
1757 | |||
1758 | bd_finish_claiming(bdev, whole, holder); | ||
1759 | return bdev; | ||
1760 | |||
1761 | out_blkdev_put: | ||
1762 | blkdev_put(bdev, mode); | ||
1763 | out_abort_claiming: | ||
1764 | bd_abort_claiming(whole, holder); | ||
1765 | return ERR_PTR(error); | ||
1766 | } | ||
1767 | |||
1768 | EXPORT_SYMBOL(open_bdev_exclusive); | ||
1769 | |||
1770 | /** | ||
1771 | * close_bdev_exclusive - close a blockdevice opened by open_bdev_exclusive() | ||
1772 | * | ||
1773 | * @bdev: blockdevice to close | ||
1774 | * @mode: mode, must match that used to open. | ||
1775 | * | ||
1776 | * This is the counterpart to open_bdev_exclusive(). | ||
1777 | */ | ||
1778 | void close_bdev_exclusive(struct block_device *bdev, fmode_t mode) | ||
1779 | { | ||
1780 | bd_release(bdev); | ||
1781 | blkdev_put(bdev, mode); | ||
1782 | } | ||
1783 | |||
1784 | EXPORT_SYMBOL(close_bdev_exclusive); | ||
1785 | |||
1786 | int __invalidate_device(struct block_device *bdev) | 1547 | int __invalidate_device(struct block_device *bdev) |
1787 | { | 1548 | { |
1788 | struct super_block *sb = get_super(bdev); | 1549 | struct super_block *sb = get_super(bdev); |
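Taken together, the block_dev.c changes fold the old bd_claim()/bd_release() pair into blkdev_get()/blkdev_put() via FMODE_EXCL plus a holder cookie, and they add blkdev_get_by_path() and blkdev_get_by_dev() as the replacements for open_bdev_exclusive() and open_by_devnum(). A hedged sketch of a caller using the new interface — demo_open_dev/demo_close_dev are invented names, and the pattern simply mirrors the filesystem conversions later in this diff:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct block_device *demo_open_dev(const char *path, void *holder)
{
	struct block_device *bdev;

	/* FMODE_EXCL turns the open into an exclusive claim owned by @holder */
	bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
	if (IS_ERR(bdev))
		return bdev;		/* ERR_PTR(-errno), nothing to release */

	return bdev;			/* reference count is one on success */
}

static void demo_close_dev(struct block_device *bdev)
{
	/* the mode must match the open, FMODE_EXCL included, to drop the claim */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}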
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 6b9884507837..1718e1a5c320 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -493,7 +493,7 @@ again: | |||
493 | continue; | 493 | continue; |
494 | 494 | ||
495 | if (device->bdev) { | 495 | if (device->bdev) { |
496 | close_bdev_exclusive(device->bdev, device->mode); | 496 | blkdev_put(device->bdev, device->mode); |
497 | device->bdev = NULL; | 497 | device->bdev = NULL; |
498 | fs_devices->open_devices--; | 498 | fs_devices->open_devices--; |
499 | } | 499 | } |
@@ -527,7 +527,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
527 | 527 | ||
528 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 528 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
529 | if (device->bdev) { | 529 | if (device->bdev) { |
530 | close_bdev_exclusive(device->bdev, device->mode); | 530 | blkdev_put(device->bdev, device->mode); |
531 | fs_devices->open_devices--; | 531 | fs_devices->open_devices--; |
532 | } | 532 | } |
533 | if (device->writeable) { | 533 | if (device->writeable) { |
@@ -584,13 +584,15 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
584 | int seeding = 1; | 584 | int seeding = 1; |
585 | int ret = 0; | 585 | int ret = 0; |
586 | 586 | ||
587 | flags |= FMODE_EXCL; | ||
588 | |||
587 | list_for_each_entry(device, head, dev_list) { | 589 | list_for_each_entry(device, head, dev_list) { |
588 | if (device->bdev) | 590 | if (device->bdev) |
589 | continue; | 591 | continue; |
590 | if (!device->name) | 592 | if (!device->name) |
591 | continue; | 593 | continue; |
592 | 594 | ||
593 | bdev = open_bdev_exclusive(device->name, flags, holder); | 595 | bdev = blkdev_get_by_path(device->name, flags, holder); |
594 | if (IS_ERR(bdev)) { | 596 | if (IS_ERR(bdev)) { |
595 | printk(KERN_INFO "open %s failed\n", device->name); | 597 | printk(KERN_INFO "open %s failed\n", device->name); |
596 | goto error; | 598 | goto error; |
@@ -642,7 +644,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
642 | error_brelse: | 644 | error_brelse: |
643 | brelse(bh); | 645 | brelse(bh); |
644 | error_close: | 646 | error_close: |
645 | close_bdev_exclusive(bdev, FMODE_READ); | 647 | blkdev_put(bdev, flags); |
646 | error: | 648 | error: |
647 | continue; | 649 | continue; |
648 | } | 650 | } |
@@ -688,7 +690,8 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, | |||
688 | 690 | ||
689 | mutex_lock(&uuid_mutex); | 691 | mutex_lock(&uuid_mutex); |
690 | 692 | ||
691 | bdev = open_bdev_exclusive(path, flags, holder); | 693 | flags |= FMODE_EXCL; |
694 | bdev = blkdev_get_by_path(path, flags, holder); | ||
692 | 695 | ||
693 | if (IS_ERR(bdev)) { | 696 | if (IS_ERR(bdev)) { |
694 | ret = PTR_ERR(bdev); | 697 | ret = PTR_ERR(bdev); |
@@ -720,7 +723,7 @@ int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, | |||
720 | 723 | ||
721 | brelse(bh); | 724 | brelse(bh); |
722 | error_close: | 725 | error_close: |
723 | close_bdev_exclusive(bdev, flags); | 726 | blkdev_put(bdev, flags); |
724 | error: | 727 | error: |
725 | mutex_unlock(&uuid_mutex); | 728 | mutex_unlock(&uuid_mutex); |
726 | return ret; | 729 | return ret; |
@@ -1183,8 +1186,8 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1183 | goto out; | 1186 | goto out; |
1184 | } | 1187 | } |
1185 | } else { | 1188 | } else { |
1186 | bdev = open_bdev_exclusive(device_path, FMODE_READ, | 1189 | bdev = blkdev_get_by_path(device_path, FMODE_READ | FMODE_EXCL, |
1187 | root->fs_info->bdev_holder); | 1190 | root->fs_info->bdev_holder); |
1188 | if (IS_ERR(bdev)) { | 1191 | if (IS_ERR(bdev)) { |
1189 | ret = PTR_ERR(bdev); | 1192 | ret = PTR_ERR(bdev); |
1190 | goto out; | 1193 | goto out; |
@@ -1251,7 +1254,7 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1251 | root->fs_info->fs_devices->latest_bdev = next_device->bdev; | 1254 | root->fs_info->fs_devices->latest_bdev = next_device->bdev; |
1252 | 1255 | ||
1253 | if (device->bdev) { | 1256 | if (device->bdev) { |
1254 | close_bdev_exclusive(device->bdev, device->mode); | 1257 | blkdev_put(device->bdev, device->mode); |
1255 | device->bdev = NULL; | 1258 | device->bdev = NULL; |
1256 | device->fs_devices->open_devices--; | 1259 | device->fs_devices->open_devices--; |
1257 | } | 1260 | } |
@@ -1294,7 +1297,7 @@ error_brelse: | |||
1294 | brelse(bh); | 1297 | brelse(bh); |
1295 | error_close: | 1298 | error_close: |
1296 | if (bdev) | 1299 | if (bdev) |
1297 | close_bdev_exclusive(bdev, FMODE_READ); | 1300 | blkdev_put(bdev, FMODE_READ | FMODE_EXCL); |
1298 | out: | 1301 | out: |
1299 | mutex_unlock(&root->fs_info->volume_mutex); | 1302 | mutex_unlock(&root->fs_info->volume_mutex); |
1300 | mutex_unlock(&uuid_mutex); | 1303 | mutex_unlock(&uuid_mutex); |
@@ -1446,7 +1449,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1446 | if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) | 1449 | if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) |
1447 | return -EINVAL; | 1450 | return -EINVAL; |
1448 | 1451 | ||
1449 | bdev = open_bdev_exclusive(device_path, 0, root->fs_info->bdev_holder); | 1452 | bdev = blkdev_get_by_path(device_path, FMODE_EXCL, |
1453 | root->fs_info->bdev_holder); | ||
1450 | if (IS_ERR(bdev)) | 1454 | if (IS_ERR(bdev)) |
1451 | return PTR_ERR(bdev); | 1455 | return PTR_ERR(bdev); |
1452 | 1456 | ||
@@ -1572,7 +1576,7 @@ out: | |||
1572 | mutex_unlock(&root->fs_info->volume_mutex); | 1576 | mutex_unlock(&root->fs_info->volume_mutex); |
1573 | return ret; | 1577 | return ret; |
1574 | error: | 1578 | error: |
1575 | close_bdev_exclusive(bdev, 0); | 1579 | blkdev_put(bdev, FMODE_EXCL); |
1576 | if (seeding_dev) { | 1580 | if (seeding_dev) { |
1577 | mutex_unlock(&uuid_mutex); | 1581 | mutex_unlock(&uuid_mutex); |
1578 | up_write(&sb->s_umount); | 1582 | up_write(&sb->s_umount); |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 2740db49eb04..1be781079450 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -50,7 +50,7 @@ struct btrfs_device { | |||
50 | 50 | ||
51 | struct block_device *bdev; | 51 | struct block_device *bdev; |
52 | 52 | ||
53 | /* the mode sent to open_bdev_exclusive */ | 53 | /* the mode sent to blkdev_get */ |
54 | fmode_t mode; | 54 | fmode_t mode; |
55 | 55 | ||
56 | char *name; | 56 | char *name; |
diff --git a/fs/char_dev.c b/fs/char_dev.c
index 6e99b9ddd4e9..dca9e5e0f73b 100644
--- a/fs/char_dev.c
+++ b/fs/char_dev.c
@@ -59,7 +59,7 @@ static struct char_device_struct { | |||
59 | } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; | 59 | } *chrdevs[CHRDEV_MAJOR_HASH_SIZE]; |
60 | 60 | ||
61 | /* index in the above */ | 61 | /* index in the above */ |
62 | static inline int major_to_index(int major) | 62 | static inline int major_to_index(unsigned major) |
63 | { | 63 | { |
64 | return major % CHRDEV_MAJOR_HASH_SIZE; | 64 | return major % CHRDEV_MAJOR_HASH_SIZE; |
65 | } | 65 | } |
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index b7d0554631e4..7aa767d4f06f 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -364,7 +364,7 @@ static struct block_device *ext3_blkdev_get(dev_t dev, struct super_block *sb) | |||
364 | struct block_device *bdev; | 364 | struct block_device *bdev; |
365 | char b[BDEVNAME_SIZE]; | 365 | char b[BDEVNAME_SIZE]; |
366 | 366 | ||
367 | bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); | 367 | bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); |
368 | if (IS_ERR(bdev)) | 368 | if (IS_ERR(bdev)) |
369 | goto fail; | 369 | goto fail; |
370 | return bdev; | 370 | return bdev; |
@@ -381,8 +381,7 @@ fail: | |||
381 | */ | 381 | */ |
382 | static int ext3_blkdev_put(struct block_device *bdev) | 382 | static int ext3_blkdev_put(struct block_device *bdev) |
383 | { | 383 | { |
384 | bd_release(bdev); | 384 | return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
385 | return blkdev_put(bdev, FMODE_READ|FMODE_WRITE); | ||
386 | } | 385 | } |
387 | 386 | ||
388 | static int ext3_blkdev_remove(struct ext3_sb_info *sbi) | 387 | static int ext3_blkdev_remove(struct ext3_sb_info *sbi) |
@@ -2162,13 +2161,6 @@ static journal_t *ext3_get_dev_journal(struct super_block *sb, | |||
2162 | if (bdev == NULL) | 2161 | if (bdev == NULL) |
2163 | return NULL; | 2162 | return NULL; |
2164 | 2163 | ||
2165 | if (bd_claim(bdev, sb)) { | ||
2166 | ext3_msg(sb, KERN_ERR, | ||
2167 | "error: failed to claim external journal device"); | ||
2168 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE); | ||
2169 | return NULL; | ||
2170 | } | ||
2171 | |||
2172 | blocksize = sb->s_blocksize; | 2164 | blocksize = sb->s_blocksize; |
2173 | hblock = bdev_logical_block_size(bdev); | 2165 | hblock = bdev_logical_block_size(bdev); |
2174 | if (blocksize < hblock) { | 2166 | if (blocksize < hblock) { |
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 29c80f6d8b27..cb10a06775e4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -657,7 +657,7 @@ static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb) | |||
657 | struct block_device *bdev; | 657 | struct block_device *bdev; |
658 | char b[BDEVNAME_SIZE]; | 658 | char b[BDEVNAME_SIZE]; |
659 | 659 | ||
660 | bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); | 660 | bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, sb); |
661 | if (IS_ERR(bdev)) | 661 | if (IS_ERR(bdev)) |
662 | goto fail; | 662 | goto fail; |
663 | return bdev; | 663 | return bdev; |
@@ -673,8 +673,7 @@ fail: | |||
673 | */ | 673 | */ |
674 | static int ext4_blkdev_put(struct block_device *bdev) | 674 | static int ext4_blkdev_put(struct block_device *bdev) |
675 | { | 675 | { |
676 | bd_release(bdev); | 676 | return blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
677 | return blkdev_put(bdev, FMODE_READ|FMODE_WRITE); | ||
678 | } | 677 | } |
679 | 678 | ||
680 | static int ext4_blkdev_remove(struct ext4_sb_info *sbi) | 679 | static int ext4_blkdev_remove(struct ext4_sb_info *sbi) |
@@ -3778,13 +3777,6 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb, | |||
3778 | if (bdev == NULL) | 3777 | if (bdev == NULL) |
3779 | return NULL; | 3778 | return NULL; |
3780 | 3779 | ||
3781 | if (bd_claim(bdev, sb)) { | ||
3782 | ext4_msg(sb, KERN_ERR, | ||
3783 | "failed to claim external journal device"); | ||
3784 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE); | ||
3785 | return NULL; | ||
3786 | } | ||
3787 | |||
3788 | blocksize = sb->s_blocksize; | 3780 | blocksize = sb->s_blocksize; |
3789 | hblock = bdev_logical_block_size(bdev); | 3781 | hblock = bdev_logical_block_size(bdev); |
3790 | if (blocksize < hblock) { | 3782 | if (blocksize < hblock) { |
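Both the ext3 and ext4 hunks above open the external journal with blkdev_get_by_dev() and let the superblock act as the exclusive holder, which replaces the old open_by_devnum() + bd_claim(bdev, sb) sequence. A minimal sketch of that by-device-number pattern, with invented demo_* names:

#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/err.h>

static struct block_device *demo_get_journal_bdev(dev_t dev,
						  struct super_block *sb)
{
	/* @sb is the exclusive holder, as in the ext3/ext4 hunks above */
	return blkdev_get_by_dev(dev, FMODE_READ | FMODE_WRITE | FMODE_EXCL, sb);
}

static void demo_put_journal_bdev(struct block_device *bdev)
{
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}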
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 693f4470a2df..777927ce6f79 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1268,7 +1268,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, | |||
1268 | { | 1268 | { |
1269 | struct block_device *bdev; | 1269 | struct block_device *bdev; |
1270 | struct super_block *s; | 1270 | struct super_block *s; |
1271 | fmode_t mode = FMODE_READ; | 1271 | fmode_t mode = FMODE_READ | FMODE_EXCL; |
1272 | int error; | 1272 | int error; |
1273 | struct gfs2_args args; | 1273 | struct gfs2_args args; |
1274 | struct gfs2_sbd *sdp; | 1274 | struct gfs2_sbd *sdp; |
@@ -1276,7 +1276,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, | |||
1276 | if (!(flags & MS_RDONLY)) | 1276 | if (!(flags & MS_RDONLY)) |
1277 | mode |= FMODE_WRITE; | 1277 | mode |= FMODE_WRITE; |
1278 | 1278 | ||
1279 | bdev = open_bdev_exclusive(dev_name, mode, fs_type); | 1279 | bdev = blkdev_get_by_path(dev_name, mode, fs_type); |
1280 | if (IS_ERR(bdev)) | 1280 | if (IS_ERR(bdev)) |
1281 | return ERR_CAST(bdev); | 1281 | return ERR_CAST(bdev); |
1282 | 1282 | ||
@@ -1298,7 +1298,7 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags, | |||
1298 | goto error_bdev; | 1298 | goto error_bdev; |
1299 | 1299 | ||
1300 | if (s->s_root) | 1300 | if (s->s_root) |
1301 | close_bdev_exclusive(bdev, mode); | 1301 | blkdev_put(bdev, mode); |
1302 | 1302 | ||
1303 | memset(&args, 0, sizeof(args)); | 1303 | memset(&args, 0, sizeof(args)); |
1304 | args.ar_quota = GFS2_QUOTA_DEFAULT; | 1304 | args.ar_quota = GFS2_QUOTA_DEFAULT; |
@@ -1342,7 +1342,7 @@ error_super: | |||
1342 | deactivate_locked_super(s); | 1342 | deactivate_locked_super(s); |
1343 | return ERR_PTR(error); | 1343 | return ERR_PTR(error); |
1344 | error_bdev: | 1344 | error_bdev: |
1345 | close_bdev_exclusive(bdev, mode); | 1345 | blkdev_put(bdev, mode); |
1346 | return ERR_PTR(error); | 1346 | return ERR_PTR(error); |
1347 | } | 1347 | } |
1348 | 1348 | ||
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index e1b8493b9aaa..278e3fb40b71 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1120,16 +1120,13 @@ int lmLogOpen(struct super_block *sb) | |||
1120 | * file systems to log may have n-to-1 relationship; | 1120 | * file systems to log may have n-to-1 relationship; |
1121 | */ | 1121 | */ |
1122 | 1122 | ||
1123 | bdev = open_by_devnum(sbi->logdev, FMODE_READ|FMODE_WRITE); | 1123 | bdev = blkdev_get_by_dev(sbi->logdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, |
1124 | log); | ||
1124 | if (IS_ERR(bdev)) { | 1125 | if (IS_ERR(bdev)) { |
1125 | rc = -PTR_ERR(bdev); | 1126 | rc = -PTR_ERR(bdev); |
1126 | goto free; | 1127 | goto free; |
1127 | } | 1128 | } |
1128 | 1129 | ||
1129 | if ((rc = bd_claim(bdev, log))) { | ||
1130 | goto close; | ||
1131 | } | ||
1132 | |||
1133 | log->bdev = bdev; | 1130 | log->bdev = bdev; |
1134 | memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid)); | 1131 | memcpy(log->uuid, sbi->loguuid, sizeof(log->uuid)); |
1135 | 1132 | ||
@@ -1137,7 +1134,7 @@ int lmLogOpen(struct super_block *sb) | |||
1137 | * initialize log: | 1134 | * initialize log: |
1138 | */ | 1135 | */ |
1139 | if ((rc = lmLogInit(log))) | 1136 | if ((rc = lmLogInit(log))) |
1140 | goto unclaim; | 1137 | goto close; |
1141 | 1138 | ||
1142 | list_add(&log->journal_list, &jfs_external_logs); | 1139 | list_add(&log->journal_list, &jfs_external_logs); |
1143 | 1140 | ||
@@ -1163,11 +1160,8 @@ journal_found: | |||
1163 | list_del(&log->journal_list); | 1160 | list_del(&log->journal_list); |
1164 | lbmLogShutdown(log); | 1161 | lbmLogShutdown(log); |
1165 | 1162 | ||
1166 | unclaim: | ||
1167 | bd_release(bdev); | ||
1168 | |||
1169 | close: /* close external log device */ | 1163 | close: /* close external log device */ |
1170 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE); | 1164 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
1171 | 1165 | ||
1172 | free: /* free log descriptor */ | 1166 | free: /* free log descriptor */ |
1173 | mutex_unlock(&jfs_log_mutex); | 1167 | mutex_unlock(&jfs_log_mutex); |
@@ -1512,8 +1506,7 @@ int lmLogClose(struct super_block *sb) | |||
1512 | bdev = log->bdev; | 1506 | bdev = log->bdev; |
1513 | rc = lmLogShutdown(log); | 1507 | rc = lmLogShutdown(log); |
1514 | 1508 | ||
1515 | bd_release(bdev); | 1509 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
1516 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE); | ||
1517 | 1510 | ||
1518 | kfree(log); | 1511 | kfree(log); |
1519 | 1512 | ||
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 92ca6fbe09bd..723bc5bca09a 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -300,7 +300,7 @@ static int bdev_write_sb(struct super_block *sb, struct page *page) | |||
300 | 300 | ||
301 | static void bdev_put_device(struct logfs_super *s) | 301 | static void bdev_put_device(struct logfs_super *s) |
302 | { | 302 | { |
303 | close_bdev_exclusive(s->s_bdev, FMODE_READ|FMODE_WRITE); | 303 | blkdev_put(s->s_bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
304 | } | 304 | } |
305 | 305 | ||
306 | static int bdev_can_write_buf(struct super_block *sb, u64 ofs) | 306 | static int bdev_can_write_buf(struct super_block *sb, u64 ofs) |
@@ -325,13 +325,14 @@ int logfs_get_sb_bdev(struct logfs_super *p, struct file_system_type *type, | |||
325 | { | 325 | { |
326 | struct block_device *bdev; | 326 | struct block_device *bdev; |
327 | 327 | ||
328 | bdev = open_bdev_exclusive(devname, FMODE_READ|FMODE_WRITE, type); | 328 | bdev = blkdev_get_by_path(devname, FMODE_READ|FMODE_WRITE|FMODE_EXCL, |
329 | type); | ||
329 | if (IS_ERR(bdev)) | 330 | if (IS_ERR(bdev)) |
330 | return PTR_ERR(bdev); | 331 | return PTR_ERR(bdev); |
331 | 332 | ||
332 | if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { | 333 | if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) { |
333 | int mtdnr = MINOR(bdev->bd_dev); | 334 | int mtdnr = MINOR(bdev->bd_dev); |
334 | close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE); | 335 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
335 | return logfs_get_sb_mtd(p, mtdnr); | 336 | return logfs_get_sb_mtd(p, mtdnr); |
336 | } | 337 | } |
337 | 338 | ||
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 3a359023c9f7..230b79fbf005 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -845,11 +845,6 @@ nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | |||
845 | struct page **pp = rqstp->rq_respages + rqstp->rq_resused; | 845 | struct page **pp = rqstp->rq_respages + rqstp->rq_resused; |
846 | struct page *page = buf->page; | 846 | struct page *page = buf->page; |
847 | size_t size; | 847 | size_t size; |
848 | int ret; | ||
849 | |||
850 | ret = buf->ops->confirm(pipe, buf); | ||
851 | if (unlikely(ret)) | ||
852 | return ret; | ||
853 | 848 | ||
854 | size = sd->len; | 849 | size = sd->len; |
855 | 850 | ||
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 70dfdd532b83..0994f6a76c07 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -1163,14 +1163,14 @@ nilfs_mount(struct file_system_type *fs_type, int flags, | |||
1163 | { | 1163 | { |
1164 | struct nilfs_super_data sd; | 1164 | struct nilfs_super_data sd; |
1165 | struct super_block *s; | 1165 | struct super_block *s; |
1166 | fmode_t mode = FMODE_READ; | 1166 | fmode_t mode = FMODE_READ | FMODE_EXCL; |
1167 | struct dentry *root_dentry; | 1167 | struct dentry *root_dentry; |
1168 | int err, s_new = false; | 1168 | int err, s_new = false; |
1169 | 1169 | ||
1170 | if (!(flags & MS_RDONLY)) | 1170 | if (!(flags & MS_RDONLY)) |
1171 | mode |= FMODE_WRITE; | 1171 | mode |= FMODE_WRITE; |
1172 | 1172 | ||
1173 | sd.bdev = open_bdev_exclusive(dev_name, mode, fs_type); | 1173 | sd.bdev = blkdev_get_by_path(dev_name, mode, fs_type); |
1174 | if (IS_ERR(sd.bdev)) | 1174 | if (IS_ERR(sd.bdev)) |
1175 | return ERR_CAST(sd.bdev); | 1175 | return ERR_CAST(sd.bdev); |
1176 | 1176 | ||
@@ -1249,7 +1249,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, | |||
1249 | } | 1249 | } |
1250 | 1250 | ||
1251 | if (!s_new) | 1251 | if (!s_new) |
1252 | close_bdev_exclusive(sd.bdev, mode); | 1252 | blkdev_put(sd.bdev, mode); |
1253 | 1253 | ||
1254 | return root_dentry; | 1254 | return root_dentry; |
1255 | 1255 | ||
@@ -1258,7 +1258,7 @@ nilfs_mount(struct file_system_type *fs_type, int flags, | |||
1258 | 1258 | ||
1259 | failed: | 1259 | failed: |
1260 | if (!s_new) | 1260 | if (!s_new) |
1261 | close_bdev_exclusive(sd.bdev, mode); | 1261 | blkdev_put(sd.bdev, mode); |
1262 | return ERR_PTR(err); | 1262 | return ERR_PTR(err); |
1263 | } | 1263 | } |
1264 | 1264 | ||
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index a6cc05302e9f..b108e863d8f6 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -1729,7 +1729,7 @@ static ssize_t o2hb_region_dev_write(struct o2hb_region *reg, | |||
1729 | goto out; | 1729 | goto out; |
1730 | 1730 | ||
1731 | reg->hr_bdev = I_BDEV(filp->f_mapping->host); | 1731 | reg->hr_bdev = I_BDEV(filp->f_mapping->host); |
1732 | ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ); | 1732 | ret = blkdev_get(reg->hr_bdev, FMODE_WRITE | FMODE_READ, NULL); |
1733 | if (ret) { | 1733 | if (ret) { |
1734 | reg->hr_bdev = NULL; | 1734 | reg->hr_bdev = NULL; |
1735 | goto out; | 1735 | goto out; |
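The ocfs2 heartbeat code opens its region device without FMODE_EXCL, so the new @holder argument is simply NULL; per the blkdev_get() kernel-doc earlier in this diff, a holder only matters (and is required) for exclusive opens. A hedged sketch, assuming the caller already holds a reference on the block_device (demo_open_nonexclusive is an invented name):

#include <linux/fs.h>
#include <linux/blkdev.h>

static int demo_open_nonexclusive(struct block_device *bdev)
{
	/* non-exclusive open: no FMODE_EXCL, hence no holder cookie */
	return blkdev_get(bdev, FMODE_READ | FMODE_WRITE, NULL);
}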
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 0a8b0ad0c7e2..9c21119512b9 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -237,6 +237,13 @@ ssize_t part_size_show(struct device *dev, | |||
237 | return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); | 237 | return sprintf(buf, "%llu\n",(unsigned long long)p->nr_sects); |
238 | } | 238 | } |
239 | 239 | ||
240 | ssize_t part_ro_show(struct device *dev, | ||
241 | struct device_attribute *attr, char *buf) | ||
242 | { | ||
243 | struct hd_struct *p = dev_to_part(dev); | ||
244 | return sprintf(buf, "%d\n", p->policy ? 1 : 0); | ||
245 | } | ||
246 | |||
240 | ssize_t part_alignment_offset_show(struct device *dev, | 247 | ssize_t part_alignment_offset_show(struct device *dev, |
241 | struct device_attribute *attr, char *buf) | 248 | struct device_attribute *attr, char *buf) |
242 | { | 249 | { |
@@ -312,6 +319,7 @@ ssize_t part_fail_store(struct device *dev, | |||
312 | static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); | 319 | static DEVICE_ATTR(partition, S_IRUGO, part_partition_show, NULL); |
313 | static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); | 320 | static DEVICE_ATTR(start, S_IRUGO, part_start_show, NULL); |
314 | static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); | 321 | static DEVICE_ATTR(size, S_IRUGO, part_size_show, NULL); |
322 | static DEVICE_ATTR(ro, S_IRUGO, part_ro_show, NULL); | ||
315 | static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); | 323 | static DEVICE_ATTR(alignment_offset, S_IRUGO, part_alignment_offset_show, NULL); |
316 | static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show, | 324 | static DEVICE_ATTR(discard_alignment, S_IRUGO, part_discard_alignment_show, |
317 | NULL); | 325 | NULL); |
@@ -326,6 +334,7 @@ static struct attribute *part_attrs[] = { | |||
326 | &dev_attr_partition.attr, | 334 | &dev_attr_partition.attr, |
327 | &dev_attr_start.attr, | 335 | &dev_attr_start.attr, |
328 | &dev_attr_size.attr, | 336 | &dev_attr_size.attr, |
337 | &dev_attr_ro.attr, | ||
329 | &dev_attr_alignment_offset.attr, | 338 | &dev_attr_alignment_offset.attr, |
330 | &dev_attr_discard_alignment.attr, | 339 | &dev_attr_discard_alignment.attr, |
331 | &dev_attr_stat.attr, | 340 | &dev_attr_stat.attr, |
@@ -372,6 +381,11 @@ static void delete_partition_rcu_cb(struct rcu_head *head) | |||
372 | put_device(part_to_dev(part)); | 381 | put_device(part_to_dev(part)); |
373 | } | 382 | } |
374 | 383 | ||
384 | void __delete_partition(struct hd_struct *part) | ||
385 | { | ||
386 | call_rcu(&part->rcu_head, delete_partition_rcu_cb); | ||
387 | } | ||
388 | |||
375 | void delete_partition(struct gendisk *disk, int partno) | 389 | void delete_partition(struct gendisk *disk, int partno) |
376 | { | 390 | { |
377 | struct disk_part_tbl *ptbl = disk->part_tbl; | 391 | struct disk_part_tbl *ptbl = disk->part_tbl; |
@@ -390,7 +404,7 @@ void delete_partition(struct gendisk *disk, int partno) | |||
390 | kobject_put(part->holder_dir); | 404 | kobject_put(part->holder_dir); |
391 | device_del(part_to_dev(part)); | 405 | device_del(part_to_dev(part)); |
392 | 406 | ||
393 | call_rcu(&part->rcu_head, delete_partition_rcu_cb); | 407 | hd_struct_put(part); |
394 | } | 408 | } |
395 | 409 | ||
396 | static ssize_t whole_disk_show(struct device *dev, | 410 | static ssize_t whole_disk_show(struct device *dev, |
@@ -489,6 +503,7 @@ struct hd_struct *add_partition(struct gendisk *disk, int partno, | |||
489 | if (!dev_get_uevent_suppress(ddev)) | 503 | if (!dev_get_uevent_suppress(ddev)) |
490 | kobject_uevent(&pdev->kobj, KOBJ_ADD); | 504 | kobject_uevent(&pdev->kobj, KOBJ_ADD); |
491 | 505 | ||
506 | hd_ref_init(p); | ||
492 | return p; | 507 | return p; |
493 | 508 | ||
494 | out_free_info: | 509 | out_free_info: |
@@ -507,65 +522,6 @@ out_put: | |||
507 | return ERR_PTR(err); | 522 | return ERR_PTR(err); |
508 | } | 523 | } |
509 | 524 | ||
510 | /* Not exported, helper to add_disk(). */ | ||
511 | void register_disk(struct gendisk *disk) | ||
512 | { | ||
513 | struct device *ddev = disk_to_dev(disk); | ||
514 | struct block_device *bdev; | ||
515 | struct disk_part_iter piter; | ||
516 | struct hd_struct *part; | ||
517 | int err; | ||
518 | |||
519 | ddev->parent = disk->driverfs_dev; | ||
520 | |||
521 | dev_set_name(ddev, disk->disk_name); | ||
522 | |||
523 | /* delay uevents, until we scanned partition table */ | ||
524 | dev_set_uevent_suppress(ddev, 1); | ||
525 | |||
526 | if (device_add(ddev)) | ||
527 | return; | ||
528 | if (!sysfs_deprecated) { | ||
529 | err = sysfs_create_link(block_depr, &ddev->kobj, | ||
530 | kobject_name(&ddev->kobj)); | ||
531 | if (err) { | ||
532 | device_del(ddev); | ||
533 | return; | ||
534 | } | ||
535 | } | ||
536 | disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj); | ||
537 | disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj); | ||
538 | |||
539 | /* No minors to use for partitions */ | ||
540 | if (!disk_partitionable(disk)) | ||
541 | goto exit; | ||
542 | |||
543 | /* No such device (e.g., media were just removed) */ | ||
544 | if (!get_capacity(disk)) | ||
545 | goto exit; | ||
546 | |||
547 | bdev = bdget_disk(disk, 0); | ||
548 | if (!bdev) | ||
549 | goto exit; | ||
550 | |||
551 | bdev->bd_invalidated = 1; | ||
552 | err = blkdev_get(bdev, FMODE_READ); | ||
553 | if (err < 0) | ||
554 | goto exit; | ||
555 | blkdev_put(bdev, FMODE_READ); | ||
556 | |||
557 | exit: | ||
558 | /* announce disk after possible partitions are created */ | ||
559 | dev_set_uevent_suppress(ddev, 0); | ||
560 | kobject_uevent(&ddev->kobj, KOBJ_ADD); | ||
561 | |||
562 | /* announce possible partitions */ | ||
563 | disk_part_iter_init(&piter, disk, 0); | ||
564 | while ((part = disk_part_iter_next(&piter))) | ||
565 | kobject_uevent(&part_to_dev(part)->kobj, KOBJ_ADD); | ||
566 | disk_part_iter_exit(&piter); | ||
567 | } | ||
568 | |||
569 | static bool disk_unlock_native_capacity(struct gendisk *disk) | 525 | static bool disk_unlock_native_capacity(struct gendisk *disk) |
570 | { | 526 | { |
571 | const struct block_device_operations *bdops = disk->fops; | 527 | const struct block_device_operations *bdops = disk->fops; |
@@ -728,33 +684,3 @@ fail: | |||
728 | } | 684 | } |
729 | 685 | ||
730 | EXPORT_SYMBOL(read_dev_sector); | 686 | EXPORT_SYMBOL(read_dev_sector); |
731 | |||
732 | void del_gendisk(struct gendisk *disk) | ||
733 | { | ||
734 | struct disk_part_iter piter; | ||
735 | struct hd_struct *part; | ||
736 | |||
737 | /* invalidate stuff */ | ||
738 | disk_part_iter_init(&piter, disk, | ||
739 | DISK_PITER_INCL_EMPTY | DISK_PITER_REVERSE); | ||
740 | while ((part = disk_part_iter_next(&piter))) { | ||
741 | invalidate_partition(disk, part->partno); | ||
742 | delete_partition(disk, part->partno); | ||
743 | } | ||
744 | disk_part_iter_exit(&piter); | ||
745 | |||
746 | invalidate_partition(disk, 0); | ||
747 | blk_free_devt(disk_to_dev(disk)->devt); | ||
748 | set_capacity(disk, 0); | ||
749 | disk->flags &= ~GENHD_FL_UP; | ||
750 | unlink_gendisk(disk); | ||
751 | part_stat_set_all(&disk->part0, 0); | ||
752 | disk->part0.stamp = 0; | ||
753 | |||
754 | kobject_put(disk->part0.holder_dir); | ||
755 | kobject_put(disk->slave_dir); | ||
756 | disk->driverfs_dev = NULL; | ||
757 | if (!sysfs_deprecated) | ||
758 | sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk))); | ||
759 | device_del(disk_to_dev(disk)); | ||
760 | } | ||
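Three things happen in the fs/partitions/check.c hunk: partitions gain a read-only flag in sysfs via part_ro_show() (mirroring the existing whole-disk 'ro' attribute), hd_struct teardown switches to reference counting (hd_ref_init() at add_partition() time, hd_struct_put() in delete_partition(), with the RCU free deferred to __delete_partition()), and register_disk()/del_gendisk() are dropped from this file; they move into the block layer's genhd code elsewhere in the series. A small userspace sketch of the new attribute; the partition name is only an example:

/* build: cc -o part_ro part_ro.c && ./part_ro sda1 */
#include <stdio.h>

int main(int argc, char **argv)
{
	const char *part = argc > 1 ? argv[1] : "sda1";	/* example name */
	char path[256];
	FILE *f;
	int ro;

	snprintf(path, sizeof(path), "/sys/class/block/%s/ro", part);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%d", &ro) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("%s: %s\n", part, ro ? "read-only" : "read-write");
	return 0;
}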
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c index d31bce1a9f90..3eea859e6990 100644 --- a/fs/reiserfs/journal.c +++ b/fs/reiserfs/journal.c | |||
@@ -2551,8 +2551,6 @@ static int release_journal_dev(struct super_block *super, | |||
2551 | result = 0; | 2551 | result = 0; |
2552 | 2552 | ||
2553 | if (journal->j_dev_bd != NULL) { | 2553 | if (journal->j_dev_bd != NULL) { |
2554 | if (journal->j_dev_bd->bd_dev != super->s_dev) | ||
2555 | bd_release(journal->j_dev_bd); | ||
2556 | result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode); | 2554 | result = blkdev_put(journal->j_dev_bd, journal->j_dev_mode); |
2557 | journal->j_dev_bd = NULL; | 2555 | journal->j_dev_bd = NULL; |
2558 | } | 2556 | } |
@@ -2570,7 +2568,7 @@ static int journal_init_dev(struct super_block *super, | |||
2570 | { | 2568 | { |
2571 | int result; | 2569 | int result; |
2572 | dev_t jdev; | 2570 | dev_t jdev; |
2573 | fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE; | 2571 | fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; |
2574 | char b[BDEVNAME_SIZE]; | 2572 | char b[BDEVNAME_SIZE]; |
2575 | 2573 | ||
2576 | result = 0; | 2574 | result = 0; |
@@ -2584,7 +2582,10 @@ static int journal_init_dev(struct super_block *super, | |||
2584 | 2582 | ||
2585 | /* there is no "jdev" option and journal is on separate device */ | 2583 | /* there is no "jdev" option and journal is on separate device */ |
2586 | if ((!jdev_name || !jdev_name[0])) { | 2584 | if ((!jdev_name || !jdev_name[0])) { |
2587 | journal->j_dev_bd = open_by_devnum(jdev, blkdev_mode); | 2585 | if (jdev == super->s_dev) |
2586 | blkdev_mode &= ~FMODE_EXCL; | ||
2587 | journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, | ||
2588 | journal); | ||
2588 | journal->j_dev_mode = blkdev_mode; | 2589 | journal->j_dev_mode = blkdev_mode; |
2589 | if (IS_ERR(journal->j_dev_bd)) { | 2590 | if (IS_ERR(journal->j_dev_bd)) { |
2590 | result = PTR_ERR(journal->j_dev_bd); | 2591 | result = PTR_ERR(journal->j_dev_bd); |
@@ -2593,22 +2594,14 @@ static int journal_init_dev(struct super_block *super, | |||
2593 | "cannot init journal device '%s': %i", | 2594 | "cannot init journal device '%s': %i", |
2594 | __bdevname(jdev, b), result); | 2595 | __bdevname(jdev, b), result); |
2595 | return result; | 2596 | return result; |
2596 | } else if (jdev != super->s_dev) { | 2597 | } else if (jdev != super->s_dev) |
2597 | result = bd_claim(journal->j_dev_bd, journal); | ||
2598 | if (result) { | ||
2599 | blkdev_put(journal->j_dev_bd, blkdev_mode); | ||
2600 | return result; | ||
2601 | } | ||
2602 | |||
2603 | set_blocksize(journal->j_dev_bd, super->s_blocksize); | 2598 | set_blocksize(journal->j_dev_bd, super->s_blocksize); |
2604 | } | ||
2605 | 2599 | ||
2606 | return 0; | 2600 | return 0; |
2607 | } | 2601 | } |
2608 | 2602 | ||
2609 | journal->j_dev_mode = blkdev_mode; | 2603 | journal->j_dev_mode = blkdev_mode; |
2610 | journal->j_dev_bd = open_bdev_exclusive(jdev_name, | 2604 | journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal); |
2611 | blkdev_mode, journal); | ||
2612 | if (IS_ERR(journal->j_dev_bd)) { | 2605 | if (IS_ERR(journal->j_dev_bd)) { |
2613 | result = PTR_ERR(journal->j_dev_bd); | 2606 | result = PTR_ERR(journal->j_dev_bd); |
2614 | journal->j_dev_bd = NULL; | 2607 | journal->j_dev_bd = NULL; |
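The reiserfs journal code shows the other side of the holder model. When the journal lives on the filesystem's own device, the mount already holds the exclusive claim, so a second FMODE_EXCL claim with the journal as holder would be refused; the hunk therefore strips FMODE_EXCL for that case and only claims a genuinely separate journal device exclusively. A condensed sketch of that decision (an approximation, not the verbatim reiserfs code):

#include <linux/blkdev.h>
#include <linux/fs.h>

static struct block_device *example_open_journal(dev_t jdev, dev_t fs_dev,
						 void *journal_holder,
						 fmode_t *mode_out)
{
	fmode_t mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL;

	/*
	 * The mount holds the exclusive claim on fs_dev under a different
	 * holder, so a second FMODE_EXCL claim would fail; share the
	 * device non-exclusively instead.
	 */
	if (jdev == fs_dev)
		mode &= ~FMODE_EXCL;

	*mode_out = mode;	/* blkdev_put() later needs the same mode */
	return blkdev_get_by_dev(jdev, mode, journal_holder);
}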
diff --git a/fs/splice.c b/fs/splice.c index ce2f02579e35..50a5d978da16 100644 --- a/fs/splice.c +++ b/fs/splice.c | |||
@@ -682,19 +682,14 @@ static int pipe_to_sendpage(struct pipe_inode_info *pipe, | |||
682 | { | 682 | { |
683 | struct file *file = sd->u.file; | 683 | struct file *file = sd->u.file; |
684 | loff_t pos = sd->pos; | 684 | loff_t pos = sd->pos; |
685 | int ret, more; | 685 | int more; |
686 | |||
687 | ret = buf->ops->confirm(pipe, buf); | ||
688 | if (!ret) { | ||
689 | more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; | ||
690 | if (file->f_op && file->f_op->sendpage) | ||
691 | ret = file->f_op->sendpage(file, buf->page, buf->offset, | ||
692 | sd->len, &pos, more); | ||
693 | else | ||
694 | ret = -EINVAL; | ||
695 | } | ||
696 | 686 | ||
697 | return ret; | 687 | if (!likely(file->f_op && file->f_op->sendpage)) |
688 | return -EINVAL; | ||
689 | |||
690 | more = (sd->flags & SPLICE_F_MORE) || sd->len < sd->total_len; | ||
691 | return file->f_op->sendpage(file, buf->page, buf->offset, | ||
692 | sd->len, &pos, more); | ||
698 | } | 693 | } |
699 | 694 | ||
700 | /* | 695 | /* |
@@ -727,13 +722,6 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | |||
727 | void *fsdata; | 722 | void *fsdata; |
728 | int ret; | 723 | int ret; |
729 | 724 | ||
730 | /* | ||
731 | * make sure the data in this buffer is uptodate | ||
732 | */ | ||
733 | ret = buf->ops->confirm(pipe, buf); | ||
734 | if (unlikely(ret)) | ||
735 | return ret; | ||
736 | |||
737 | offset = sd->pos & ~PAGE_CACHE_MASK; | 725 | offset = sd->pos & ~PAGE_CACHE_MASK; |
738 | 726 | ||
739 | this_len = sd->len; | 727 | this_len = sd->len; |
@@ -805,12 +793,17 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd, | |||
805 | if (sd->len > sd->total_len) | 793 | if (sd->len > sd->total_len) |
806 | sd->len = sd->total_len; | 794 | sd->len = sd->total_len; |
807 | 795 | ||
808 | ret = actor(pipe, buf, sd); | 796 | ret = buf->ops->confirm(pipe, buf); |
809 | if (ret <= 0) { | 797 | if (unlikely(ret)) { |
810 | if (ret == -ENODATA) | 798 | if (ret == -ENODATA) |
811 | ret = 0; | 799 | ret = 0; |
812 | return ret; | 800 | return ret; |
813 | } | 801 | } |
802 | |||
803 | ret = actor(pipe, buf, sd); | ||
804 | if (ret <= 0) | ||
805 | return ret; | ||
806 | |||
814 | buf->offset += ret; | 807 | buf->offset += ret; |
815 | buf->len -= ret; | 808 | buf->len -= ret; |
816 | 809 | ||
@@ -1044,10 +1037,6 @@ static int write_pipe_buf(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | |||
1044 | int ret; | 1037 | int ret; |
1045 | void *data; | 1038 | void *data; |
1046 | 1039 | ||
1047 | ret = buf->ops->confirm(pipe, buf); | ||
1048 | if (ret) | ||
1049 | return ret; | ||
1050 | |||
1051 | data = buf->ops->map(pipe, buf, 0); | 1040 | data = buf->ops->map(pipe, buf, 0); |
1052 | ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); | 1041 | ret = kernel_write(sd->u.file, data + buf->offset, sd->len, sd->pos); |
1053 | buf->ops->unmap(pipe, buf, data); | 1042 | buf->ops->unmap(pipe, buf, data); |
@@ -1495,10 +1484,6 @@ static int pipe_to_user(struct pipe_inode_info *pipe, struct pipe_buffer *buf, | |||
1495 | char *src; | 1484 | char *src; |
1496 | int ret; | 1485 | int ret; |
1497 | 1486 | ||
1498 | ret = buf->ops->confirm(pipe, buf); | ||
1499 | if (unlikely(ret)) | ||
1500 | return ret; | ||
1501 | |||
1502 | /* | 1487 | /* |
1503 | * See if we can use the atomic maps, by prefaulting in the | 1488 | * See if we can use the atomic maps, by prefaulting in the |
1504 | * pages and doing an atomic copy | 1489 | * pages and doing an atomic copy |
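The fs/splice.c change is what lets the nfsd and splice actors above shed their confirm() calls: splice_from_pipe_feed() now confirms the buffer (and turns -ENODATA into 0) once, right before invoking the actor, so every actor runs against a buffer whose contents are known to be up to date. A hedged sketch of a custom actor under that contract; the map/unmap usage follows write_pipe_buf:

#include <linux/pipe_fs_i.h>
#include <linux/splice.h>

/*
 * Sketch of a splice actor under the new contract: the caller has already
 * run buf->ops->confirm(), so the page can be used directly.
 */
static int example_actor(struct pipe_inode_info *pipe,
			 struct pipe_buffer *buf, struct splice_desc *sd)
{
	void *data;
	int len = sd->len;

	data = buf->ops->map(pipe, buf, 0);	/* non-atomic map */
	/* ... consume 'len' bytes starting at data + buf->offset ... */
	buf->ops->unmap(pipe, buf, data);

	return len;	/* bytes consumed; <= 0 ends the feed loop */
}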
diff --git a/fs/super.c b/fs/super.c index 823e061faa87..4f6a3571a634 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -767,13 +767,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, | |||
767 | { | 767 | { |
768 | struct block_device *bdev; | 768 | struct block_device *bdev; |
769 | struct super_block *s; | 769 | struct super_block *s; |
770 | fmode_t mode = FMODE_READ; | 770 | fmode_t mode = FMODE_READ | FMODE_EXCL; |
771 | int error = 0; | 771 | int error = 0; |
772 | 772 | ||
773 | if (!(flags & MS_RDONLY)) | 773 | if (!(flags & MS_RDONLY)) |
774 | mode |= FMODE_WRITE; | 774 | mode |= FMODE_WRITE; |
775 | 775 | ||
776 | bdev = open_bdev_exclusive(dev_name, mode, fs_type); | 776 | bdev = blkdev_get_by_path(dev_name, mode, fs_type); |
777 | if (IS_ERR(bdev)) | 777 | if (IS_ERR(bdev)) |
778 | return ERR_CAST(bdev); | 778 | return ERR_CAST(bdev); |
779 | 779 | ||
@@ -802,13 +802,13 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, | |||
802 | 802 | ||
803 | /* | 803 | /* |
804 | * s_umount nests inside bd_mutex during | 804 | * s_umount nests inside bd_mutex during |
805 | * __invalidate_device(). close_bdev_exclusive() | 805 | * __invalidate_device(). blkdev_put() acquires |
806 | * acquires bd_mutex and can't be called under | 806 | * bd_mutex and can't be called under s_umount. Drop |
807 | * s_umount. Drop s_umount temporarily. This is safe | 807 | * s_umount temporarily. This is safe as we're |
808 | * as we're holding an active reference. | 808 | * holding an active reference. |
809 | */ | 809 | */ |
810 | up_write(&s->s_umount); | 810 | up_write(&s->s_umount); |
811 | close_bdev_exclusive(bdev, mode); | 811 | blkdev_put(bdev, mode); |
812 | down_write(&s->s_umount); | 812 | down_write(&s->s_umount); |
813 | } else { | 813 | } else { |
814 | char b[BDEVNAME_SIZE]; | 814 | char b[BDEVNAME_SIZE]; |
@@ -832,7 +832,7 @@ struct dentry *mount_bdev(struct file_system_type *fs_type, | |||
832 | error_s: | 832 | error_s: |
833 | error = PTR_ERR(s); | 833 | error = PTR_ERR(s); |
834 | error_bdev: | 834 | error_bdev: |
835 | close_bdev_exclusive(bdev, mode); | 835 | blkdev_put(bdev, mode); |
836 | error: | 836 | error: |
837 | return ERR_PTR(error); | 837 | return ERR_PTR(error); |
838 | } | 838 | } |
@@ -863,7 +863,8 @@ void kill_block_super(struct super_block *sb) | |||
863 | bdev->bd_super = NULL; | 863 | bdev->bd_super = NULL; |
864 | generic_shutdown_super(sb); | 864 | generic_shutdown_super(sb); |
865 | sync_blockdev(bdev); | 865 | sync_blockdev(bdev); |
866 | close_bdev_exclusive(bdev, mode); | 866 | WARN_ON_ONCE(!(mode & FMODE_EXCL)); |
867 | blkdev_put(bdev, mode | FMODE_EXCL); | ||
867 | } | 868 | } |
868 | 869 | ||
869 | EXPORT_SYMBOL(kill_block_super); | 870 | EXPORT_SYMBOL(kill_block_super); |
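mount_bdev() and kill_block_super() keep individual filesystems out of this API entirely: the FMODE_EXCL open with the file_system_type as holder, the matching blkdev_put(), and the WARN_ON_ONCE() sanity check on the recorded mode all live in these two helpers. A sketch of how a simple block-device filesystem sits on top of them; the foofs names are invented:

#include <linux/fs.h>
#include <linux/init.h>
#include <linux/module.h>

/* Invented example; only the glue around mount_bdev() is the point. */
static int foofs_fill_super(struct super_block *sb, void *data, int silent)
{
	/* read the on-disk super from sb->s_bdev, set s_op, build a root */
	return -EINVAL;		/* placeholder */
}

static struct dentry *foofs_mount(struct file_system_type *fs_type,
				  int flags, const char *dev_name, void *data)
{
	/* mount_bdev() performs the FMODE_EXCL blkdev_get_by_path() */
	return mount_bdev(fs_type, flags, dev_name, data, foofs_fill_super);
}

static struct file_system_type foofs_type = {
	.owner		= THIS_MODULE,
	.name		= "foofs",
	.mount		= foofs_mount,
	.kill_sb	= kill_block_super,	/* issues the matching blkdev_put() */
	.fs_flags	= FS_REQUIRES_DEV,
};

static int __init foofs_init(void)
{
	return register_filesystem(&foofs_type);
}
module_init(foofs_init);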
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index a10f6416e563..bd07f7339366 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -606,7 +606,8 @@ xfs_blkdev_get( | |||
606 | { | 606 | { |
607 | int error = 0; | 607 | int error = 0; |
608 | 608 | ||
609 | *bdevp = open_bdev_exclusive(name, FMODE_READ|FMODE_WRITE, mp); | 609 | *bdevp = blkdev_get_by_path(name, FMODE_READ|FMODE_WRITE|FMODE_EXCL, |
610 | mp); | ||
610 | if (IS_ERR(*bdevp)) { | 611 | if (IS_ERR(*bdevp)) { |
611 | error = PTR_ERR(*bdevp); | 612 | error = PTR_ERR(*bdevp); |
612 | printk("XFS: Invalid device [%s], error=%d\n", name, error); | 613 | printk("XFS: Invalid device [%s], error=%d\n", name, error); |
@@ -620,7 +621,7 @@ xfs_blkdev_put( | |||
620 | struct block_device *bdev) | 621 | struct block_device *bdev) |
621 | { | 622 | { |
622 | if (bdev) | 623 | if (bdev) |
623 | close_bdev_exclusive(bdev, FMODE_READ|FMODE_WRITE); | 624 | blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); |
624 | } | 625 | } |
625 | 626 | ||
626 | /* | 627 | /* |