Diffstat (limited to 'fs/block_dev.c')
-rw-r--r--  fs/block_dev.c  708
1 file changed, 221 insertions, 487 deletions
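For orientation only (not part of the patch): a minimal sketch of how a caller might use the exclusive-open interface this change introduces. blkdev_get_by_path(), blkdev_put() and the FMODE_* flags are taken from the patch and the kernel headers of this era; the helper names and the particular FMODE_READ|FMODE_WRITE mode are illustrative assumptions.

/*
 * Illustrative sketch, not part of the patch: claim and release a block
 * device through the interface added below.  The helper names and the
 * chosen FMODE_* combination are assumptions for the example.
 */
#include <linux/fs.h>
#include <linux/err.h>

static struct block_device *example_claim_bdev(const char *path, void *holder)
{
	struct block_device *bdev;

	/* FMODE_EXCL needs a non-NULL holder; claims nest for the same holder. */
	bdev = blkdev_get_by_path(path, FMODE_READ | FMODE_WRITE | FMODE_EXCL,
				  holder);
	if (IS_ERR(bdev))
		return bdev;		/* ERR_PTR(-errno) */

	/* ... use the device; the claim lasts until blkdev_put() ... */
	return bdev;
}

static void example_release_bdev(struct block_device *bdev)
{
	/* The mode must match the open so the exclusive claim is dropped. */
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}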
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 06e8ff12b97c..c1c1b8c3fb99 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -426,9 +426,6 @@ static void init_once(void *foo)
 	mutex_init(&bdev->bd_mutex);
 	INIT_LIST_HEAD(&bdev->bd_inodes);
 	INIT_LIST_HEAD(&bdev->bd_list);
-#ifdef CONFIG_SYSFS
-	INIT_LIST_HEAD(&bdev->bd_holder_list);
-#endif
 	inode_init_once(&ei->vfs_inode);
 	/* Initialize mutex for freeze. */
 	mutex_init(&bdev->bd_fsfreeze_mutex);
@@ -663,7 +660,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
 	else if (bdev->bd_contains == bdev)
 		return true;	 /* is a whole device which isn't held */
 
-	else if (whole->bd_holder == bd_claim)
+	else if (whole->bd_holder == bd_may_claim)
 		return true; 	 /* is a partition of a device that is being partitioned */
 	else if (whole->bd_holder != NULL)
 		return false;	 /* is a partition of a held device */
@@ -775,439 +772,87 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
 	}
 }
 
-/* releases bdev_lock */
-static void __bd_abort_claiming(struct block_device *whole, void *holder)
-{
-	BUG_ON(whole->bd_claiming != holder);
-	whole->bd_claiming = NULL;
-	wake_up_bit(&whole->bd_claiming, 0);
-
-	spin_unlock(&bdev_lock);
-	bdput(whole);
-}
-
-/**
- * bd_abort_claiming - abort claiming a block device
- * @whole: whole block device returned by bd_start_claiming()
- * @holder: holder trying to claim @bdev
- *
- * Abort a claiming block started by bd_start_claiming().  Note that
- * @whole is not the block device to be claimed but the whole device
- * returned by bd_start_claiming().
- *
- * CONTEXT:
- * Grabs and releases bdev_lock.
- */
-static void bd_abort_claiming(struct block_device *whole, void *holder)
-{
-	spin_lock(&bdev_lock);
-	__bd_abort_claiming(whole, holder);	/* releases bdev_lock */
-}
-
-/* increment holders when we have a legitimate claim. requires bdev_lock */
-static void __bd_claim(struct block_device *bdev, struct block_device *whole,
-			void *holder)
-{
-	/* note that for a whole device bd_holders
-	 * will be incremented twice, and bd_holder will
-	 * be set to bd_claim before being set to holder
-	 */
-	whole->bd_holders++;
-	whole->bd_holder = bd_claim;
-	bdev->bd_holders++;
-	bdev->bd_holder = holder;
-}
-
-/**
- * bd_finish_claiming - finish claiming a block device
- * @bdev: block device of interest (passed to bd_start_claiming())
- * @whole: whole block device returned by bd_start_claiming()
- * @holder: holder trying to claim @bdev
- *
- * Finish a claiming block started by bd_start_claiming().
- *
- * CONTEXT:
- * Grabs and releases bdev_lock.
- */
-static void bd_finish_claiming(struct block_device *bdev,
-				struct block_device *whole, void *holder)
-{
-	spin_lock(&bdev_lock);
-	BUG_ON(!bd_may_claim(bdev, whole, holder));
-	__bd_claim(bdev, whole, holder);
-	__bd_abort_claiming(whole, holder); /* not actually an abort */
-}
-
-/**
- * bd_claim - claim a block device
- * @bdev: block device to claim
- * @holder: holder trying to claim @bdev
- *
- * Try to claim @bdev which must have been opened successfully.
- *
- * CONTEXT:
- * Might sleep.
- *
- * RETURNS:
- * 0 if successful, -EBUSY if @bdev is already claimed.
- */
-int bd_claim(struct block_device *bdev, void *holder)
-{
-	struct block_device *whole = bdev->bd_contains;
-	int res;
-
-	might_sleep();
-
-	spin_lock(&bdev_lock);
-	res = bd_prepare_to_claim(bdev, whole, holder);
-	if (res == 0)
-		__bd_claim(bdev, whole, holder);
-	spin_unlock(&bdev_lock);
-
-	return res;
-}
-EXPORT_SYMBOL(bd_claim);
-
-void bd_release(struct block_device *bdev)
-{
-	spin_lock(&bdev_lock);
-	if (!--bdev->bd_contains->bd_holders)
-		bdev->bd_contains->bd_holder = NULL;
-	if (!--bdev->bd_holders)
-		bdev->bd_holder = NULL;
-	spin_unlock(&bdev_lock);
-}
-
-EXPORT_SYMBOL(bd_release);
-
 #ifdef CONFIG_SYSFS
-/*
- * Functions for bd_claim_by_kobject / bd_release_from_kobject
- *
- * If a kobject is passed to bd_claim_by_kobject()
- * and the kobject has a parent directory,
- * following symlinks are created:
- * o from the kobject to the claimed bdev
- * o from "holders" directory of the bdev to the parent of the kobject
- * bd_release_from_kobject() removes these symlinks.
- *
- * Example:
- *   If /dev/dm-0 maps to /dev/sda, kobject corresponding to
- *   /sys/block/dm-0/slaves is passed to bd_claim_by_kobject(), then:
- *	/sys/block/dm-0/slaves/sda --> /sys/block/sda
- *	/sys/block/sda/holders/dm-0 --> /sys/block/dm-0
- */
-
 static int add_symlink(struct kobject *from, struct kobject *to)
 {
-	if (!from || !to)
-		return 0;
 	return sysfs_create_link(from, to, kobject_name(to));
 }
 
 static void del_symlink(struct kobject *from, struct kobject *to)
 {
-	if (!from || !to)
-		return;
 	sysfs_remove_link(from, kobject_name(to));
 }
 
-/*
- * 'struct bd_holder' contains pointers to kobjects symlinked by
- * bd_claim_by_kobject.
- * It's connected to bd_holder_list which is protected by bdev->bd_sem.
- */
-struct bd_holder {
-	struct list_head list;	/* chain of holders of the bdev */
-	int count;		/* references from the holder */
-	struct kobject *sdir;	/* holder object, e.g. "/block/dm-0/slaves" */
-	struct kobject *hdev;	/* e.g. "/block/dm-0" */
-	struct kobject *hdir;	/* e.g. "/block/sda/holders" */
-	struct kobject *sdev;	/* e.g. "/block/sda" */
-};
-
-/*
- * Get references of related kobjects at once.
- * Returns 1 on success. 0 on failure.
- *
- * Should call bd_holder_release_dirs() after successful use.
- */
-static int bd_holder_grab_dirs(struct block_device *bdev,
-			struct bd_holder *bo)
-{
-	if (!bdev || !bo)
-		return 0;
-
-	bo->sdir = kobject_get(bo->sdir);
-	if (!bo->sdir)
-		return 0;
-
-	bo->hdev = kobject_get(bo->sdir->parent);
-	if (!bo->hdev)
-		goto fail_put_sdir;
-
-	bo->sdev = kobject_get(&part_to_dev(bdev->bd_part)->kobj);
-	if (!bo->sdev)
-		goto fail_put_hdev;
-
-	bo->hdir = kobject_get(bdev->bd_part->holder_dir);
-	if (!bo->hdir)
-		goto fail_put_sdev;
-
-	return 1;
-
-fail_put_sdev:
-	kobject_put(bo->sdev);
-fail_put_hdev:
-	kobject_put(bo->hdev);
-fail_put_sdir:
-	kobject_put(bo->sdir);
-
-	return 0;
-}
-
-/* Put references of related kobjects at once. */
-static void bd_holder_release_dirs(struct bd_holder *bo)
-{
-	kobject_put(bo->hdir);
-	kobject_put(bo->sdev);
-	kobject_put(bo->hdev);
-	kobject_put(bo->sdir);
-}
-
-static struct bd_holder *alloc_bd_holder(struct kobject *kobj)
-{
-	struct bd_holder *bo;
-
-	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
-	if (!bo)
-		return NULL;
-
-	bo->count = 1;
-	bo->sdir = kobj;
-
-	return bo;
-}
-
-static void free_bd_holder(struct bd_holder *bo)
-{
-	kfree(bo);
-}
-
-/**
- * find_bd_holder - find matching struct bd_holder from the block device
- *
- * @bdev:	struct block device to be searched
- * @bo:		target struct bd_holder
- *
- * Returns matching entry with @bo in @bdev->bd_holder_list.
- * If found, increment the reference count and return the pointer.
- * If not found, returns NULL.
- */
-static struct bd_holder *find_bd_holder(struct block_device *bdev,
-					struct bd_holder *bo)
-{
-	struct bd_holder *tmp;
-
-	list_for_each_entry(tmp, &bdev->bd_holder_list, list)
-		if (tmp->sdir == bo->sdir) {
-			tmp->count++;
-			return tmp;
-		}
-
-	return NULL;
-}
-
-/**
- * add_bd_holder - create sysfs symlinks for bd_claim() relationship
- *
- * @bdev:	block device to be bd_claimed
- * @bo:		preallocated and initialized by alloc_bd_holder()
- *
- * Add @bo to @bdev->bd_holder_list, create symlinks.
- *
- * Returns 0 if symlinks are created.
- * Returns -ve if something fails.
- */
-static int add_bd_holder(struct block_device *bdev, struct bd_holder *bo)
-{
-	int err;
-
-	if (!bo)
-		return -EINVAL;
-
-	if (!bd_holder_grab_dirs(bdev, bo))
-		return -EBUSY;
-
-	err = add_symlink(bo->sdir, bo->sdev);
-	if (err)
-		return err;
-
-	err = add_symlink(bo->hdir, bo->hdev);
-	if (err) {
-		del_symlink(bo->sdir, bo->sdev);
-		return err;
-	}
-
-	list_add_tail(&bo->list, &bdev->bd_holder_list);
-	return 0;
-}
-
-/**
- * del_bd_holder - delete sysfs symlinks for bd_claim() relationship
- *
- * @bdev:	block device to be bd_claimed
- * @kobj:	holder's kobject
- *
- * If there is matching entry with @kobj in @bdev->bd_holder_list
- * and no other bd_claim() from the same kobject,
- * remove the struct bd_holder from the list, delete symlinks for it.
- *
- * Returns a pointer to the struct bd_holder when it's removed from the list
- * and ready to be freed.
- * Returns NULL if matching claim isn't found or there is other bd_claim()
- * by the same kobject.
- */
-static struct bd_holder *del_bd_holder(struct block_device *bdev,
-					struct kobject *kobj)
-{
-	struct bd_holder *bo;
-
-	list_for_each_entry(bo, &bdev->bd_holder_list, list) {
-		if (bo->sdir == kobj) {
-			bo->count--;
-			BUG_ON(bo->count < 0);
-			if (!bo->count) {
-				list_del(&bo->list);
-				del_symlink(bo->sdir, bo->sdev);
-				del_symlink(bo->hdir, bo->hdev);
-				bd_holder_release_dirs(bo);
-				return bo;
-			}
-			break;
-		}
-	}
-
-	return NULL;
-}
-
-/**
- * bd_claim_by_kobject - bd_claim() with additional kobject signature
- *
- * @bdev:	block device to be claimed
- * @holder:	holder's signature
- * @kobj:	holder's kobject
- *
- * Do bd_claim() and if it succeeds, create sysfs symlinks between
- * the bdev and the holder's kobject.
- * Use bd_release_from_kobject() when relesing the claimed bdev.
- *
- * Returns 0 on success. (same as bd_claim())
- * Returns errno on failure.
- */
-static int bd_claim_by_kobject(struct block_device *bdev, void *holder,
-				struct kobject *kobj)
-{
-	int err;
-	struct bd_holder *bo, *found;
-
-	if (!kobj)
-		return -EINVAL;
-
-	bo = alloc_bd_holder(kobj);
-	if (!bo)
-		return -ENOMEM;
-
-	mutex_lock(&bdev->bd_mutex);
-
-	err = bd_claim(bdev, holder);
-	if (err)
-		goto fail;
-
-	found = find_bd_holder(bdev, bo);
-	if (found)
-		goto fail;
-
-	err = add_bd_holder(bdev, bo);
-	if (err)
-		bd_release(bdev);
-	else
-		bo = NULL;
-fail:
-	mutex_unlock(&bdev->bd_mutex);
-	free_bd_holder(bo);
-	return err;
-}
-
-/**
- * bd_release_from_kobject - bd_release() with additional kobject signature
- *
- * @bdev:	block device to be released
- * @kobj:	holder's kobject
- *
- * Do bd_release() and remove sysfs symlinks created by bd_claim_by_kobject().
- */
-static void bd_release_from_kobject(struct block_device *bdev,
-					struct kobject *kobj)
-{
-	if (!kobj)
-		return;
-
-	mutex_lock(&bdev->bd_mutex);
-	bd_release(bdev);
-	free_bd_holder(del_bd_holder(bdev, kobj));
-	mutex_unlock(&bdev->bd_mutex);
-}
-
-/**
- * bd_claim_by_disk - wrapper function for bd_claim_by_kobject()
- *
- * @bdev:	block device to be claimed
- * @holder:	holder's signature
- * @disk:	holder's gendisk
- *
- * Call bd_claim_by_kobject() with getting @disk->slave_dir.
- */
-int bd_claim_by_disk(struct block_device *bdev, void *holder,
-			struct gendisk *disk)
-{
-	return bd_claim_by_kobject(bdev, holder, kobject_get(disk->slave_dir));
-}
-EXPORT_SYMBOL_GPL(bd_claim_by_disk);
-
-/**
- * bd_release_from_disk - wrapper function for bd_release_from_kobject()
- *
- * @bdev:	block device to be claimed
- * @disk:	holder's gendisk
- *
- * Call bd_release_from_kobject() and put @disk->slave_dir.
- */
-void bd_release_from_disk(struct block_device *bdev, struct gendisk *disk)
-{
-	bd_release_from_kobject(bdev, disk->slave_dir);
-	kobject_put(disk->slave_dir);
-}
-EXPORT_SYMBOL_GPL(bd_release_from_disk);
-#endif
-
-/*
- * Tries to open block device by device number.  Use it ONLY if you
- * really do not have anything better - i.e. when you are behind a
- * truly sucky interface and all you are given is a device number.  _Never_
- * to be used for internal purposes.  If you ever need it - reconsider
- * your API.
- */
-struct block_device *open_by_devnum(dev_t dev, fmode_t mode)
-{
-	struct block_device *bdev = bdget(dev);
-	int err = -ENOMEM;
-	if (bdev)
-		err = blkdev_get(bdev, mode);
-	return err ? ERR_PTR(err) : bdev;
-}
-
-EXPORT_SYMBOL(open_by_devnum);
+/**
+ * bd_link_disk_holder - create symlinks between holding disk and slave bdev
+ * @bdev: the claimed slave bdev
+ * @disk: the holding disk
+ *
+ * This functions creates the following sysfs symlinks.
+ *
+ * - from "slaves" directory of the holder @disk to the claimed @bdev
+ * - from "holders" directory of the @bdev to the holder @disk
+ *
+ * For example, if /dev/dm-0 maps to /dev/sda and disk for dm-0 is
+ * passed to bd_link_disk_holder(), then:
+ *
+ *   /sys/block/dm-0/slaves/sda --> /sys/block/sda
+ *   /sys/block/sda/holders/dm-0 --> /sys/block/dm-0
+ *
+ * The caller must have claimed @bdev before calling this function and
+ * ensure that both @bdev and @disk are valid during the creation and
+ * lifetime of these symlinks.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk)
+{
+	int ret = 0;
+
+	mutex_lock(&bdev->bd_mutex);
+
+	WARN_ON_ONCE(!bdev->bd_holder || bdev->bd_holder_disk);
+
+	/* FIXME: remove the following once add_disk() handles errors */
+	if (WARN_ON(!disk->slave_dir || !bdev->bd_part->holder_dir))
+		goto out_unlock;
+
+	ret = add_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+	if (ret)
+		goto out_unlock;
+
+	ret = add_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
+	if (ret) {
+		del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+		goto out_unlock;
+	}
+
+	bdev->bd_holder_disk = disk;
+out_unlock:
+	mutex_unlock(&bdev->bd_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(bd_link_disk_holder);
+
+static void bd_unlink_disk_holder(struct block_device *bdev)
+{
+	struct gendisk *disk = bdev->bd_holder_disk;
+
+	bdev->bd_holder_disk = NULL;
+	if (!disk)
+		return;
+
+	del_symlink(disk->slave_dir, &part_to_dev(bdev->bd_part)->kobj);
+	del_symlink(bdev->bd_part->holder_dir, &disk_to_dev(disk)->kobj);
+}
+#else
+static inline void bd_unlink_disk_holder(struct block_device *bdev)
+{ }
+#endif
 
 /**
  * flush_disk - invalidates all buffer-cache entries on a disk
@@ -1469,17 +1114,156 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part)
 	return ret;
 }
 
-int blkdev_get(struct block_device *bdev, fmode_t mode)
+/**
+ * blkdev_get - open a block device
+ * @bdev: block_device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open @bdev with @mode.  If @mode includes %FMODE_EXCL, @bdev is
+ * open with exclusive access.  Specifying %FMODE_EXCL with %NULL
+ * @holder is invalid.  Exclusive opens may nest for the same @holder.
+ *
+ * On success, the reference count of @bdev is unchanged.  On failure,
+ * @bdev is put.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int blkdev_get(struct block_device *bdev, fmode_t mode, void *holder)
 {
-	return __blkdev_get(bdev, mode, 0);
+	struct block_device *whole = NULL;
+	int res;
+
+	WARN_ON_ONCE((mode & FMODE_EXCL) && !holder);
+
+	if ((mode & FMODE_EXCL) && holder) {
+		whole = bd_start_claiming(bdev, holder);
+		if (IS_ERR(whole)) {
+			bdput(bdev);
+			return PTR_ERR(whole);
+		}
+	}
+
+	res = __blkdev_get(bdev, mode, 0);
+
+	/* __blkdev_get() may alter read only status, check it afterwards */
+	if (!res && (mode & FMODE_WRITE) && bdev_read_only(bdev)) {
+		__blkdev_put(bdev, mode, 0);
+		res = -EACCES;
+	}
+
+	if (whole) {
+		/* finish claiming */
+		spin_lock(&bdev_lock);
+
+		if (res == 0) {
+			BUG_ON(!bd_may_claim(bdev, whole, holder));
+			/*
+			 * Note that for a whole device bd_holders
+			 * will be incremented twice, and bd_holder
+			 * will be set to bd_may_claim before being
+			 * set to holder
+			 */
+			whole->bd_holders++;
+			whole->bd_holder = bd_may_claim;
+			bdev->bd_holders++;
+			bdev->bd_holder = holder;
+		}
+
+		/* tell others that we're done */
+		BUG_ON(whole->bd_claiming != holder);
+		whole->bd_claiming = NULL;
+		wake_up_bit(&whole->bd_claiming, 0);
+
+		spin_unlock(&bdev_lock);
+		bdput(whole);
+	}
+
+	return res;
 }
 EXPORT_SYMBOL(blkdev_get);
 
+/**
+ * blkdev_get_by_path - open a block device by name
+ * @path: path to the block device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open the blockdevice described by the device file at @path.  @mode
+ * and @holder are identical to blkdev_get().
+ *
+ * On success, the returned block_device has reference count of one.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to block_device on success, ERR_PTR(-errno) on failure.
+ */
+struct block_device *blkdev_get_by_path(const char *path, fmode_t mode,
+					void *holder)
+{
+	struct block_device *bdev;
+	int err;
+
+	bdev = lookup_bdev(path);
+	if (IS_ERR(bdev))
+		return bdev;
+
+	err = blkdev_get(bdev, mode, holder);
+	if (err)
+		return ERR_PTR(err);
+
+	return bdev;
+}
+EXPORT_SYMBOL(blkdev_get_by_path);
+
+/**
+ * blkdev_get_by_dev - open a block device by device number
+ * @dev: device number of block device to open
+ * @mode: FMODE_* mask
+ * @holder: exclusive holder identifier
+ *
+ * Open the blockdevice described by device number @dev.  @mode and
+ * @holder are identical to blkdev_get().
+ *
+ * Use it ONLY if you really do not have anything better - i.e. when
+ * you are behind a truly sucky interface and all you are given is a
+ * device number.  _Never_ to be used for internal purposes.  If you
+ * ever need it - reconsider your API.
+ *
+ * On success, the returned block_device has reference count of one.
+ *
+ * CONTEXT:
+ * Might sleep.
+ *
+ * RETURNS:
+ * Pointer to block_device on success, ERR_PTR(-errno) on failure.
+ */
+struct block_device *blkdev_get_by_dev(dev_t dev, fmode_t mode, void *holder)
+{
+	struct block_device *bdev;
+	int err;
+
+	bdev = bdget(dev);
+	if (!bdev)
+		return ERR_PTR(-ENOMEM);
+
+	err = blkdev_get(bdev, mode, holder);
+	if (err)
+		return ERR_PTR(err);
+
+	return bdev;
+}
+EXPORT_SYMBOL(blkdev_get_by_dev);
+
 static int blkdev_open(struct inode * inode, struct file * filp)
 {
-	struct block_device *whole = NULL;
 	struct block_device *bdev;
-	int res;
 
 	/*
 	 * Preserve backwards compatibility and allow large file access
@@ -1500,26 +1284,9 @@ static int blkdev_open(struct inode * inode, struct file * filp)
 	if (bdev == NULL)
 		return -ENOMEM;
 
-	if (filp->f_mode & FMODE_EXCL) {
-		whole = bd_start_claiming(bdev, filp);
-		if (IS_ERR(whole)) {
-			bdput(bdev);
-			return PTR_ERR(whole);
-		}
-	}
-
 	filp->f_mapping = bdev->bd_inode->i_mapping;
 
-	res = blkdev_get(bdev, filp->f_mode);
-
-	if (whole) {
-		if (res == 0)
-			bd_finish_claiming(bdev, whole, filp);
-		else
-			bd_abort_claiming(whole, filp);
-	}
-
-	return res;
+	return blkdev_get(bdev, filp->f_mode, filp);
 }
 
 static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
@@ -1533,6 +1300,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 		bdev->bd_part_count--;
 
 	if (!--bdev->bd_openers) {
+		WARN_ON_ONCE(bdev->bd_holders);
 		sync_blockdev(bdev);
 		kill_bdev(bdev);
 	}
@@ -1563,6 +1331,34 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
 
 int blkdev_put(struct block_device *bdev, fmode_t mode)
 {
+	if (mode & FMODE_EXCL) {
+		bool bdev_free;
+
+		/*
+		 * Release a claim on the device.  The holder fields
+		 * are protected with bdev_lock.  bd_mutex is to
+		 * synchronize disk_holder unlinking.
+		 */
+		mutex_lock(&bdev->bd_mutex);
+		spin_lock(&bdev_lock);
+
+		WARN_ON_ONCE(--bdev->bd_holders < 0);
+		WARN_ON_ONCE(--bdev->bd_contains->bd_holders < 0);
+
+		/* bd_contains might point to self, check in a separate step */
+		if ((bdev_free = !bdev->bd_holders))
+			bdev->bd_holder = NULL;
+		if (!bdev->bd_contains->bd_holders)
+			bdev->bd_contains->bd_holder = NULL;
+
+		spin_unlock(&bdev_lock);
+
+		/* if this was the last claim, holder link should go too */
+		if (bdev_free)
+			bd_unlink_disk_holder(bdev);
+
+		mutex_unlock(&bdev->bd_mutex);
+	}
 	return __blkdev_put(bdev, mode, 0);
 }
 EXPORT_SYMBOL(blkdev_put);
@@ -1570,8 +1366,7 @@ EXPORT_SYMBOL(blkdev_put);
 static int blkdev_close(struct inode * inode, struct file * filp)
 {
 	struct block_device *bdev = I_BDEV(filp->f_mapping->host);
-	if (bdev->bd_holder == filp)
-		bd_release(bdev);
+
 	return blkdev_put(bdev, filp->f_mode);
 }
 
@@ -1716,67 +1511,6 @@ fail:
 }
 EXPORT_SYMBOL(lookup_bdev);
 
-/**
- * open_bdev_exclusive  -  open a block device by name and set it up for use
- *
- * @path:	special file representing the block device
- * @mode:	FMODE_... combination to pass be used
- * @holder:	owner for exclusion
- *
- * Open the blockdevice described by the special file at @path, claim it
- * for the @holder.
- */
-struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *holder)
-{
-	struct block_device *bdev, *whole;
-	int error;
-
-	bdev = lookup_bdev(path);
-	if (IS_ERR(bdev))
-		return bdev;
-
-	whole = bd_start_claiming(bdev, holder);
-	if (IS_ERR(whole)) {
-		bdput(bdev);
-		return whole;
-	}
-
-	error = blkdev_get(bdev, mode);
-	if (error)
-		goto out_abort_claiming;
-
-	error = -EACCES;
-	if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
-		goto out_blkdev_put;
-
-	bd_finish_claiming(bdev, whole, holder);
-	return bdev;
-
-out_blkdev_put:
-	blkdev_put(bdev, mode);
-out_abort_claiming:
-	bd_abort_claiming(whole, holder);
-	return ERR_PTR(error);
-}
-
-EXPORT_SYMBOL(open_bdev_exclusive);
-
-/**
- * close_bdev_exclusive  -  close a blockdevice opened by open_bdev_exclusive()
- *
- * @bdev:	blockdevice to close
- * @mode:	mode, must match that used to open.
- *
- * This is the counterpart to open_bdev_exclusive().
- */
-void close_bdev_exclusive(struct block_device *bdev, fmode_t mode)
-{
-	bd_release(bdev);
-	blkdev_put(bdev, mode);
-}
-
-EXPORT_SYMBOL(close_bdev_exclusive);
-
 int __invalidate_device(struct block_device *bdev)
 {
 	struct super_block *sb = get_super(bdev);