 drivers/mtd/ubi/Kconfig  |  13
 drivers/mtd/ubi/Makefile |   2
 drivers/mtd/ubi/build.c  | 161
 drivers/mtd/ubi/cdev.c   |  32
 drivers/mtd/ubi/eba.c    |  99
 drivers/mtd/ubi/gluebi.c | 378
 drivers/mtd/ubi/io.c     |  82
 drivers/mtd/ubi/kapi.c   | 117
 drivers/mtd/ubi/ubi.h    |  84
 drivers/mtd/ubi/upd.c    |   8
 drivers/mtd/ubi/vmt.c    |  65
 drivers/mtd/ubi/wl.c     | 179
 include/linux/mtd/ubi.h  |  37
 13 files changed, 922 insertions(+), 335 deletions(-)
diff --git a/drivers/mtd/ubi/Kconfig b/drivers/mtd/ubi/Kconfig
index 3f063108e95f..b1cd7a1a2191 100644
--- a/drivers/mtd/ubi/Kconfig
+++ b/drivers/mtd/ubi/Kconfig
| @@ -49,15 +49,16 @@ config MTD_UBI_BEB_RESERVE | |||
| 49 | reserved. Leave the default value if unsure. | 49 | reserved. Leave the default value if unsure. |
| 50 | 50 | ||
| 51 | config MTD_UBI_GLUEBI | 51 | config MTD_UBI_GLUEBI |
| 52 | bool "Emulate MTD devices" | 52 | tristate "MTD devices emulation driver (gluebi)" |
| 53 | default n | 53 | default n |
| 54 | depends on MTD_UBI | 54 | depends on MTD_UBI |
| 55 | help | 55 | help |
| 56 | This option enables MTD devices emulation on top of UBI volumes: for | 56 | This option enables gluebi - an additional driver which emulates MTD |
| 57 | each UBI volumes an MTD device is created, and all I/O to this MTD | 57 | devices on top of UBI volumes: for each UBI volume an MTD device is |
| 58 | device is redirected to the UBI volume. This is handy to make | 58 | created, and all I/O to this MTD device is redirected to the UBI |
| 59 | MTD-oriented software (like JFFS2) work on top of UBI. Do not enable | 59 | volume. This is handy to make MTD-oriented software (like JFFS2) |
| 60 | this if no legacy software will be used. | 60 | work on top of UBI. Do not enable this unless you use legacy |
| 61 | software. | ||
| 61 | 62 | ||
| 62 | source "drivers/mtd/ubi/Kconfig.debug" | 63 | source "drivers/mtd/ubi/Kconfig.debug" |
| 63 | endmenu | 64 | endmenu |
diff --git a/drivers/mtd/ubi/Makefile b/drivers/mtd/ubi/Makefile
index dd834e04151b..c9302a5452b0 100644
--- a/drivers/mtd/ubi/Makefile
+++ b/drivers/mtd/ubi/Makefile
| @@ -4,4 +4,4 @@ ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o scan.o | |||
| 4 | ubi-y += misc.o | 4 | ubi-y += misc.o |
| 5 | 5 | ||
| 6 | ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o | 6 | ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o |
| 7 | ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o | 7 | obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o |
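Together with the Kconfig change above, switching from ubi-$(CONFIG_MTD_UBI_GLUEBI) to obj-$(CONFIG_MTD_UBI_GLUEBI) stops linking gluebi.o into ubi.ko: with the symbol now tristate, setting it to 'm' builds a standalone gluebi.ko that can be loaded and unloaded independently of the core UBI module, which is what the new module_init()/module_exit() code in gluebi.c below relies on.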
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 4048db83aef6..286ed594e5a0 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
| @@ -41,6 +41,7 @@ | |||
| 41 | #include <linux/miscdevice.h> | 41 | #include <linux/miscdevice.h> |
| 42 | #include <linux/log2.h> | 42 | #include <linux/log2.h> |
| 43 | #include <linux/kthread.h> | 43 | #include <linux/kthread.h> |
| 44 | #include <linux/reboot.h> | ||
| 44 | #include "ubi.h" | 45 | #include "ubi.h" |
| 45 | 46 | ||
| 46 | /* Maximum length of the 'mtd=' parameter */ | 47 | /* Maximum length of the 'mtd=' parameter */ |
| @@ -122,6 +123,94 @@ static struct device_attribute dev_mtd_num = | |||
| 122 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); | 123 | __ATTR(mtd_num, S_IRUGO, dev_attribute_show, NULL); |
| 123 | 124 | ||
| 124 | /** | 125 | /** |
| 126 | * ubi_volume_notify - send a volume change notification. | ||
| 127 | * @ubi: UBI device description object | ||
| 128 | * @vol: volume description object of the changed volume | ||
| 129 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) | ||
| 130 | * | ||
| 131 | * This is a helper function which notifies all subscribers about a volume | ||
| 132 | * change event (creation, removal, re-sizing, re-naming, updating). Returns | ||
| 133 | * zero in case of success and a negative error code in case of failure. | ||
| 134 | */ | ||
| 135 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype) | ||
| 136 | { | ||
| 137 | struct ubi_notification nt; | ||
| 138 | |||
| 139 | ubi_do_get_device_info(ubi, &nt.di); | ||
| 140 | ubi_do_get_volume_info(ubi, vol, &nt.vi); | ||
| 141 | return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt); | ||
| 142 | } | ||
| 143 | |||
| 144 | /** | ||
| 145 | * ubi_notify_all - send a notification to all volumes. | ||
| 146 | * @ubi: UBI device description object | ||
| 147 | * @ntype: notification type to send (%UBI_VOLUME_ADDED, etc) | ||
| 148 | * @nb: the notifier to call | ||
| 149 | * | ||
| 150 | * This function walks all volumes of UBI device @ubi and sends the @ntype | ||
| 151 | * notification for each volume. If @nb is %NULL, then all registered notifiers | ||
| 152 | * are called, otherwise only the @nb notifier is called. Returns the number of | ||
| 153 | * sent notifications. | ||
| 154 | */ | ||
| 155 | int ubi_notify_all(struct ubi_device *ubi, int ntype, struct notifier_block *nb) | ||
| 156 | { | ||
| 157 | struct ubi_notification nt; | ||
| 158 | int i, count = 0; | ||
| 159 | |||
| 160 | ubi_do_get_device_info(ubi, &nt.di); | ||
| 161 | |||
| 162 | mutex_lock(&ubi->device_mutex); | ||
| 163 | for (i = 0; i < ubi->vtbl_slots; i++) { | ||
| 164 | /* | ||
| 165 | * Since the @ubi->device is locked, and we are not going to | ||
| 166 | * change @ubi->volumes, we do not have to lock | ||
| 167 | * @ubi->volumes_lock. | ||
| 168 | */ | ||
| 169 | if (!ubi->volumes[i]) | ||
| 170 | continue; | ||
| 171 | |||
| 172 | ubi_do_get_volume_info(ubi, ubi->volumes[i], &nt.vi); | ||
| 173 | if (nb) | ||
| 174 | nb->notifier_call(nb, ntype, &nt); | ||
| 175 | else | ||
| 176 | blocking_notifier_call_chain(&ubi_notifiers, ntype, | ||
| 177 | &nt); | ||
| 178 | count += 1; | ||
| 179 | } | ||
| 180 | mutex_unlock(&ubi->device_mutex); | ||
| 181 | |||
| 182 | return count; | ||
| 183 | } | ||
| 184 | |||
| 185 | /** | ||
| 186 | * ubi_enumerate_volumes - send "add" notification for all existing volumes. | ||
| 187 | * @nb: the notifier to call | ||
| 188 | * | ||
| 189 | * This function walks all UBI devices and volumes and sends the | ||
| 190 | * %UBI_VOLUME_ADDED notification for each volume. If @nb is %NULL, then all | ||
| 191 | * registered notifiers are called, otherwise only the @nb notifier is called. | ||
| 192 | * Returns the number of sent notifications. | ||
| 193 | */ | ||
| 194 | int ubi_enumerate_volumes(struct notifier_block *nb) | ||
| 195 | { | ||
| 196 | int i, count = 0; | ||
| 197 | |||
| 198 | /* | ||
| 199 | * Since the @ubi_devices_mutex is locked, and we are not going to | ||
| 200 | * change @ubi_devices, we do not have to lock @ubi_devices_lock. | ||
| 201 | */ | ||
| 202 | for (i = 0; i < UBI_MAX_DEVICES; i++) { | ||
| 203 | struct ubi_device *ubi = ubi_devices[i]; | ||
| 204 | |||
| 205 | if (!ubi) | ||
| 206 | continue; | ||
| 207 | count += ubi_notify_all(ubi, UBI_VOLUME_ADDED, nb); | ||
| 208 | } | ||
| 209 | |||
| 210 | return count; | ||
| 211 | } | ||
| 212 | |||
| 213 | /** | ||
| 125 | * ubi_get_device - get UBI device. | 214 | * ubi_get_device - get UBI device. |
| 126 | * @ubi_num: UBI device number | 215 | * @ubi_num: UBI device number |
| 127 | * | 216 | * |
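For orientation, a minimal subscriber to this notification chain might look like the sketch below. It is illustrative only and not part of the patch: the "example_*" names are invented, and the meaning of the second argument of ubi_register_volume_notifier() (gluebi passes 0 further down in this diff) is inferred from ubi_enumerate_volumes() above, since kapi.c is not included in this excerpt.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mtd/ubi.h>

/* Log volumes as UBI reports them through the notification chain. */
static int example_vol_notify(struct notifier_block *nb, unsigned long ntype,
                              void *ptr)
{
        struct ubi_notification *nt = ptr;

        if (ntype == UBI_VOLUME_ADDED)
                printk(KERN_INFO "example: ubi%d volume %d (\"%s\") added\n",
                       nt->vi.ubi_num, nt->vi.vol_id, nt->vi.name);
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_vol_notify,
};

static int __init example_init(void)
{
        /* Passing 0 presumably asks UBI to replay UBI_VOLUME_ADDED for
         * already-existing volumes via ubi_enumerate_volumes(). */
        return ubi_register_volume_notifier(&example_nb, 0);
}

static void __exit example_exit(void)
{
        ubi_unregister_volume_notifier(&example_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");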
| @@ -380,7 +469,7 @@ static void free_user_volumes(struct ubi_device *ubi) | |||
| 380 | * @ubi: UBI device description object | 469 | * @ubi: UBI device description object |
| 381 | * | 470 | * |
| 382 | * This function returns zero in case of success and a negative error code in | 471 | * This function returns zero in case of success and a negative error code in |
| 383 | * case of failure. Note, this function destroys all volumes if it failes. | 472 | * case of failure. Note, this function destroys all volumes if it fails. |
| 384 | */ | 473 | */ |
| 385 | static int uif_init(struct ubi_device *ubi) | 474 | static int uif_init(struct ubi_device *ubi) |
| 386 | { | 475 | { |
| @@ -633,6 +722,15 @@ static int io_init(struct ubi_device *ubi) | |||
| 633 | } | 722 | } |
| 634 | 723 | ||
| 635 | /* | 724 | /* |
| 725 | * Set the maximum number of erroneous physical eraseblocks to 10%. | ||
| 726 | * Erroneous PEBs are those which have read errors. | ||
| 727 | */ | ||
| 728 | ubi->max_erroneous = ubi->peb_count / 10; | ||
| 729 | if (ubi->max_erroneous < 16) | ||
| 730 | ubi->max_erroneous = 16; | ||
| 731 | dbg_msg("max_erroneous %d", ubi->max_erroneous); | ||
| 732 | |||
| 733 | /* | ||
| 636 | * It may happen that EC and VID headers are situated in one minimal | 734 | * It may happen that EC and VID headers are situated in one minimal |
| 637 | * I/O unit. In this case we can only accept this UBI image in | 735 | * I/O unit. In this case we can only accept this UBI image in |
| 638 | * read-only mode. | 736 | * read-only mode. |
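As a worked example of the added limit: a device with 4096 PEBs gets max_erroneous = 409, while a 100-PEB device would compute only 10, which is below the floor, so 16 is used instead.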
| @@ -726,6 +824,34 @@ static int autoresize(struct ubi_device *ubi, int vol_id) | |||
| 726 | } | 824 | } |
| 727 | 825 | ||
| 728 | /** | 826 | /** |
| 827 | * ubi_reboot_notifier - halt UBI transactions immediately prior to a reboot. | ||
| 828 | * @n: reboot notifier object | ||
| 829 | * @state: SYS_RESTART, SYS_HALT, or SYS_POWER_OFF | ||
| 830 | * @cmd: pointer to command string for RESTART2 | ||
| 831 | * | ||
| 832 | * This function stops the UBI background thread so that the flash device | ||
| 833 | * remains quiescent when Linux restarts the system. Any queued work will be | ||
| 834 | * discarded, but this function will block until do_work() finishes if an | ||
| 835 | * operation is already in progress. | ||
| 836 | * | ||
| 837 | * This function solves a real-life problem observed on NOR flashes when a | ||
| 838 | * PEB erase operation starts, then the system is rebooted before the erase is | ||
| 839 | * finished, and the boot loader gets confused and dies. So we prefer to finish | ||
| 840 | * the ongoing operation before rebooting. | ||
| 841 | */ | ||
| 842 | static int ubi_reboot_notifier(struct notifier_block *n, unsigned long state, | ||
| 843 | void *cmd) | ||
| 844 | { | ||
| 845 | struct ubi_device *ubi; | ||
| 846 | |||
| 847 | ubi = container_of(n, struct ubi_device, reboot_notifier); | ||
| 848 | if (ubi->bgt_thread) | ||
| 849 | kthread_stop(ubi->bgt_thread); | ||
| 850 | ubi_sync(ubi->ubi_num); | ||
| 851 | return NOTIFY_DONE; | ||
| 852 | } | ||
| 853 | |||
| 854 | /** | ||
| 729 | * ubi_attach_mtd_dev - attach an MTD device. | 855 | * ubi_attach_mtd_dev - attach an MTD device. |
| 730 | * @mtd: MTD device description object | 856 | * @mtd: MTD device description object |
| 731 | * @ubi_num: number to assign to the new UBI device | 857 | * @ubi_num: number to assign to the new UBI device |
| @@ -806,8 +932,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
| 806 | 932 | ||
| 807 | mutex_init(&ubi->buf_mutex); | 933 | mutex_init(&ubi->buf_mutex); |
| 808 | mutex_init(&ubi->ckvol_mutex); | 934 | mutex_init(&ubi->ckvol_mutex); |
| 809 | mutex_init(&ubi->mult_mutex); | 935 | mutex_init(&ubi->device_mutex); |
| 810 | mutex_init(&ubi->volumes_mutex); | ||
| 811 | spin_lock_init(&ubi->volumes_lock); | 936 | spin_lock_init(&ubi->volumes_lock); |
| 812 | 937 | ||
| 813 | ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); | 938 | ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); |
| @@ -825,7 +950,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
| 825 | if (!ubi->peb_buf2) | 950 | if (!ubi->peb_buf2) |
| 826 | goto out_free; | 951 | goto out_free; |
| 827 | 952 | ||
| 828 | #ifdef CONFIG_MTD_UBI_DEBUG | 953 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
| 829 | mutex_init(&ubi->dbg_buf_mutex); | 954 | mutex_init(&ubi->dbg_buf_mutex); |
| 830 | ubi->dbg_peb_buf = vmalloc(ubi->peb_size); | 955 | ubi->dbg_peb_buf = vmalloc(ubi->peb_size); |
| 831 | if (!ubi->dbg_peb_buf) | 956 | if (!ubi->dbg_peb_buf) |
| @@ -872,11 +997,23 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset) | |||
| 872 | ubi->beb_rsvd_pebs); | 997 | ubi->beb_rsvd_pebs); |
| 873 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); | 998 | ubi_msg("max/mean erase counter: %d/%d", ubi->max_ec, ubi->mean_ec); |
| 874 | 999 | ||
| 1000 | /* | ||
| 1001 | * The below lock makes sure we do not race with 'ubi_thread()' which | ||
| 1002 | * checks @ubi->thread_enabled. Otherwise we may fail to wake it up. | ||
| 1003 | */ | ||
| 1004 | spin_lock(&ubi->wl_lock); | ||
| 875 | if (!DBG_DISABLE_BGT) | 1005 | if (!DBG_DISABLE_BGT) |
| 876 | ubi->thread_enabled = 1; | 1006 | ubi->thread_enabled = 1; |
| 877 | wake_up_process(ubi->bgt_thread); | 1007 | wake_up_process(ubi->bgt_thread); |
| 1008 | spin_unlock(&ubi->wl_lock); | ||
| 1009 | |||
| 1010 | /* Flash device priority is 0 - UBI needs to shut down first */ | ||
| 1011 | ubi->reboot_notifier.priority = 1; | ||
| 1012 | ubi->reboot_notifier.notifier_call = ubi_reboot_notifier; | ||
| 1013 | register_reboot_notifier(&ubi->reboot_notifier); | ||
| 878 | 1014 | ||
| 879 | ubi_devices[ubi_num] = ubi; | 1015 | ubi_devices[ubi_num] = ubi; |
| 1016 | ubi_notify_all(ubi, UBI_VOLUME_ADDED, NULL); | ||
| 880 | return ubi_num; | 1017 | return ubi_num; |
| 881 | 1018 | ||
| 882 | out_uif: | 1019 | out_uif: |
| @@ -892,7 +1029,7 @@ out_detach: | |||
| 892 | out_free: | 1029 | out_free: |
| 893 | vfree(ubi->peb_buf1); | 1030 | vfree(ubi->peb_buf1); |
| 894 | vfree(ubi->peb_buf2); | 1031 | vfree(ubi->peb_buf2); |
| 895 | #ifdef CONFIG_MTD_UBI_DEBUG | 1032 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
| 896 | vfree(ubi->dbg_peb_buf); | 1033 | vfree(ubi->dbg_peb_buf); |
| 897 | #endif | 1034 | #endif |
| 898 | kfree(ubi); | 1035 | kfree(ubi); |
| @@ -919,13 +1056,13 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) | |||
| 919 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) | 1056 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) |
| 920 | return -EINVAL; | 1057 | return -EINVAL; |
| 921 | 1058 | ||
| 922 | spin_lock(&ubi_devices_lock); | 1059 | ubi = ubi_get_device(ubi_num); |
| 923 | ubi = ubi_devices[ubi_num]; | 1060 | if (!ubi) |
| 924 | if (!ubi) { | ||
| 925 | spin_unlock(&ubi_devices_lock); | ||
| 926 | return -EINVAL; | 1061 | return -EINVAL; |
| 927 | } | ||
| 928 | 1062 | ||
| 1063 | spin_lock(&ubi_devices_lock); | ||
| 1064 | put_device(&ubi->dev); | ||
| 1065 | ubi->ref_count -= 1; | ||
| 929 | if (ubi->ref_count) { | 1066 | if (ubi->ref_count) { |
| 930 | if (!anyway) { | 1067 | if (!anyway) { |
| 931 | spin_unlock(&ubi_devices_lock); | 1068 | spin_unlock(&ubi_devices_lock); |
| @@ -939,12 +1076,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) | |||
| 939 | spin_unlock(&ubi_devices_lock); | 1076 | spin_unlock(&ubi_devices_lock); |
| 940 | 1077 | ||
| 941 | ubi_assert(ubi_num == ubi->ubi_num); | 1078 | ubi_assert(ubi_num == ubi->ubi_num); |
| 1079 | ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); | ||
| 942 | dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); | 1080 | dbg_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); |
| 943 | 1081 | ||
| 944 | /* | 1082 | /* |
| 945 | * Before freeing anything, we have to stop the background thread to | 1083 | * Before freeing anything, we have to stop the background thread to |
| 946 | * prevent it from doing anything on this device while we are freeing. | 1084 | * prevent it from doing anything on this device while we are freeing. |
| 947 | */ | 1085 | */ |
| 1086 | unregister_reboot_notifier(&ubi->reboot_notifier); | ||
| 948 | if (ubi->bgt_thread) | 1087 | if (ubi->bgt_thread) |
| 949 | kthread_stop(ubi->bgt_thread); | 1088 | kthread_stop(ubi->bgt_thread); |
| 950 | 1089 | ||
| @@ -961,7 +1100,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway) | |||
| 961 | put_mtd_device(ubi->mtd); | 1100 | put_mtd_device(ubi->mtd); |
| 962 | vfree(ubi->peb_buf1); | 1101 | vfree(ubi->peb_buf1); |
| 963 | vfree(ubi->peb_buf2); | 1102 | vfree(ubi->peb_buf2); |
| 964 | #ifdef CONFIG_MTD_UBI_DEBUG | 1103 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
| 965 | vfree(ubi->dbg_peb_buf); | 1104 | vfree(ubi->dbg_peb_buf); |
| 966 | #endif | 1105 | #endif |
| 967 | ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); | 1106 | ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); |
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index f8e0f68f2186..f237ddbb2713 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
| @@ -113,7 +113,8 @@ static int vol_cdev_open(struct inode *inode, struct file *file) | |||
| 113 | else | 113 | else |
| 114 | mode = UBI_READONLY; | 114 | mode = UBI_READONLY; |
| 115 | 115 | ||
| 116 | dbg_gen("open volume %d, mode %d", vol_id, mode); | 116 | dbg_gen("open device %d, volume %d, mode %d", |
| 117 | ubi_num, vol_id, mode); | ||
| 117 | 118 | ||
| 118 | desc = ubi_open_volume(ubi_num, vol_id, mode); | 119 | desc = ubi_open_volume(ubi_num, vol_id, mode); |
| 119 | if (IS_ERR(desc)) | 120 | if (IS_ERR(desc)) |
| @@ -128,7 +129,8 @@ static int vol_cdev_release(struct inode *inode, struct file *file) | |||
| 128 | struct ubi_volume_desc *desc = file->private_data; | 129 | struct ubi_volume_desc *desc = file->private_data; |
| 129 | struct ubi_volume *vol = desc->vol; | 130 | struct ubi_volume *vol = desc->vol; |
| 130 | 131 | ||
| 131 | dbg_gen("release volume %d, mode %d", vol->vol_id, desc->mode); | 132 | dbg_gen("release device %d, volume %d, mode %d", |
| 133 | vol->ubi->ubi_num, vol->vol_id, desc->mode); | ||
| 132 | 134 | ||
| 133 | if (vol->updating) { | 135 | if (vol->updating) { |
| 134 | ubi_warn("update of volume %d not finished, volume is damaged", | 136 | ubi_warn("update of volume %d not finished, volume is damaged", |
| @@ -393,7 +395,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf, | |||
| 393 | vol->corrupted = 1; | 395 | vol->corrupted = 1; |
| 394 | } | 396 | } |
| 395 | vol->checked = 1; | 397 | vol->checked = 1; |
| 396 | ubi_gluebi_updated(vol); | 398 | ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED); |
| 397 | revoke_exclusive(desc, UBI_READWRITE); | 399 | revoke_exclusive(desc, UBI_READWRITE); |
| 398 | } | 400 | } |
| 399 | 401 | ||
| @@ -558,7 +560,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, | |||
| 558 | break; | 560 | break; |
| 559 | } | 561 | } |
| 560 | 562 | ||
| 561 | /* Set volume property command*/ | 563 | /* Set volume property command */ |
| 562 | case UBI_IOCSETPROP: | 564 | case UBI_IOCSETPROP: |
| 563 | { | 565 | { |
| 564 | struct ubi_set_prop_req req; | 566 | struct ubi_set_prop_req req; |
| @@ -571,9 +573,9 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd, | |||
| 571 | } | 573 | } |
| 572 | switch (req.property) { | 574 | switch (req.property) { |
| 573 | case UBI_PROP_DIRECT_WRITE: | 575 | case UBI_PROP_DIRECT_WRITE: |
| 574 | mutex_lock(&ubi->volumes_mutex); | 576 | mutex_lock(&ubi->device_mutex); |
| 575 | desc->vol->direct_writes = !!req.value; | 577 | desc->vol->direct_writes = !!req.value; |
| 576 | mutex_unlock(&ubi->volumes_mutex); | 578 | mutex_unlock(&ubi->device_mutex); |
| 577 | break; | 579 | break; |
| 578 | default: | 580 | default: |
| 579 | err = -EINVAL; | 581 | err = -EINVAL; |
| @@ -810,9 +812,9 @@ static int rename_volumes(struct ubi_device *ubi, | |||
| 810 | re->desc->vol->vol_id, re->desc->vol->name); | 812 | re->desc->vol->vol_id, re->desc->vol->name); |
| 811 | } | 813 | } |
| 812 | 814 | ||
| 813 | mutex_lock(&ubi->volumes_mutex); | 815 | mutex_lock(&ubi->device_mutex); |
| 814 | err = ubi_rename_volumes(ubi, &rename_list); | 816 | err = ubi_rename_volumes(ubi, &rename_list); |
| 815 | mutex_unlock(&ubi->volumes_mutex); | 817 | mutex_unlock(&ubi->device_mutex); |
| 816 | 818 | ||
| 817 | out_free: | 819 | out_free: |
| 818 | list_for_each_entry_safe(re, re1, &rename_list, list) { | 820 | list_for_each_entry_safe(re, re1, &rename_list, list) { |
| @@ -856,9 +858,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, | |||
| 856 | if (err) | 858 | if (err) |
| 857 | break; | 859 | break; |
| 858 | 860 | ||
| 859 | mutex_lock(&ubi->volumes_mutex); | 861 | mutex_lock(&ubi->device_mutex); |
| 860 | err = ubi_create_volume(ubi, &req); | 862 | err = ubi_create_volume(ubi, &req); |
| 861 | mutex_unlock(&ubi->volumes_mutex); | 863 | mutex_unlock(&ubi->device_mutex); |
| 862 | if (err) | 864 | if (err) |
| 863 | break; | 865 | break; |
| 864 | 866 | ||
| @@ -887,9 +889,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, | |||
| 887 | break; | 889 | break; |
| 888 | } | 890 | } |
| 889 | 891 | ||
| 890 | mutex_lock(&ubi->volumes_mutex); | 892 | mutex_lock(&ubi->device_mutex); |
| 891 | err = ubi_remove_volume(desc, 0); | 893 | err = ubi_remove_volume(desc, 0); |
| 892 | mutex_unlock(&ubi->volumes_mutex); | 894 | mutex_unlock(&ubi->device_mutex); |
| 893 | 895 | ||
| 894 | /* | 896 | /* |
| 895 | * The volume is deleted (unless an error occurred), and the | 897 | * The volume is deleted (unless an error occurred), and the |
| @@ -926,9 +928,9 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, | |||
| 926 | pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, | 928 | pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, |
| 927 | desc->vol->usable_leb_size); | 929 | desc->vol->usable_leb_size); |
| 928 | 930 | ||
| 929 | mutex_lock(&ubi->volumes_mutex); | 931 | mutex_lock(&ubi->device_mutex); |
| 930 | err = ubi_resize_volume(desc, pebs); | 932 | err = ubi_resize_volume(desc, pebs); |
| 931 | mutex_unlock(&ubi->volumes_mutex); | 933 | mutex_unlock(&ubi->device_mutex); |
| 932 | ubi_close_volume(desc); | 934 | ubi_close_volume(desc); |
| 933 | break; | 935 | break; |
| 934 | } | 936 | } |
| @@ -952,9 +954,7 @@ static long ubi_cdev_ioctl(struct file *file, unsigned int cmd, | |||
| 952 | break; | 954 | break; |
| 953 | } | 955 | } |
| 954 | 956 | ||
| 955 | mutex_lock(&ubi->mult_mutex); | ||
| 956 | err = rename_volumes(ubi, req); | 957 | err = rename_volumes(ubi, req); |
| 957 | mutex_unlock(&ubi->mult_mutex); | ||
| 958 | kfree(req); | 958 | kfree(req); |
| 959 | break; | 959 | break; |
| 960 | } | 960 | } |
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 25def348e5ba..0f2034c3ed2f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
| @@ -419,8 +419,9 @@ retry: | |||
| 419 | * not implemented. | 419 | * not implemented. |
| 420 | */ | 420 | */ |
| 421 | if (err == UBI_IO_BAD_VID_HDR) { | 421 | if (err == UBI_IO_BAD_VID_HDR) { |
| 422 | ubi_warn("bad VID header at PEB %d, LEB" | 422 | ubi_warn("corrupted VID header at PEB " |
| 423 | "%d:%d", pnum, vol_id, lnum); | 423 | "%d, LEB %d:%d", pnum, vol_id, |
| 424 | lnum); | ||
| 424 | err = -EBADMSG; | 425 | err = -EBADMSG; |
| 425 | } else | 426 | } else |
| 426 | ubi_ro_mode(ubi); | 427 | ubi_ro_mode(ubi); |
| @@ -940,6 +941,33 @@ write_error: | |||
| 940 | } | 941 | } |
| 941 | 942 | ||
| 942 | /** | 943 | /** |
| 944 | * is_error_sane - check whether a read error is sane. | ||
| 945 | * @err: code of the error happened during reading | ||
| 946 | * | ||
| 947 | * This is a helper function for 'ubi_eba_copy_leb()' which is called when we | ||
| 948 | * cannot read data from the target PEB (an error @err happened). If the error | ||
| 949 | * code is sane, then we treat this error as non-fatal. Otherwise the error is | ||
| 950 | * fatal and UBI will be switched to R/O mode later. | ||
| 951 | * | ||
| 952 | * The idea is that we try not to switch to R/O mode if the read error is | ||
| 953 | * something which suggests there was a real read problem. E.g., %-EIO. Or a | ||
| 954 | * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O | ||
| 955 | * mode, simply because we do not know what happened at the MTD level, and we | ||
| 956 | * cannot handle this. E.g., the underlying driver may have become crazy, and | ||
| 957 | * it is safer to switch to R/O mode to preserve the data. | ||
| 958 | * | ||
| 959 | * And bear in mind, this is about reading from the target PEB, i.e. the PEB | ||
| 960 | * which we have just written. | ||
| 961 | */ | ||
| 962 | static int is_error_sane(int err) | ||
| 963 | { | ||
| 964 | if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_VID_HDR || | ||
| 965 | err == -ETIMEDOUT) | ||
| 966 | return 1; | ||
| 967 | return 0; | ||
| 968 | } | ||
| 969 | |||
| 970 | /** | ||
| 943 | * ubi_eba_copy_leb - copy logical eraseblock. | 971 | * ubi_eba_copy_leb - copy logical eraseblock. |
| 944 | * @ubi: UBI device description object | 972 | * @ubi: UBI device description object |
| 945 | * @from: physical eraseblock number from where to copy | 973 | * @from: physical eraseblock number from where to copy |
| @@ -950,12 +978,7 @@ write_error: | |||
| 950 | * physical eraseblock @to. The @vid_hdr buffer may be changed by this | 978 | * physical eraseblock @to. The @vid_hdr buffer may be changed by this |
| 951 | * function. Returns: | 979 | * function. Returns: |
| 952 | * o %0 in case of success; | 980 | * o %0 in case of success; |
| 953 | * o %1 if the operation was canceled because the volume is being deleted | 981 | * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_CANCEL_BITFLIPS, etc; |
| 954 | * or because the PEB was put meanwhile; | ||
| 955 | * o %2 if the operation was canceled because there was a write error to the | ||
| 956 | * target PEB; | ||
| 957 | * o %-EAGAIN if the operation was canceled because a bit-flip was detected | ||
| 958 | * in the target PEB; | ||
| 959 | * o a negative error code in case of failure. | 982 | * o a negative error code in case of failure. |
| 960 | */ | 983 | */ |
| 961 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | 984 | int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, |
| @@ -968,7 +991,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 968 | vol_id = be32_to_cpu(vid_hdr->vol_id); | 991 | vol_id = be32_to_cpu(vid_hdr->vol_id); |
| 969 | lnum = be32_to_cpu(vid_hdr->lnum); | 992 | lnum = be32_to_cpu(vid_hdr->lnum); |
| 970 | 993 | ||
| 971 | dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); | 994 | dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to); |
| 972 | 995 | ||
| 973 | if (vid_hdr->vol_type == UBI_VID_STATIC) { | 996 | if (vid_hdr->vol_type == UBI_VID_STATIC) { |
| 974 | data_size = be32_to_cpu(vid_hdr->data_size); | 997 | data_size = be32_to_cpu(vid_hdr->data_size); |
| @@ -986,13 +1009,12 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 986 | * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. | 1009 | * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish. |
| 987 | */ | 1010 | */ |
| 988 | vol = ubi->volumes[idx]; | 1011 | vol = ubi->volumes[idx]; |
| 1012 | spin_unlock(&ubi->volumes_lock); | ||
| 989 | if (!vol) { | 1013 | if (!vol) { |
| 990 | /* No need to do further work, cancel */ | 1014 | /* No need to do further work, cancel */ |
| 991 | dbg_eba("volume %d is being removed, cancel", vol_id); | 1015 | dbg_wl("volume %d is being removed, cancel", vol_id); |
| 992 | spin_unlock(&ubi->volumes_lock); | 1016 | return MOVE_CANCEL_RACE; |
| 993 | return 1; | ||
| 994 | } | 1017 | } |
| 995 | spin_unlock(&ubi->volumes_lock); | ||
| 996 | 1018 | ||
| 997 | /* | 1019 | /* |
| 998 | * We do not want anybody to write to this logical eraseblock while we | 1020 | * We do not want anybody to write to this logical eraseblock while we |
| @@ -1004,12 +1026,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1004 | * (@from). This task locks the LEB and goes sleep in the | 1026 | * (@from). This task locks the LEB and goes sleep in the |
| 1005 | * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are | 1027 | * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are |
| 1006 | * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the | 1028 | * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the |
| 1007 | * LEB is already locked, we just do not move it and return %1. | 1029 | * LEB is already locked, we just do not move it and return |
| 1030 | * %MOVE_CANCEL_RACE, which means that UBI will re-try, but later. | ||
| 1008 | */ | 1031 | */ |
| 1009 | err = leb_write_trylock(ubi, vol_id, lnum); | 1032 | err = leb_write_trylock(ubi, vol_id, lnum); |
| 1010 | if (err) { | 1033 | if (err) { |
| 1011 | dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum); | 1034 | dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); |
| 1012 | return err; | 1035 | return MOVE_CANCEL_RACE; |
| 1013 | } | 1036 | } |
| 1014 | 1037 | ||
| 1015 | /* | 1038 | /* |
| @@ -1018,25 +1041,26 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1018 | * cancel it. | 1041 | * cancel it. |
| 1019 | */ | 1042 | */ |
| 1020 | if (vol->eba_tbl[lnum] != from) { | 1043 | if (vol->eba_tbl[lnum] != from) { |
| 1021 | dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to " | 1044 | dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to " |
| 1022 | "PEB %d, cancel", vol_id, lnum, from, | 1045 | "PEB %d, cancel", vol_id, lnum, from, |
| 1023 | vol->eba_tbl[lnum]); | 1046 | vol->eba_tbl[lnum]); |
| 1024 | err = 1; | 1047 | err = MOVE_CANCEL_RACE; |
| 1025 | goto out_unlock_leb; | 1048 | goto out_unlock_leb; |
| 1026 | } | 1049 | } |
| 1027 | 1050 | ||
| 1028 | /* | 1051 | /* |
| 1029 | * OK, now the LEB is locked and we can safely start moving it. Since | 1052 | * OK, now the LEB is locked and we can safely start moving it. Since |
| 1030 | * this function utilizes the @ubi->peb1_buf buffer which is shared | 1053 | * this function utilizes the @ubi->peb_buf1 buffer which is shared |
| 1031 | * with some other functions, so lock the buffer by taking the | 1054 | * with some other functions - we lock the buffer by taking the |
| 1032 | * @ubi->buf_mutex. | 1055 | * @ubi->buf_mutex. |
| 1033 | */ | 1056 | */ |
| 1034 | mutex_lock(&ubi->buf_mutex); | 1057 | mutex_lock(&ubi->buf_mutex); |
| 1035 | dbg_eba("read %d bytes of data", aldata_size); | 1058 | dbg_wl("read %d bytes of data", aldata_size); |
| 1036 | err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); | 1059 | err = ubi_io_read_data(ubi, ubi->peb_buf1, from, 0, aldata_size); |
| 1037 | if (err && err != UBI_IO_BITFLIPS) { | 1060 | if (err && err != UBI_IO_BITFLIPS) { |
| 1038 | ubi_warn("error %d while reading data from PEB %d", | 1061 | ubi_warn("error %d while reading data from PEB %d", |
| 1039 | err, from); | 1062 | err, from); |
| 1063 | err = MOVE_SOURCE_RD_ERR; | ||
| 1040 | goto out_unlock_buf; | 1064 | goto out_unlock_buf; |
| 1041 | } | 1065 | } |
| 1042 | 1066 | ||
| @@ -1059,7 +1083,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1059 | cond_resched(); | 1083 | cond_resched(); |
| 1060 | 1084 | ||
| 1061 | /* | 1085 | /* |
| 1062 | * It may turn out to me that the whole @from physical eraseblock | 1086 | * It may turn out to be that the whole @from physical eraseblock |
| 1063 | * contains only 0xFF bytes. Then we have to only write the VID header | 1087 | * contains only 0xFF bytes. Then we have to only write the VID header |
| 1064 | * and do not write any data. This also means we should not set | 1088 | * and do not write any data. This also means we should not set |
| 1065 | * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. | 1089 | * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc. |
| @@ -1074,7 +1098,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1074 | err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); | 1098 | err = ubi_io_write_vid_hdr(ubi, to, vid_hdr); |
| 1075 | if (err) { | 1099 | if (err) { |
| 1076 | if (err == -EIO) | 1100 | if (err == -EIO) |
| 1077 | err = 2; | 1101 | err = MOVE_TARGET_WR_ERR; |
| 1078 | goto out_unlock_buf; | 1102 | goto out_unlock_buf; |
| 1079 | } | 1103 | } |
| 1080 | 1104 | ||
| @@ -1083,10 +1107,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1083 | /* Read the VID header back and check if it was written correctly */ | 1107 | /* Read the VID header back and check if it was written correctly */ |
| 1084 | err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); | 1108 | err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); |
| 1085 | if (err) { | 1109 | if (err) { |
| 1086 | if (err != UBI_IO_BITFLIPS) | 1110 | if (err != UBI_IO_BITFLIPS) { |
| 1087 | ubi_warn("cannot read VID header back from PEB %d", to); | 1111 | ubi_warn("error %d while reading VID header back from " |
| 1088 | else | 1112 | "PEB %d", err, to); |
| 1089 | err = -EAGAIN; | 1113 | if (is_error_sane(err)) |
| 1114 | err = MOVE_TARGET_RD_ERR; | ||
| 1115 | } else | ||
| 1116 | err = MOVE_CANCEL_BITFLIPS; | ||
| 1090 | goto out_unlock_buf; | 1117 | goto out_unlock_buf; |
| 1091 | } | 1118 | } |
| 1092 | 1119 | ||
| @@ -1094,7 +1121,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1094 | err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); | 1121 | err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size); |
| 1095 | if (err) { | 1122 | if (err) { |
| 1096 | if (err == -EIO) | 1123 | if (err == -EIO) |
| 1097 | err = 2; | 1124 | err = MOVE_TARGET_WR_ERR; |
| 1098 | goto out_unlock_buf; | 1125 | goto out_unlock_buf; |
| 1099 | } | 1126 | } |
| 1100 | 1127 | ||
| @@ -1107,11 +1134,13 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, | |||
| 1107 | 1134 | ||
| 1108 | err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size); | 1135 | err = ubi_io_read_data(ubi, ubi->peb_buf2, to, 0, aldata_size); |
| 1109 | if (err) { | 1136 | if (err) { |
| 1110 | if (err != UBI_IO_BITFLIPS) | 1137 | if (err != UBI_IO_BITFLIPS) { |
| 1111 | ubi_warn("cannot read data back from PEB %d", | 1138 | ubi_warn("error %d while reading data back " |
| 1112 | to); | 1139 | "from PEB %d", err, to); |
| 1113 | else | 1140 | if (is_error_sane(err)) |
| 1114 | err = -EAGAIN; | 1141 | err = MOVE_TARGET_RD_ERR; |
| 1142 | } else | ||
| 1143 | err = MOVE_CANCEL_BITFLIPS; | ||
| 1115 | goto out_unlock_buf; | 1144 | goto out_unlock_buf; |
| 1116 | } | 1145 | } |
| 1117 | 1146 | ||
diff --git a/drivers/mtd/ubi/gluebi.c b/drivers/mtd/ubi/gluebi.c
index 49cd55ade9c8..95aaac03f938 100644
--- a/drivers/mtd/ubi/gluebi.c
+++ b/drivers/mtd/ubi/gluebi.c
| @@ -19,17 +19,71 @@ | |||
| 19 | */ | 19 | */ |
| 20 | 20 | ||
| 21 | /* | 21 | /* |
| 22 | * This file includes implementation of fake MTD devices for each UBI volume. | 22 | * This is a small driver which implements fake MTD devices on top of UBI |
| 23 | * This sounds strange, but it is in fact quite useful to make MTD-oriented | 23 | * volumes. This sounds strange, but it is in fact quite useful to make |
| 24 | * software (including all the legacy software) to work on top of UBI. | 24 | * MTD-oriented software (including all the legacy software) work on top of |
| 25 | * UBI. | ||
| 25 | * | 26 | * |
| 26 | * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit | 27 | * Gluebi emulates MTD devices of "MTD_UBIVOLUME" type. Their minimal I/O unit |
| 27 | * size (mtd->writesize) is equivalent to the UBI minimal I/O unit. The | 28 | * size (@mtd->writesize) is equivalent to the UBI minimal I/O unit. The |
| 28 | * eraseblock size is equivalent to the logical eraseblock size of the volume. | 29 | * eraseblock size is equivalent to the logical eraseblock size of the volume. |
| 29 | */ | 30 | */ |
| 30 | 31 | ||
| 32 | #include <linux/err.h> | ||
| 33 | #include <linux/list.h> | ||
| 34 | #include <linux/sched.h> | ||
| 31 | #include <linux/math64.h> | 35 | #include <linux/math64.h> |
| 32 | #include "ubi.h" | 36 | #include <linux/module.h> |
| 37 | #include <linux/mutex.h> | ||
| 38 | #include <linux/mtd/ubi.h> | ||
| 39 | #include <linux/mtd/mtd.h> | ||
| 40 | #include "ubi-media.h" | ||
| 41 | |||
| 42 | #define err_msg(fmt, ...) \ | ||
| 43 | printk(KERN_DEBUG "gluebi (pid %d): %s: " fmt "\n", \ | ||
| 44 | current->pid, __func__, ##__VA_ARGS__) | ||
| 45 | |||
| 46 | /** | ||
| 47 | * struct gluebi_device - a gluebi device description data structure. | ||
| 48 | * @mtd: emulated MTD device description object | ||
| 49 | * @refcnt: gluebi device reference count | ||
| 50 | * @desc: UBI volume descriptor | ||
| 51 | * @ubi_num: UBI device number this gluebi device works on | ||
| 52 | * @vol_id: ID of UBI volume this gluebi device works on | ||
| 53 | * @list: link in a list of gluebi devices | ||
| 54 | */ | ||
| 55 | struct gluebi_device { | ||
| 56 | struct mtd_info mtd; | ||
| 57 | int refcnt; | ||
| 58 | struct ubi_volume_desc *desc; | ||
| 59 | int ubi_num; | ||
| 60 | int vol_id; | ||
| 61 | struct list_head list; | ||
| 62 | }; | ||
| 63 | |||
| 64 | /* List of all gluebi devices */ | ||
| 65 | static LIST_HEAD(gluebi_devices); | ||
| 66 | static DEFINE_MUTEX(devices_mutex); | ||
| 67 | |||
| 68 | /** | ||
| 69 | * find_gluebi_nolock - find a gluebi device. | ||
| 70 | * @ubi_num: UBI device number | ||
| 71 | * @vol_id: volume ID | ||
| 72 | * | ||
| 73 | * This function searches for the gluebi device corresponding to UBI device | ||
| 74 | * @ubi_num and UBI volume @vol_id. Returns the gluebi device description | ||
| 75 | * object in case of success and %NULL in case of failure. The caller has to | ||
| 76 | * have the &devices_mutex locked. | ||
| 77 | */ | ||
| 78 | static struct gluebi_device *find_gluebi_nolock(int ubi_num, int vol_id) | ||
| 79 | { | ||
| 80 | struct gluebi_device *gluebi; | ||
| 81 | |||
| 82 | list_for_each_entry(gluebi, &gluebi_devices, list) | ||
| 83 | if (gluebi->ubi_num == ubi_num && gluebi->vol_id == vol_id) | ||
| 84 | return gluebi; | ||
| 85 | return NULL; | ||
| 86 | } | ||
| 33 | 87 | ||
| 34 | /** | 88 | /** |
| 35 | * gluebi_get_device - get MTD device reference. | 89 | * gluebi_get_device - get MTD device reference. |
| @@ -41,15 +95,18 @@ | |||
| 41 | */ | 95 | */ |
| 42 | static int gluebi_get_device(struct mtd_info *mtd) | 96 | static int gluebi_get_device(struct mtd_info *mtd) |
| 43 | { | 97 | { |
| 44 | struct ubi_volume *vol; | 98 | struct gluebi_device *gluebi; |
| 99 | int ubi_mode = UBI_READONLY; | ||
| 45 | 100 | ||
| 46 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | 101 | if (!try_module_get(THIS_MODULE)) |
| 102 | return -ENODEV; | ||
| 47 | 103 | ||
| 48 | /* | 104 | if (mtd->flags & MTD_WRITEABLE) |
| 49 | * We do not introduce locks for gluebi reference count because the | 105 | ubi_mode = UBI_READWRITE; |
| 50 | * get_device()/put_device() calls are already serialized at MTD. | 106 | |
| 51 | */ | 107 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
| 52 | if (vol->gluebi_refcount > 0) { | 108 | mutex_lock(&devices_mutex); |
| 109 | if (gluebi->refcnt > 0) { | ||
| 53 | /* | 110 | /* |
| 54 | * The MTD device is already referenced and this is just one | 111 | * The MTD device is already referenced and this is just one |
| 55 | * more reference. MTD allows many users to open the same | 112 | * more reference. MTD allows many users to open the same |
| @@ -58,7 +115,8 @@ static int gluebi_get_device(struct mtd_info *mtd) | |||
| 58 | * open the UBI volume again - just increase the reference | 115 | * open the UBI volume again - just increase the reference |
| 59 | * counter and return. | 116 | * counter and return. |
| 60 | */ | 117 | */ |
| 61 | vol->gluebi_refcount += 1; | 118 | gluebi->refcnt += 1; |
| 119 | mutex_unlock(&devices_mutex); | ||
| 62 | return 0; | 120 | return 0; |
| 63 | } | 121 | } |
| 64 | 122 | ||
| @@ -66,11 +124,15 @@ static int gluebi_get_device(struct mtd_info *mtd) | |||
| 66 | * This is the first reference to this UBI volume via the MTD device | 124 | * This is the first reference to this UBI volume via the MTD device |
| 67 | * interface. Open the corresponding volume in read-write mode. | 125 | * interface. Open the corresponding volume in read-write mode. |
| 68 | */ | 126 | */ |
| 69 | vol->gluebi_desc = ubi_open_volume(vol->ubi->ubi_num, vol->vol_id, | 127 | gluebi->desc = ubi_open_volume(gluebi->ubi_num, gluebi->vol_id, |
| 70 | UBI_READWRITE); | 128 | ubi_mode); |
| 71 | if (IS_ERR(vol->gluebi_desc)) | 129 | if (IS_ERR(gluebi->desc)) { |
| 72 | return PTR_ERR(vol->gluebi_desc); | 130 | mutex_unlock(&devices_mutex); |
| 73 | vol->gluebi_refcount += 1; | 131 | module_put(THIS_MODULE); |
| 132 | return PTR_ERR(gluebi->desc); | ||
| 133 | } | ||
| 134 | gluebi->refcnt += 1; | ||
| 135 | mutex_unlock(&devices_mutex); | ||
| 74 | return 0; | 136 | return 0; |
| 75 | } | 137 | } |
| 76 | 138 | ||
| @@ -83,13 +145,15 @@ static int gluebi_get_device(struct mtd_info *mtd) | |||
| 83 | */ | 145 | */ |
| 84 | static void gluebi_put_device(struct mtd_info *mtd) | 146 | static void gluebi_put_device(struct mtd_info *mtd) |
| 85 | { | 147 | { |
| 86 | struct ubi_volume *vol; | 148 | struct gluebi_device *gluebi; |
| 87 | 149 | ||
| 88 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | 150 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
| 89 | vol->gluebi_refcount -= 1; | 151 | mutex_lock(&devices_mutex); |
| 90 | ubi_assert(vol->gluebi_refcount >= 0); | 152 | gluebi->refcnt -= 1; |
| 91 | if (vol->gluebi_refcount == 0) | 153 | if (gluebi->refcnt == 0) |
| 92 | ubi_close_volume(vol->gluebi_desc); | 154 | ubi_close_volume(gluebi->desc); |
| 155 | module_put(THIS_MODULE); | ||
| 156 | mutex_unlock(&devices_mutex); | ||
| 93 | } | 157 | } |
| 94 | 158 | ||
| 95 | /** | 159 | /** |
| @@ -107,16 +171,12 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
| 107 | size_t *retlen, unsigned char *buf) | 171 | size_t *retlen, unsigned char *buf) |
| 108 | { | 172 | { |
| 109 | int err = 0, lnum, offs, total_read; | 173 | int err = 0, lnum, offs, total_read; |
| 110 | struct ubi_volume *vol; | 174 | struct gluebi_device *gluebi; |
| 111 | struct ubi_device *ubi; | ||
| 112 | |||
| 113 | dbg_gen("read %zd bytes from offset %lld", len, from); | ||
| 114 | 175 | ||
| 115 | if (len < 0 || from < 0 || from + len > mtd->size) | 176 | if (len < 0 || from < 0 || from + len > mtd->size) |
| 116 | return -EINVAL; | 177 | return -EINVAL; |
| 117 | 178 | ||
| 118 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | 179 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
| 119 | ubi = vol->ubi; | ||
| 120 | 180 | ||
| 121 | lnum = div_u64_rem(from, mtd->erasesize, &offs); | 181 | lnum = div_u64_rem(from, mtd->erasesize, &offs); |
| 122 | total_read = len; | 182 | total_read = len; |
| @@ -126,7 +186,7 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
| 126 | if (to_read > total_read) | 186 | if (to_read > total_read) |
| 127 | to_read = total_read; | 187 | to_read = total_read; |
| 128 | 188 | ||
| 129 | err = ubi_eba_read_leb(ubi, vol, lnum, buf, offs, to_read, 0); | 189 | err = ubi_read(gluebi->desc, lnum, buf, offs, to_read); |
| 130 | if (err) | 190 | if (err) |
| 131 | break; | 191 | break; |
| 132 | 192 | ||
| @@ -152,21 +212,17 @@ static int gluebi_read(struct mtd_info *mtd, loff_t from, size_t len, | |||
| 152 | * case of failure. | 212 | * case of failure. |
| 153 | */ | 213 | */ |
| 154 | static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, | 214 | static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, |
| 155 | size_t *retlen, const u_char *buf) | 215 | size_t *retlen, const u_char *buf) |
| 156 | { | 216 | { |
| 157 | int err = 0, lnum, offs, total_written; | 217 | int err = 0, lnum, offs, total_written; |
| 158 | struct ubi_volume *vol; | 218 | struct gluebi_device *gluebi; |
| 159 | struct ubi_device *ubi; | ||
| 160 | |||
| 161 | dbg_gen("write %zd bytes to offset %lld", len, to); | ||
| 162 | 219 | ||
| 163 | if (len < 0 || to < 0 || len + to > mtd->size) | 220 | if (len < 0 || to < 0 || len + to > mtd->size) |
| 164 | return -EINVAL; | 221 | return -EINVAL; |
| 165 | 222 | ||
| 166 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | 223 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
| 167 | ubi = vol->ubi; | ||
| 168 | 224 | ||
| 169 | if (ubi->ro_mode) | 225 | if (!(mtd->flags & MTD_WRITEABLE)) |
| 170 | return -EROFS; | 226 | return -EROFS; |
| 171 | 227 | ||
| 172 | lnum = div_u64_rem(to, mtd->erasesize, &offs); | 228 | lnum = div_u64_rem(to, mtd->erasesize, &offs); |
| @@ -181,8 +237,7 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
| 181 | if (to_write > total_written) | 237 | if (to_write > total_written) |
| 182 | to_write = total_written; | 238 | to_write = total_written; |
| 183 | 239 | ||
| 184 | err = ubi_eba_write_leb(ubi, vol, lnum, buf, offs, to_write, | 240 | err = ubi_write(gluebi->desc, lnum, buf, offs, to_write); |
| 185 | UBI_UNKNOWN); | ||
| 186 | if (err) | 241 | if (err) |
| 187 | break; | 242 | break; |
| 188 | 243 | ||
| @@ -207,41 +262,36 @@ static int gluebi_write(struct mtd_info *mtd, loff_t to, size_t len, | |||
| 207 | static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) | 262 | static int gluebi_erase(struct mtd_info *mtd, struct erase_info *instr) |
| 208 | { | 263 | { |
| 209 | int err, i, lnum, count; | 264 | int err, i, lnum, count; |
| 210 | struct ubi_volume *vol; | 265 | struct gluebi_device *gluebi; |
| 211 | struct ubi_device *ubi; | ||
| 212 | |||
| 213 | dbg_gen("erase %llu bytes at offset %llu", (unsigned long long)instr->len, | ||
| 214 | (unsigned long long)instr->addr); | ||
| 215 | 266 | ||
| 216 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) | 267 | if (instr->addr < 0 || instr->addr > mtd->size - mtd->erasesize) |
| 217 | return -EINVAL; | 268 | return -EINVAL; |
| 218 | |||
| 219 | if (instr->len < 0 || instr->addr + instr->len > mtd->size) | 269 | if (instr->len < 0 || instr->addr + instr->len > mtd->size) |
| 220 | return -EINVAL; | 270 | return -EINVAL; |
| 221 | |||
| 222 | if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) | 271 | if (mtd_mod_by_ws(instr->addr, mtd) || mtd_mod_by_ws(instr->len, mtd)) |
| 223 | return -EINVAL; | 272 | return -EINVAL; |
| 224 | 273 | ||
| 225 | lnum = mtd_div_by_eb(instr->addr, mtd); | 274 | lnum = mtd_div_by_eb(instr->addr, mtd); |
| 226 | count = mtd_div_by_eb(instr->len, mtd); | 275 | count = mtd_div_by_eb(instr->len, mtd); |
| 227 | 276 | ||
| 228 | vol = container_of(mtd, struct ubi_volume, gluebi_mtd); | 277 | gluebi = container_of(mtd, struct gluebi_device, mtd); |
| 229 | ubi = vol->ubi; | ||
| 230 | 278 | ||
| 231 | if (ubi->ro_mode) | 279 | if (!(mtd->flags & MTD_WRITEABLE)) |
| 232 | return -EROFS; | 280 | return -EROFS; |
| 233 | 281 | ||
| 234 | for (i = 0; i < count; i++) { | 282 | for (i = 0; i < count - 1; i++) { |
| 235 | err = ubi_eba_unmap_leb(ubi, vol, lnum + i); | 283 | err = ubi_leb_unmap(gluebi->desc, lnum + i); |
| 236 | if (err) | 284 | if (err) |
| 237 | goto out_err; | 285 | goto out_err; |
| 238 | } | 286 | } |
| 239 | |||
| 240 | /* | 287 | /* |
| 241 | * MTD erase operations are synchronous, so we have to make sure the | 288 | * MTD erase operations are synchronous, so we have to make sure the |
| 242 | * physical eraseblock is wiped out. | 289 | * physical eraseblock is wiped out. |
| 290 | * | ||
| 291 | * Thus, perform leb_erase instead of leb_unmap operation - leb_erase | ||
| 292 | * will wait for the operation to complete. | ||
| 243 | */ | 293 | */ |
| 244 | err = ubi_wl_flush(ubi); | 294 | err = ubi_leb_erase(gluebi->desc, lnum + i); |
| 245 | if (err) | 295 | if (err) |
| 246 | goto out_err; | 296 | goto out_err; |
| 247 | 297 | ||
| @@ -256,28 +306,38 @@ out_err: | |||
| 256 | } | 306 | } |
| 257 | 307 | ||
| 258 | /** | 308 | /** |
| 259 | * ubi_create_gluebi - initialize gluebi for an UBI volume. | 309 | * gluebi_create - create a gluebi device for an UBI volume. |
| 260 | * @ubi: UBI device description object | 310 | * @di: UBI device description object |
| 261 | * @vol: volume description object | 311 | * @vi: UBI volume description object |
| 262 | * | 312 | * |
| 263 | * This function is called when an UBI volume is created in order to create | 313 | * This function is called when a new UBI volume is created in order to create |
| 264 | * corresponding fake MTD device. Returns zero in case of success and a | 314 | * corresponding fake MTD device. Returns zero in case of success and a |
| 265 | * negative error code in case of failure. | 315 | * negative error code in case of failure. |
| 266 | */ | 316 | */ |
| 267 | int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) | 317 | static int gluebi_create(struct ubi_device_info *di, |
| 318 | struct ubi_volume_info *vi) | ||
| 268 | { | 319 | { |
| 269 | struct mtd_info *mtd = &vol->gluebi_mtd; | 320 | struct gluebi_device *gluebi, *g; |
| 321 | struct mtd_info *mtd; | ||
| 270 | 322 | ||
| 271 | mtd->name = kmemdup(vol->name, vol->name_len + 1, GFP_KERNEL); | 323 | gluebi = kzalloc(sizeof(struct gluebi_device), GFP_KERNEL); |
| 272 | if (!mtd->name) | 324 | if (!gluebi) |
| 273 | return -ENOMEM; | 325 | return -ENOMEM; |
| 274 | 326 | ||
| 327 | mtd = &gluebi->mtd; | ||
| 328 | mtd->name = kmemdup(vi->name, vi->name_len + 1, GFP_KERNEL); | ||
| 329 | if (!mtd->name) { | ||
| 330 | kfree(gluebi); | ||
| 331 | return -ENOMEM; | ||
| 332 | } | ||
| 333 | |||
| 334 | gluebi->vol_id = vi->vol_id; | ||
| 275 | mtd->type = MTD_UBIVOLUME; | 335 | mtd->type = MTD_UBIVOLUME; |
| 276 | if (!ubi->ro_mode) | 336 | if (!di->ro_mode) |
| 277 | mtd->flags = MTD_WRITEABLE; | 337 | mtd->flags = MTD_WRITEABLE; |
| 278 | mtd->writesize = ubi->min_io_size; | ||
| 279 | mtd->owner = THIS_MODULE; | 338 | mtd->owner = THIS_MODULE; |
| 280 | mtd->erasesize = vol->usable_leb_size; | 339 | mtd->writesize = di->min_io_size; |
| 340 | mtd->erasesize = vi->usable_leb_size; | ||
| 281 | mtd->read = gluebi_read; | 341 | mtd->read = gluebi_read; |
| 282 | mtd->write = gluebi_write; | 342 | mtd->write = gluebi_write; |
| 283 | mtd->erase = gluebi_erase; | 343 | mtd->erase = gluebi_erase; |
| @@ -285,60 +345,196 @@ int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol) | |||
| 285 | mtd->put_device = gluebi_put_device; | 345 | mtd->put_device = gluebi_put_device; |
| 286 | 346 | ||
| 287 | /* | 347 | /* |
| 288 | * In case of dynamic volume, MTD device size is just volume size. In | 348 | * In case of a dynamic volume, MTD device size is just volume size. In |
| 289 | * case of a static volume the size is equivalent to the amount of data | 349 | * case of a static volume the size is equivalent to the amount of data |
| 290 | * bytes. | 350 | * bytes. |
| 291 | */ | 351 | */ |
| 292 | if (vol->vol_type == UBI_DYNAMIC_VOLUME) | 352 | if (vi->vol_type == UBI_DYNAMIC_VOLUME) |
| 293 | mtd->size = (long long)vol->usable_leb_size * vol->reserved_pebs; | 353 | mtd->size = (unsigned long long)vi->usable_leb_size * vi->size; |
| 294 | else | 354 | else |
| 295 | mtd->size = vol->used_bytes; | 355 | mtd->size = vi->used_bytes; |
| 356 | |||
| 357 | /* Just a sanity check - make sure this gluebi device does not exist */ | ||
| 358 | mutex_lock(&devices_mutex); | ||
| 359 | g = find_gluebi_nolock(vi->ubi_num, vi->vol_id); | ||
| 360 | if (g) | ||
| 361 | err_msg("gluebi MTD device %d from UBI device %d volume %d " | ||
| 362 | "already exists", g->mtd.index, vi->ubi_num, | ||
| 363 | vi->vol_id); | ||
| 364 | mutex_unlock(&devices_mutex); | ||
| 296 | 365 | ||
| 297 | if (add_mtd_device(mtd)) { | 366 | if (add_mtd_device(mtd)) { |
| 298 | ubi_err("cannot not add MTD device"); | 367 | err_msg("cannot add MTD device"); |
| 299 | kfree(mtd->name); | 368 | kfree(mtd->name); |
| 369 | kfree(gluebi); | ||
| 300 | return -ENFILE; | 370 | return -ENFILE; |
| 301 | } | 371 | } |
| 302 | 372 | ||
| 303 | dbg_gen("added mtd%d (\"%s\"), size %llu, EB size %u", | 373 | mutex_lock(&devices_mutex); |
| 304 | mtd->index, mtd->name, (unsigned long long)mtd->size, mtd->erasesize); | 374 | list_add_tail(&gluebi->list, &gluebi_devices); |
| 375 | mutex_unlock(&devices_mutex); | ||
| 305 | return 0; | 376 | return 0; |
| 306 | } | 377 | } |
| 307 | 378 | ||
| 308 | /** | 379 | /** |
| 309 | * ubi_destroy_gluebi - close gluebi for an UBI volume. | 380 | * gluebi_remove - remove a gluebi device. |
| 310 | * @vol: volume description object | 381 | * @vi: UBI volume description object |
| 311 | * | 382 | * |
| 312 | * This function is called when an UBI volume is removed in order to remove | 383 | * This function is called when an UBI volume is removed and it removes |
| 313 | * corresponding fake MTD device. Returns zero in case of success and a | 384 | * corresponding fake MTD device. Returns zero in case of success and a |
| 314 | * negative error code in case of failure. | 385 | * negative error code in case of failure. |
| 315 | */ | 386 | */ |
| 316 | int ubi_destroy_gluebi(struct ubi_volume *vol) | 387 | static int gluebi_remove(struct ubi_volume_info *vi) |
| 317 | { | 388 | { |
| 318 | int err; | 389 | int err = 0; |
| 319 | struct mtd_info *mtd = &vol->gluebi_mtd; | 390 | struct mtd_info *mtd; |
| 391 | struct gluebi_device *gluebi; | ||
| 392 | |||
| 393 | mutex_lock(&devices_mutex); | ||
| 394 | gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); | ||
| 395 | if (!gluebi) { | ||
| 396 | err_msg("got remove notification for unknown UBI device %d " | ||
| 397 | "volume %d", vi->ubi_num, vi->vol_id); | ||
| 398 | err = -ENOENT; | ||
| 399 | } else if (gluebi->refcnt) | ||
| 400 | err = -EBUSY; | ||
| 401 | else | ||
| 402 | list_del(&gluebi->list); | ||
| 403 | mutex_unlock(&devices_mutex); | ||
| 404 | if (err) | ||
| 405 | return err; | ||
| 320 | 406 | ||
| 321 | dbg_gen("remove mtd%d", mtd->index); | 407 | mtd = &gluebi->mtd; |
| 322 | err = del_mtd_device(mtd); | 408 | err = del_mtd_device(mtd); |
| 323 | if (err) | 409 | if (err) { |
| 410 | err_msg("cannot remove fake MTD device %d, UBI device %d, " | ||
| 411 | "volume %d, error %d", mtd->index, gluebi->ubi_num, | ||
| 412 | gluebi->vol_id, err); | ||
| 413 | mutex_lock(&devices_mutex); | ||
| 414 | list_add_tail(&gluebi->list, &gluebi_devices); | ||
| 415 | mutex_unlock(&devices_mutex); | ||
| 324 | return err; | 416 | return err; |
| 417 | } | ||
| 418 | |||
| 325 | kfree(mtd->name); | 419 | kfree(mtd->name); |
| 420 | kfree(gluebi); | ||
| 326 | return 0; | 421 | return 0; |
| 327 | } | 422 | } |
| 328 | 423 | ||
| 329 | /** | 424 | /** |
| 330 | * ubi_gluebi_updated - UBI volume was updated notifier. | 425 | * gluebi_updated - UBI volume was updated notifier. |
| 331 | * @vol: volume description object | 426 | * @vi: volume info structure |
| 332 | * | 427 | * |
| 333 | * This function is called every time an UBI volume is updated. This function | 428 | * This function is called every time an UBI volume is updated. It does nothing |
| 334 | * does nothing if volume @vol is dynamic, and changes MTD device size if the | 429 | * if the volume @vi is dynamic, and changes MTD device size if the |
| 335 | * volume is static. This is needed because static volumes cannot be read past | 430 | * volume is static. This is needed because static volumes cannot be read past |
| 336 | * data they contain. | 431 | * data they contain. This function returns zero in case of success and a |
| 432 | * negative error code in case of error. | ||
| 433 | */ | ||
| 434 | static int gluebi_updated(struct ubi_volume_info *vi) | ||
| 435 | { | ||
| 436 | struct gluebi_device *gluebi; | ||
| 437 | |||
| 438 | mutex_lock(&devices_mutex); | ||
| 439 | gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); | ||
| 440 | if (!gluebi) { | ||
| 441 | mutex_unlock(&devices_mutex); | ||
| 442 | err_msg("got update notification for unknown UBI device %d " | ||
| 443 | "volume %d", vi->ubi_num, vi->vol_id); | ||
| 444 | return -ENOENT; | ||
| 445 | } | ||
| 446 | |||
| 447 | if (vi->vol_type == UBI_STATIC_VOLUME) | ||
| 448 | gluebi->mtd.size = vi->used_bytes; | ||
| 449 | mutex_unlock(&devices_mutex); | ||
| 450 | return 0; | ||
| 451 | } | ||
| 452 | |||
| 453 | /** | ||
| 454 | * gluebi_resized - UBI volume was re-sized notifier. | ||
| 455 | * @vi: volume info structure | ||
| 456 | * | ||
| 457 | * This function is called every time an UBI volume is re-sized. It changes the | ||
| 458 | * corresponding fake MTD device size. This function returns zero in case of | ||
| 459 | * success and a negative error code in case of error. | ||
| 460 | */ | ||
| 461 | static int gluebi_resized(struct ubi_volume_info *vi) | ||
| 462 | { | ||
| 463 | struct gluebi_device *gluebi; | ||
| 464 | |||
| 465 | mutex_lock(&devices_mutex); | ||
| 466 | gluebi = find_gluebi_nolock(vi->ubi_num, vi->vol_id); | ||
| 467 | if (!gluebi) { | ||
| 468 | mutex_unlock(&devices_mutex); | ||
| 469 | err_msg("got update notification for unknown UBI device %d " | ||
| 470 | "volume %d", vi->ubi_num, vi->vol_id); | ||
| 471 | return -ENOENT; | ||
| 472 | } | ||
| 473 | gluebi->mtd.size = vi->used_bytes; | ||
| 474 | mutex_unlock(&devices_mutex); | ||
| 475 | return 0; | ||
| 476 | } | ||
| 477 | |||
| 478 | /** | ||
| 479 | * gluebi_notify - UBI notification handler. | ||
| 480 | * @nb: registered notifier block | ||
| 481 | * @l: notification type | ||
| 482 | * @ns_ptr: pointer to the &struct ubi_notification object | ||
| 337 | */ | 483 | */ |
| 338 | void ubi_gluebi_updated(struct ubi_volume *vol) | 484 | static int gluebi_notify(struct notifier_block *nb, unsigned long l, |
| 485 | void *ns_ptr) | ||
| 339 | { | 486 | { |
| 340 | struct mtd_info *mtd = &vol->gluebi_mtd; | 487 | struct ubi_notification *nt = ns_ptr; |
| 488 | |||
| 489 | switch (l) { | ||
| 490 | case UBI_VOLUME_ADDED: | ||
| 491 | gluebi_create(&nt->di, &nt->vi); | ||
| 492 | break; | ||
| 493 | case UBI_VOLUME_REMOVED: | ||
| 494 | gluebi_remove(&nt->vi); | ||
| 495 | break; | ||
| 496 | case UBI_VOLUME_RESIZED: | ||
| 497 | gluebi_resized(&nt->vi); | ||
| 498 | break; | ||
| 499 | case UBI_VOLUME_UPDATED: | ||
| 500 | gluebi_updated(&nt->vi); | ||
| 501 | break; | ||
| 502 | default: | ||
| 503 | break; | ||
| 504 | } | ||
| 505 | return NOTIFY_OK; | ||
| 506 | } | ||
| 341 | 507 | ||
| 342 | if (vol->vol_type == UBI_STATIC_VOLUME) | 508 | static struct notifier_block gluebi_notifier = { |
| 343 | mtd->size = vol->used_bytes; | 509 | .notifier_call = gluebi_notify, |
| 510 | }; | ||
| 511 | |||
| 512 | static int __init ubi_gluebi_init(void) | ||
| 513 | { | ||
| 514 | return ubi_register_volume_notifier(&gluebi_notifier, 0); | ||
| 344 | } | 515 | } |
| 516 | |||
| 517 | static void __exit ubi_gluebi_exit(void) | ||
| 518 | { | ||
| 519 | struct gluebi_device *gluebi, *g; | ||
| 520 | |||
| 521 | list_for_each_entry_safe(gluebi, g, &gluebi_devices, list) { | ||
| 522 | int err; | ||
| 523 | struct mtd_info *mtd = &gluebi->mtd; | ||
| 524 | |||
| 525 | err = del_mtd_device(mtd); | ||
| 526 | if (err) | ||
| 527 | err_msg("error %d while removing gluebi MTD device %d, " | ||
| 528 | "UBI device %d, volume %d - ignoring", err, | ||
| 529 | mtd->index, gluebi->ubi_num, gluebi->vol_id); | ||
| 530 | kfree(mtd->name); | ||
| 531 | kfree(gluebi); | ||
| 532 | } | ||
| 533 | ubi_unregister_volume_notifier(&gluebi_notifier); | ||
| 534 | } | ||
| 535 | |||
| 536 | module_init(ubi_gluebi_init); | ||
| 537 | module_exit(ubi_gluebi_exit); | ||
| 538 | MODULE_DESCRIPTION("MTD emulation layer over UBI volumes"); | ||
| 539 | MODULE_AUTHOR("Artem Bityutskiy, Joern Engel"); | ||
| 540 | MODULE_LICENSE("GPL"); | ||
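The gluebi.c hunks above turn the driver into a standalone consumer of the new UBI volume notifier chain. For readers wanting to write a similar out-of-tree client, the sketch below mirrors the gluebi_notify()/ubi_gluebi_init() pattern shown above; the module name, messages and event selection are illustrative only and are not part of this patch.

/* Minimal UBI volume-notifier client, modeled on gluebi_notify() above.
 * Illustrative sketch; it relies only on the API this patch exports. */
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/mtd/ubi.h>

static int demo_notify(struct notifier_block *nb, unsigned long l, void *ptr)
{
	struct ubi_notification *nt = ptr;

	switch (l) {
	case UBI_VOLUME_ADDED:
		pr_info("volume %d added on UBI device %d\n",
			nt->vi.vol_id, nt->vi.ubi_num);
		break;
	case UBI_VOLUME_REMOVED:
		pr_info("volume %d removed from UBI device %d\n",
			nt->vi.vol_id, nt->vi.ubi_num);
		break;
	default:
		/* re-size/update/re-name events are ignored in this sketch */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_notify,
};

static int __init demo_init(void)
{
	/* second argument 0: also receive ADDED events for existing volumes */
	return ubi_register_volume_notifier(&demo_nb, 0);
}

static void __exit demo_exit(void)
{
	ubi_unregister_volume_notifier(&demo_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");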
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c index fe81039f2a7c..effaff28bab1 100644 --- a/drivers/mtd/ubi/io.c +++ b/drivers/mtd/ubi/io.c | |||
| @@ -100,6 +100,7 @@ static int paranoid_check_vid_hdr(const struct ubi_device *ubi, int pnum, | |||
| 100 | const struct ubi_vid_hdr *vid_hdr); | 100 | const struct ubi_vid_hdr *vid_hdr); |
| 101 | static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, | 101 | static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, |
| 102 | int len); | 102 | int len); |
| 103 | static int paranoid_check_empty(struct ubi_device *ubi, int pnum); | ||
| 103 | #else | 104 | #else |
| 104 | #define paranoid_check_not_bad(ubi, pnum) 0 | 105 | #define paranoid_check_not_bad(ubi, pnum) 0 |
| 105 | #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 | 106 | #define paranoid_check_peb_ec_hdr(ubi, pnum) 0 |
| @@ -107,6 +108,7 @@ static int paranoid_check_all_ff(struct ubi_device *ubi, int pnum, int offset, | |||
| 107 | #define paranoid_check_peb_vid_hdr(ubi, pnum) 0 | 108 | #define paranoid_check_peb_vid_hdr(ubi, pnum) 0 |
| 108 | #define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 | 109 | #define paranoid_check_vid_hdr(ubi, pnum, vid_hdr) 0 |
| 109 | #define paranoid_check_all_ff(ubi, pnum, offset, len) 0 | 110 | #define paranoid_check_all_ff(ubi, pnum, offset, len) 0 |
| 111 | #define paranoid_check_empty(ubi, pnum) 0 | ||
| 110 | #endif | 112 | #endif |
| 111 | 113 | ||
| 112 | /** | 114 | /** |
| @@ -670,11 +672,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum, | |||
| 670 | if (read_err != -EBADMSG && | 672 | if (read_err != -EBADMSG && |
| 671 | check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { | 673 | check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { |
| 672 | /* The physical eraseblock is supposedly empty */ | 674 | /* The physical eraseblock is supposedly empty */ |
| 673 | |||
| 674 | /* | ||
| 675 | * The below is just a paranoid check, it has to be | ||
| 676 | * compiled out if paranoid checks are disabled. | ||
| 677 | */ | ||
| 678 | err = paranoid_check_all_ff(ubi, pnum, 0, | 675 | err = paranoid_check_all_ff(ubi, pnum, 0, |
| 679 | ubi->peb_size); | 676 | ubi->peb_size); |
| 680 | if (err) | 677 | if (err) |
| @@ -902,7 +899,7 @@ bad: | |||
| 902 | * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected | 899 | * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected |
| 903 | * and corrected by the flash driver; this is harmless but may indicate that | 900 | * and corrected by the flash driver; this is harmless but may indicate that |
| 904 | * this eraseblock may become bad soon; | 901 | * this eraseblock may become bad soon; |
| 905 | * o %UBI_IO_BAD_VID_HRD if the volume identifier header is corrupted (a CRC | 902 | * o %UBI_IO_BAD_VID_HDR if the volume identifier header is corrupted (a CRC |
| 906 | * error detected); | 903 | * error detected); |
| 907 | * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID | 904 | * o %UBI_IO_PEB_FREE if the physical eraseblock is free (i.e., there is no VID |
| 908 | * header there); | 905 | * header there); |
| @@ -955,8 +952,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum, | |||
| 955 | * The below is just a paranoid check, it has to be | 952 | * The below is just a paranoid check, it has to be |
| 956 | * compiled out if paranoid checks are disabled. | 953 | * compiled out if paranoid checks are disabled. |
| 957 | */ | 954 | */ |
| 958 | err = paranoid_check_all_ff(ubi, pnum, ubi->leb_start, | 955 | err = paranoid_check_empty(ubi, pnum); |
| 959 | ubi->leb_size); | ||
| 960 | if (err) | 956 | if (err) |
| 961 | return err > 0 ? UBI_IO_BAD_VID_HDR : err; | 957 | return err > 0 ? UBI_IO_BAD_VID_HDR : err; |
| 962 | 958 | ||
| @@ -1280,4 +1276,74 @@ error: | |||
| 1280 | return err; | 1276 | return err; |
| 1281 | } | 1277 | } |
| 1282 | 1278 | ||
| 1279 | /** | ||
| 1280 | * paranoid_check_empty - whether a PEB is empty. | ||
| 1281 | * @ubi: UBI device description object | ||
| 1282 | * @pnum: the physical eraseblock number to check | ||
| 1283 | * | ||
| 1284 | * This function makes sure PEB @pnum is empty, which means it contains only | ||
| 1285 | * %0xFF data bytes. Returns zero if the PEB is empty, %1 if not, and a | ||
| 1286 | * negative error code in case of failure. | ||
| 1287 | * | ||
| 1288 | * Empty PEBs have the EC header, and do not have the VID header. The caller of | ||
| 1289 | * this function should have already made sure the PEB does not have the VID | ||
| 1290 | * header. However, this function re-checks that, because it is possible that | ||
| 1291 | * the header and data have already been written to the PEB. | ||
| 1292 | * | ||
| 1293 | * Let's consider a possible scenario. Suppose there are 2 tasks - A and B. | ||
| 1294 | * Task A is in 'wear_leveling_worker()'. It is reading the VID header of PEB X to | ||
| 1295 | * find which LEB it corresponds to. PEB X is currently unmapped, and has no | ||
| 1296 | * VID header. Task B is trying to write to PEB X. | ||
| 1297 | * | ||
| 1298 | * Task A: in 'ubi_io_read_vid_hdr()': reads the VID header from PEB X. The | ||
| 1299 | * read data contain all 0xFF bytes; | ||
| 1300 | * Task B: writes VID header and some data to PEB X; | ||
| 1301 | * Task A: assumes PEB X is empty, calls 'paranoid_check_empty()'. If the | ||
| 1302 | * check did not re-read the VID header and bail out when one is present, | ||
| 1303 | * it would falsely flag PEB X as corrupted. | ||
| 1304 | */ | ||
| 1305 | static int paranoid_check_empty(struct ubi_device *ubi, int pnum) | ||
| 1306 | { | ||
| 1307 | int err, offs = ubi->vid_hdr_aloffset, len = ubi->vid_hdr_alsize; | ||
| 1308 | size_t read; | ||
| 1309 | uint32_t magic; | ||
| 1310 | const struct ubi_vid_hdr *vid_hdr; | ||
| 1311 | |||
| 1312 | mutex_lock(&ubi->dbg_buf_mutex); | ||
| 1313 | err = ubi->mtd->read(ubi->mtd, offs, len, &read, ubi->dbg_peb_buf); | ||
| 1314 | if (err && err != -EUCLEAN) { | ||
| 1315 | ubi_err("error %d while reading %d bytes from PEB %d:%d, " | ||
| 1316 | "read %zd bytes", err, len, pnum, offs, read); | ||
| 1317 | goto error; | ||
| 1318 | } | ||
| 1319 | |||
| 1320 | vid_hdr = ubi->dbg_peb_buf; | ||
| 1321 | magic = be32_to_cpu(vid_hdr->magic); | ||
| 1322 | if (magic == UBI_VID_HDR_MAGIC) | ||
| 1323 | /* The PEB contains VID header, so it is not empty */ | ||
| 1324 | goto out; | ||
| 1325 | |||
| 1326 | err = check_pattern(ubi->dbg_peb_buf, 0xFF, len); | ||
| 1327 | if (err == 0) { | ||
| 1328 | ubi_err("flash region at PEB %d:%d, length %d does not " | ||
| 1329 | "contain all 0xFF bytes", pnum, offs, len); | ||
| 1330 | goto fail; | ||
| 1331 | } | ||
| 1332 | |||
| 1333 | out: | ||
| 1334 | mutex_unlock(&ubi->dbg_buf_mutex); | ||
| 1335 | return 0; | ||
| 1336 | |||
| 1337 | fail: | ||
| 1338 | ubi_err("paranoid check failed for PEB %d", pnum); | ||
| 1339 | ubi_msg("hex dump of the %d-%d region", offs, offs + len); | ||
| 1340 | print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, | ||
| 1341 | ubi->dbg_peb_buf, len, 1); | ||
| 1342 | err = 1; | ||
| 1343 | error: | ||
| 1344 | ubi_dbg_dump_stack(); | ||
| 1345 | mutex_unlock(&ubi->dbg_buf_mutex); | ||
| 1346 | return err; | ||
| 1347 | } | ||
| 1348 | |||
| 1283 | #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ | 1349 | #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */ |
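The new paranoid_check_empty() above boils down to two questions: does the region carry a valid VID magic, and if not, does it still read back as all 0xFF? UBI answers the second with its internal check_pattern() helper; the standalone sketch below only illustrates that notion and is not code from the patch.

/* Illustration of the "all 0xFF" test that paranoid_check_empty() relies on.
 * UBI's real helper is check_pattern(); this version is for explanation only. */
static int buf_is_all_ff(const void *buf, int len)
{
	const unsigned char *p = buf;
	int i;

	for (i = 0; i < len; i++)
		if (p[i] != 0xFF)
			return 0;	/* programmed data found - not empty */
	return 1;			/* region reads as erased flash */
}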
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c index 4abbe573fa40..88a72e9c8beb 100644 --- a/drivers/mtd/ubi/kapi.c +++ b/drivers/mtd/ubi/kapi.c | |||
| @@ -26,6 +26,24 @@ | |||
| 26 | #include "ubi.h" | 26 | #include "ubi.h" |
| 27 | 27 | ||
| 28 | /** | 28 | /** |
| 29 | * ubi_do_get_device_info - get information about UBI device. | ||
| 30 | * @ubi: UBI device description object | ||
| 31 | * @di: the information is stored here | ||
| 32 | * | ||
| 33 | * This function is the same as 'ubi_get_device_info()', but it assumes the UBI | ||
| 34 | * device is locked and cannot disappear. | ||
| 35 | */ | ||
| 36 | void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di) | ||
| 37 | { | ||
| 38 | di->ubi_num = ubi->ubi_num; | ||
| 39 | di->leb_size = ubi->leb_size; | ||
| 40 | di->min_io_size = ubi->min_io_size; | ||
| 41 | di->ro_mode = ubi->ro_mode; | ||
| 42 | di->cdev = ubi->cdev.dev; | ||
| 43 | } | ||
| 44 | EXPORT_SYMBOL_GPL(ubi_do_get_device_info); | ||
| 45 | |||
| 46 | /** | ||
| 29 | * ubi_get_device_info - get information about UBI device. | 47 | * ubi_get_device_info - get information about UBI device. |
| 30 | * @ubi_num: UBI device number | 48 | * @ubi_num: UBI device number |
| 31 | * @di: the information is stored here | 49 | * @di: the information is stored here |
| @@ -39,33 +57,24 @@ int ubi_get_device_info(int ubi_num, struct ubi_device_info *di) | |||
| 39 | 57 | ||
| 40 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) | 58 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) |
| 41 | return -EINVAL; | 59 | return -EINVAL; |
| 42 | |||
| 43 | ubi = ubi_get_device(ubi_num); | 60 | ubi = ubi_get_device(ubi_num); |
| 44 | if (!ubi) | 61 | if (!ubi) |
| 45 | return -ENODEV; | 62 | return -ENODEV; |
| 46 | 63 | ubi_do_get_device_info(ubi, di); | |
| 47 | di->ubi_num = ubi->ubi_num; | ||
| 48 | di->leb_size = ubi->leb_size; | ||
| 49 | di->min_io_size = ubi->min_io_size; | ||
| 50 | di->ro_mode = ubi->ro_mode; | ||
| 51 | di->cdev = ubi->cdev.dev; | ||
| 52 | |||
| 53 | ubi_put_device(ubi); | 64 | ubi_put_device(ubi); |
| 54 | return 0; | 65 | return 0; |
| 55 | } | 66 | } |
| 56 | EXPORT_SYMBOL_GPL(ubi_get_device_info); | 67 | EXPORT_SYMBOL_GPL(ubi_get_device_info); |
| 57 | 68 | ||
| 58 | /** | 69 | /** |
| 59 | * ubi_get_volume_info - get information about UBI volume. | 70 | * ubi_do_get_volume_info - get information about UBI volume. |
| 60 | * @desc: volume descriptor | 71 | * @ubi: UBI device description object |
| 72 | * @vol: volume description object | ||
| 61 | * @vi: the information is stored here | 73 | * @vi: the information is stored here |
| 62 | */ | 74 | */ |
| 63 | void ubi_get_volume_info(struct ubi_volume_desc *desc, | 75 | void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, |
| 64 | struct ubi_volume_info *vi) | 76 | struct ubi_volume_info *vi) |
| 65 | { | 77 | { |
| 66 | const struct ubi_volume *vol = desc->vol; | ||
| 67 | const struct ubi_device *ubi = vol->ubi; | ||
| 68 | |||
| 69 | vi->vol_id = vol->vol_id; | 78 | vi->vol_id = vol->vol_id; |
| 70 | vi->ubi_num = ubi->ubi_num; | 79 | vi->ubi_num = ubi->ubi_num; |
| 71 | vi->size = vol->reserved_pebs; | 80 | vi->size = vol->reserved_pebs; |
| @@ -79,6 +88,17 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc, | |||
| 79 | vi->name = vol->name; | 88 | vi->name = vol->name; |
| 80 | vi->cdev = vol->cdev.dev; | 89 | vi->cdev = vol->cdev.dev; |
| 81 | } | 90 | } |
| 91 | |||
| 92 | /** | ||
| 93 | * ubi_get_volume_info - get information about UBI volume. | ||
| 94 | * @desc: volume descriptor | ||
| 95 | * @vi: the information is stored here | ||
| 96 | */ | ||
| 97 | void ubi_get_volume_info(struct ubi_volume_desc *desc, | ||
| 98 | struct ubi_volume_info *vi) | ||
| 99 | { | ||
| 100 | ubi_do_get_volume_info(desc->vol->ubi, desc->vol, vi); | ||
| 101 | } | ||
| 82 | EXPORT_SYMBOL_GPL(ubi_get_volume_info); | 102 | EXPORT_SYMBOL_GPL(ubi_get_volume_info); |
| 83 | 103 | ||
| 84 | /** | 104 | /** |
| @@ -106,7 +126,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode) | |||
| 106 | struct ubi_device *ubi; | 126 | struct ubi_device *ubi; |
| 107 | struct ubi_volume *vol; | 127 | struct ubi_volume *vol; |
| 108 | 128 | ||
| 109 | dbg_gen("open device %d volume %d, mode %d", ubi_num, vol_id, mode); | 129 | dbg_gen("open device %d, volume %d, mode %d", ubi_num, vol_id, mode); |
| 110 | 130 | ||
| 111 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) | 131 | if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES) |
| 112 | return ERR_PTR(-EINVAL); | 132 | return ERR_PTR(-EINVAL); |
| @@ -196,6 +216,8 @@ out_free: | |||
| 196 | kfree(desc); | 216 | kfree(desc); |
| 197 | out_put_ubi: | 217 | out_put_ubi: |
| 198 | ubi_put_device(ubi); | 218 | ubi_put_device(ubi); |
| 219 | dbg_err("cannot open device %d, volume %d, error %d", | ||
| 220 | ubi_num, vol_id, err); | ||
| 199 | return ERR_PTR(err); | 221 | return ERR_PTR(err); |
| 200 | } | 222 | } |
| 201 | EXPORT_SYMBOL_GPL(ubi_open_volume); | 223 | EXPORT_SYMBOL_GPL(ubi_open_volume); |
| @@ -215,7 +237,7 @@ struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, | |||
| 215 | struct ubi_device *ubi; | 237 | struct ubi_device *ubi; |
| 216 | struct ubi_volume_desc *ret; | 238 | struct ubi_volume_desc *ret; |
| 217 | 239 | ||
| 218 | dbg_gen("open volume %s, mode %d", name, mode); | 240 | dbg_gen("open device %d, volume %s, mode %d", ubi_num, name, mode); |
| 219 | 241 | ||
| 220 | if (!name) | 242 | if (!name) |
| 221 | return ERR_PTR(-EINVAL); | 243 | return ERR_PTR(-EINVAL); |
| @@ -266,7 +288,8 @@ void ubi_close_volume(struct ubi_volume_desc *desc) | |||
| 266 | struct ubi_volume *vol = desc->vol; | 288 | struct ubi_volume *vol = desc->vol; |
| 267 | struct ubi_device *ubi = vol->ubi; | 289 | struct ubi_device *ubi = vol->ubi; |
| 268 | 290 | ||
| 269 | dbg_gen("close volume %d, mode %d", vol->vol_id, desc->mode); | 291 | dbg_gen("close device %d, volume %d, mode %d", |
| 292 | ubi->ubi_num, vol->vol_id, desc->mode); | ||
| 270 | 293 | ||
| 271 | spin_lock(&ubi->volumes_lock); | 294 | spin_lock(&ubi->volumes_lock); |
| 272 | switch (desc->mode) { | 295 | switch (desc->mode) { |
| @@ -558,7 +581,7 @@ int ubi_leb_unmap(struct ubi_volume_desc *desc, int lnum) | |||
| 558 | EXPORT_SYMBOL_GPL(ubi_leb_unmap); | 581 | EXPORT_SYMBOL_GPL(ubi_leb_unmap); |
| 559 | 582 | ||
| 560 | /** | 583 | /** |
| 561 | * ubi_leb_map - map logical erasblock to a physical eraseblock. | 584 | * ubi_leb_map - map logical eraseblock to a physical eraseblock. |
| 562 | * @desc: volume descriptor | 585 | * @desc: volume descriptor |
| 563 | * @lnum: logical eraseblock number | 586 | * @lnum: logical eraseblock number |
| 564 | * @dtype: expected data type | 587 | * @dtype: expected data type |
| @@ -656,3 +679,59 @@ int ubi_sync(int ubi_num) | |||
| 656 | return 0; | 679 | return 0; |
| 657 | } | 680 | } |
| 658 | EXPORT_SYMBOL_GPL(ubi_sync); | 681 | EXPORT_SYMBOL_GPL(ubi_sync); |
| 682 | |||
| 683 | BLOCKING_NOTIFIER_HEAD(ubi_notifiers); | ||
| 684 | |||
| 685 | /** | ||
| 686 | * ubi_register_volume_notifier - register a volume notifier. | ||
| 687 | * @nb: the notifier description object | ||
| 688 | * @ignore_existing: if non-zero, do not send "added" notification for all | ||
| 689 | * already existing volumes | ||
| 690 | * | ||
| 691 | * This function registers a volume notifier, which means that | ||
| 692 | * 'nb->notifier_call()' will be invoked when an UBI volume is created, | ||
| 693 | * removed, re-sized, re-named, or updated. The first argument of the function | ||
| 694 | * is the notification type. The second argument is a pointer to a | ||
| 695 | * &struct ubi_notification object which describes the notification event. | ||
| 696 | * Using UBI API from the volume notifier is prohibited. | ||
| 697 | * | ||
| 698 | * This function returns zero in case of success and a negative error code | ||
| 699 | * in case of failure. | ||
| 700 | */ | ||
| 701 | int ubi_register_volume_notifier(struct notifier_block *nb, | ||
| 702 | int ignore_existing) | ||
| 703 | { | ||
| 704 | int err; | ||
| 705 | |||
| 706 | err = blocking_notifier_chain_register(&ubi_notifiers, nb); | ||
| 707 | if (err != 0) | ||
| 708 | return err; | ||
| 709 | if (ignore_existing) | ||
| 710 | return 0; | ||
| 711 | |||
| 712 | /* | ||
| 713 | * We are going to walk all UBI devices and all volumes, and | ||
| 714 | * notify the user about existing volumes by the %UBI_VOLUME_ADDED | ||
| 715 | * event. We have to lock the @ubi_devices_mutex to make sure UBI | ||
| 716 | * devices do not disappear. | ||
| 717 | */ | ||
| 718 | mutex_lock(&ubi_devices_mutex); | ||
| 719 | ubi_enumerate_volumes(nb); | ||
| 720 | mutex_unlock(&ubi_devices_mutex); | ||
| 721 | |||
| 722 | return err; | ||
| 723 | } | ||
| 724 | EXPORT_SYMBOL_GPL(ubi_register_volume_notifier); | ||
| 725 | |||
| 726 | /** | ||
| 727 | * ubi_unregister_volume_notifier - unregister the volume notifier. | ||
| 728 | * @nb: the notifier description object | ||
| 729 | * | ||
| 730 | * This function unregisters volume notifier @nb and returns zero in case of | ||
| 731 | * success and a negative error code in case of failure. | ||
| 732 | */ | ||
| 733 | int ubi_unregister_volume_notifier(struct notifier_block *nb) | ||
| 734 | { | ||
| 735 | return blocking_notifier_chain_unregister(&ubi_notifiers, nb); | ||
| 736 | } | ||
| 737 | EXPORT_SYMBOL_GPL(ubi_unregister_volume_notifier); | ||
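The kapi.c changes above split the info getters into ubi_do_get_*() variants (usable when the device is already pinned) and add the notifier registration entry points. For context, here is a hedged sketch of how an in-kernel client typically consumes this API; device number 0, volume 0 and the printed fields are made up for illustration.

/* Sketch of a kapi consumer: open a volume read-only, query its geometry,
 * close it. Error handling is trimmed to the essentials. */
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/mtd/ubi.h>

static int demo_query_volume(void)
{
	struct ubi_volume_desc *desc;
	struct ubi_volume_info vi;

	desc = ubi_open_volume(0, 0, UBI_READONLY);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	ubi_get_volume_info(desc, &vi);
	pr_info("UBI %d:%d: %d LEBs of %d usable bytes\n",
		vi.ubi_num, vi.vol_id, vi.size, vi.usable_leb_size);

	ubi_close_volume(desc);
	return 0;
}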
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h index c055511bb1b2..28acd133c997 100644 --- a/drivers/mtd/ubi/ubi.h +++ b/drivers/mtd/ubi/ubi.h | |||
| @@ -36,6 +36,7 @@ | |||
| 36 | #include <linux/device.h> | 36 | #include <linux/device.h> |
| 37 | #include <linux/string.h> | 37 | #include <linux/string.h> |
| 38 | #include <linux/vmalloc.h> | 38 | #include <linux/vmalloc.h> |
| 39 | #include <linux/notifier.h> | ||
| 39 | #include <linux/mtd/mtd.h> | 40 | #include <linux/mtd/mtd.h> |
| 40 | #include <linux/mtd/ubi.h> | 41 | #include <linux/mtd/ubi.h> |
| 41 | 42 | ||
| @@ -100,6 +101,28 @@ enum { | |||
| 100 | UBI_IO_BITFLIPS | 101 | UBI_IO_BITFLIPS |
| 101 | }; | 102 | }; |
| 102 | 103 | ||
| 104 | /* | ||
| 105 | * Return codes of the 'ubi_eba_copy_leb()' function. | ||
| 106 | * | ||
| 107 | * MOVE_CANCEL_RACE: canceled because the volume is being deleted, the source | ||
| 108 | * PEB was put meanwhile, or there is I/O on the source PEB | ||
| 109 | * MOVE_SOURCE_RD_ERR: canceled because there was a read error from the source | ||
| 110 | * PEB | ||
| 111 | * MOVE_TARGET_RD_ERR: canceled because there was a read error from the target | ||
| 112 | * PEB | ||
| 113 | * MOVE_TARGET_WR_ERR: canceled because there was a write error to the target | ||
| 114 | * PEB | ||
| 115 | * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the | ||
| 116 | * target PEB | ||
| 117 | */ | ||
| 118 | enum { | ||
| 119 | MOVE_CANCEL_RACE = 1, | ||
| 120 | MOVE_SOURCE_RD_ERR, | ||
| 121 | MOVE_TARGET_RD_ERR, | ||
| 122 | MOVE_TARGET_WR_ERR, | ||
| 123 | MOVE_CANCEL_BITFLIPS, | ||
| 124 | }; | ||
| 125 | |||
| 103 | /** | 126 | /** |
| 104 | * struct ubi_wl_entry - wear-leveling entry. | 127 | * struct ubi_wl_entry - wear-leveling entry. |
| 105 | * @u.rb: link in the corresponding (free/used) RB-tree | 128 | * @u.rb: link in the corresponding (free/used) RB-tree |
| @@ -208,10 +231,6 @@ struct ubi_volume_desc; | |||
| 208 | * @changing_leb: %1 if the atomic LEB change ioctl command is in progress | 231 | * @changing_leb: %1 if the atomic LEB change ioctl command is in progress |
| 209 | * @direct_writes: %1 if direct writes are enabled for this volume | 232 | * @direct_writes: %1 if direct writes are enabled for this volume |
| 210 | * | 233 | * |
| 211 | * @gluebi_desc: gluebi UBI volume descriptor | ||
| 212 | * @gluebi_refcount: reference count of the gluebi MTD device | ||
| 213 | * @gluebi_mtd: MTD device description object of the gluebi MTD device | ||
| 214 | * | ||
| 215 | * The @corrupted field indicates that the volume's contents is corrupted. | 234 | * The @corrupted field indicates that the volume's contents is corrupted. |
| 216 | * Since UBI protects only static volumes, this field is not relevant to | 235 | * Since UBI protects only static volumes, this field is not relevant to |
| 217 | * dynamic volumes - it is user's responsibility to assure their data | 236 | * dynamic volumes - it is user's responsibility to assure their data |
| @@ -255,17 +274,6 @@ struct ubi_volume { | |||
| 255 | unsigned int updating:1; | 274 | unsigned int updating:1; |
| 256 | unsigned int changing_leb:1; | 275 | unsigned int changing_leb:1; |
| 257 | unsigned int direct_writes:1; | 276 | unsigned int direct_writes:1; |
| 258 | |||
| 259 | #ifdef CONFIG_MTD_UBI_GLUEBI | ||
| 260 | /* | ||
| 261 | * Gluebi-related stuff may be compiled out. | ||
| 262 | * Note: this should not be built into UBI but should be a separate | ||
| 263 | * ubimtd driver which works on top of UBI and emulates MTD devices. | ||
| 264 | */ | ||
| 265 | struct ubi_volume_desc *gluebi_desc; | ||
| 266 | int gluebi_refcount; | ||
| 267 | struct mtd_info gluebi_mtd; | ||
| 268 | #endif | ||
| 269 | }; | 277 | }; |
| 270 | 278 | ||
| 271 | /** | 279 | /** |
| @@ -305,9 +313,9 @@ struct ubi_wl_entry; | |||
| 305 | * @vtbl_slots: how many slots are available in the volume table | 313 | * @vtbl_slots: how many slots are available in the volume table |
| 306 | * @vtbl_size: size of the volume table in bytes | 314 | * @vtbl_size: size of the volume table in bytes |
| 307 | * @vtbl: in-RAM volume table copy | 315 | * @vtbl: in-RAM volume table copy |
| 308 | * @volumes_mutex: protects on-flash volume table and serializes volume | 316 | * @device_mutex: protects on-flash volume table and serializes volume |
| 309 | * changes, like creation, deletion, update, re-size, | 317 | * creation, deletion, update, re-size, re-name and set |
| 310 | * re-name and set property | 318 | * property |
| 311 | * | 319 | * |
| 312 | * @max_ec: current highest erase counter value | 320 | * @max_ec: current highest erase counter value |
| 313 | * @mean_ec: current mean erase counter value | 321 | * @mean_ec: current mean erase counter value |
| @@ -318,14 +326,15 @@ struct ubi_wl_entry; | |||
| 318 | * @alc_mutex: serializes "atomic LEB change" operations | 326 | * @alc_mutex: serializes "atomic LEB change" operations |
| 319 | * | 327 | * |
| 320 | * @used: RB-tree of used physical eraseblocks | 328 | * @used: RB-tree of used physical eraseblocks |
| 329 | * @erroneous: RB-tree of erroneous used physical eraseblocks | ||
| 321 | * @free: RB-tree of free physical eraseblocks | 330 | * @free: RB-tree of free physical eraseblocks |
| 322 | * @scrub: RB-tree of physical eraseblocks which need scrubbing | 331 | * @scrub: RB-tree of physical eraseblocks which need scrubbing |
| 323 | * @pq: protection queue (contain physical eraseblocks which are temporarily | 332 | * @pq: protection queue (contain physical eraseblocks which are temporarily |
| 324 | * protected from the wear-leveling worker) | 333 | * protected from the wear-leveling worker) |
| 325 | * @pq_head: protection queue head | 334 | * @pq_head: protection queue head |
| 326 | * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, | 335 | * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from, |
| 327 | * @move_to, @move_to_put @erase_pending, @wl_scheduled and @works | 336 | * @move_to, @move_to_put, @erase_pending, @wl_scheduled, @works, |
| 328 | * fields | 337 | * @erroneous, and @erroneous_peb_count fields |
| 329 | * @move_mutex: serializes eraseblock moves | 338 | * @move_mutex: serializes eraseblock moves |
| 330 | * @work_sem: synchronizes the WL worker with use tasks | 339 | * @work_sem: synchronizes the WL worker with use tasks |
| 331 | * @wl_scheduled: non-zero if the wear-leveling was scheduled | 340 | * @wl_scheduled: non-zero if the wear-leveling was scheduled |
| @@ -339,12 +348,15 @@ struct ubi_wl_entry; | |||
| 339 | * @bgt_thread: background thread description object | 348 | * @bgt_thread: background thread description object |
| 340 | * @thread_enabled: if the background thread is enabled | 349 | * @thread_enabled: if the background thread is enabled |
| 341 | * @bgt_name: background thread name | 350 | * @bgt_name: background thread name |
| 351 | * @reboot_notifier: notifier to terminate background thread before rebooting | ||
| 342 | * | 352 | * |
| 343 | * @flash_size: underlying MTD device size (in bytes) | 353 | * @flash_size: underlying MTD device size (in bytes) |
| 344 | * @peb_count: count of physical eraseblocks on the MTD device | 354 | * @peb_count: count of physical eraseblocks on the MTD device |
| 345 | * @peb_size: physical eraseblock size | 355 | * @peb_size: physical eraseblock size |
| 346 | * @bad_peb_count: count of bad physical eraseblocks | 356 | * @bad_peb_count: count of bad physical eraseblocks |
| 347 | * @good_peb_count: count of good physical eraseblocks | 357 | * @good_peb_count: count of good physical eraseblocks |
| 358 | * @erroneous_peb_count: count of erroneous physical eraseblocks in @erroneous | ||
| 359 | * @max_erroneous: maximum allowed amount of erroneous physical eraseblocks | ||
| 348 | * @min_io_size: minimal input/output unit size of the underlying MTD device | 360 | * @min_io_size: minimal input/output unit size of the underlying MTD device |
| 349 | * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers | 361 | * @hdrs_min_io_size: minimal I/O unit size used for VID and EC headers |
| 350 | * @ro_mode: if the UBI device is in read-only mode | 362 | * @ro_mode: if the UBI device is in read-only mode |
| @@ -366,7 +378,6 @@ struct ubi_wl_entry; | |||
| 366 | * @peb_buf2: another buffer of PEB size used for different purposes | 378 | * @peb_buf2: another buffer of PEB size used for different purposes |
| 367 | * @buf_mutex: protects @peb_buf1 and @peb_buf2 | 379 | * @buf_mutex: protects @peb_buf1 and @peb_buf2 |
| 368 | * @ckvol_mutex: serializes static volume checking when opening | 380 | * @ckvol_mutex: serializes static volume checking when opening |
| 369 | * @mult_mutex: serializes operations on multiple volumes, like re-naming | ||
| 370 | * @dbg_peb_buf: buffer of PEB size used for debugging | 381 | * @dbg_peb_buf: buffer of PEB size used for debugging |
| 371 | * @dbg_buf_mutex: protects @dbg_peb_buf | 382 | * @dbg_buf_mutex: protects @dbg_peb_buf |
| 372 | */ | 383 | */ |
| @@ -389,7 +400,7 @@ struct ubi_device { | |||
| 389 | int vtbl_slots; | 400 | int vtbl_slots; |
| 390 | int vtbl_size; | 401 | int vtbl_size; |
| 391 | struct ubi_vtbl_record *vtbl; | 402 | struct ubi_vtbl_record *vtbl; |
| 392 | struct mutex volumes_mutex; | 403 | struct mutex device_mutex; |
| 393 | 404 | ||
| 394 | int max_ec; | 405 | int max_ec; |
| 395 | /* Note, mean_ec is not updated run-time - should be fixed */ | 406 | /* Note, mean_ec is not updated run-time - should be fixed */ |
| @@ -403,6 +414,7 @@ struct ubi_device { | |||
| 403 | 414 | ||
| 404 | /* Wear-leveling sub-system's stuff */ | 415 | /* Wear-leveling sub-system's stuff */ |
| 405 | struct rb_root used; | 416 | struct rb_root used; |
| 417 | struct rb_root erroneous; | ||
| 406 | struct rb_root free; | 418 | struct rb_root free; |
| 407 | struct rb_root scrub; | 419 | struct rb_root scrub; |
| 408 | struct list_head pq[UBI_PROT_QUEUE_LEN]; | 420 | struct list_head pq[UBI_PROT_QUEUE_LEN]; |
| @@ -420,6 +432,7 @@ struct ubi_device { | |||
| 420 | struct task_struct *bgt_thread; | 432 | struct task_struct *bgt_thread; |
| 421 | int thread_enabled; | 433 | int thread_enabled; |
| 422 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; | 434 | char bgt_name[sizeof(UBI_BGT_NAME_PATTERN)+2]; |
| 435 | struct notifier_block reboot_notifier; | ||
| 423 | 436 | ||
| 424 | /* I/O sub-system's stuff */ | 437 | /* I/O sub-system's stuff */ |
| 425 | long long flash_size; | 438 | long long flash_size; |
| @@ -427,6 +440,8 @@ struct ubi_device { | |||
| 427 | int peb_size; | 440 | int peb_size; |
| 428 | int bad_peb_count; | 441 | int bad_peb_count; |
| 429 | int good_peb_count; | 442 | int good_peb_count; |
| 443 | int erroneous_peb_count; | ||
| 444 | int max_erroneous; | ||
| 430 | int min_io_size; | 445 | int min_io_size; |
| 431 | int hdrs_min_io_size; | 446 | int hdrs_min_io_size; |
| 432 | int ro_mode; | 447 | int ro_mode; |
| @@ -444,8 +459,7 @@ struct ubi_device { | |||
| 444 | void *peb_buf2; | 459 | void *peb_buf2; |
| 445 | struct mutex buf_mutex; | 460 | struct mutex buf_mutex; |
| 446 | struct mutex ckvol_mutex; | 461 | struct mutex ckvol_mutex; |
| 447 | struct mutex mult_mutex; | 462 | #ifdef CONFIG_MTD_UBI_DEBUG_PARANOID |
| 448 | #ifdef CONFIG_MTD_UBI_DEBUG | ||
| 449 | void *dbg_peb_buf; | 463 | void *dbg_peb_buf; |
| 450 | struct mutex dbg_buf_mutex; | 464 | struct mutex dbg_buf_mutex; |
| 451 | #endif | 465 | #endif |
| @@ -457,6 +471,7 @@ extern const struct file_operations ubi_cdev_operations; | |||
| 457 | extern const struct file_operations ubi_vol_cdev_operations; | 471 | extern const struct file_operations ubi_vol_cdev_operations; |
| 458 | extern struct class *ubi_class; | 472 | extern struct class *ubi_class; |
| 459 | extern struct mutex ubi_devices_mutex; | 473 | extern struct mutex ubi_devices_mutex; |
| 474 | extern struct blocking_notifier_head ubi_notifiers; | ||
| 460 | 475 | ||
| 461 | /* vtbl.c */ | 476 | /* vtbl.c */ |
| 462 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, | 477 | int ubi_change_vtbl_record(struct ubi_device *ubi, int idx, |
| @@ -489,17 +504,6 @@ int ubi_calc_data_len(const struct ubi_device *ubi, const void *buf, | |||
| 489 | int ubi_check_volume(struct ubi_device *ubi, int vol_id); | 504 | int ubi_check_volume(struct ubi_device *ubi, int vol_id); |
| 490 | void ubi_calculate_reserved(struct ubi_device *ubi); | 505 | void ubi_calculate_reserved(struct ubi_device *ubi); |
| 491 | 506 | ||
| 492 | /* gluebi.c */ | ||
| 493 | #ifdef CONFIG_MTD_UBI_GLUEBI | ||
| 494 | int ubi_create_gluebi(struct ubi_device *ubi, struct ubi_volume *vol); | ||
| 495 | int ubi_destroy_gluebi(struct ubi_volume *vol); | ||
| 496 | void ubi_gluebi_updated(struct ubi_volume *vol); | ||
| 497 | #else | ||
| 498 | #define ubi_create_gluebi(ubi, vol) 0 | ||
| 499 | #define ubi_destroy_gluebi(vol) 0 | ||
| 500 | #define ubi_gluebi_updated(vol) | ||
| 501 | #endif | ||
| 502 | |||
| 503 | /* eba.c */ | 507 | /* eba.c */ |
| 504 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, | 508 | int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol, |
| 505 | int lnum); | 509 | int lnum); |
| @@ -549,6 +553,16 @@ struct ubi_device *ubi_get_device(int ubi_num); | |||
| 549 | void ubi_put_device(struct ubi_device *ubi); | 553 | void ubi_put_device(struct ubi_device *ubi); |
| 550 | struct ubi_device *ubi_get_by_major(int major); | 554 | struct ubi_device *ubi_get_by_major(int major); |
| 551 | int ubi_major2num(int major); | 555 | int ubi_major2num(int major); |
| 556 | int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, | ||
| 557 | int ntype); | ||
| 558 | int ubi_notify_all(struct ubi_device *ubi, int ntype, | ||
| 559 | struct notifier_block *nb); | ||
| 560 | int ubi_enumerate_volumes(struct notifier_block *nb); | ||
| 561 | |||
| 562 | /* kapi.c */ | ||
| 563 | void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di); | ||
| 564 | void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol, | ||
| 565 | struct ubi_volume_info *vi); | ||
| 552 | 566 | ||
| 553 | /* | 567 | /* |
| 554 | * ubi_rb_for_each_entry - walk an RB-tree. | 568 | * ubi_rb_for_each_entry - walk an RB-tree. |
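ubi.h now grows the @erroneous RB-tree, the @erroneous_peb_count/@max_erroneous limits and the MOVE_* return codes consumed by wl.c further down. As a small hedged illustration of the ubi_rb_for_each_entry() walker mentioned at the end of the hunk above (assuming its usual (rb, pos, root, member) argument order, and that the snippet is compiled inside the UBI driver where ubi.h and dbg_wl() are available):

/* Illustration only: dump the PEBs parked in the new @erroneous tree.
 * Assumes the caller holds @ubi->wl_lock. */
#include "ubi.h"

static void dump_erroneous_pebs(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;
	struct rb_node *rb;

	ubi_rb_for_each_entry(rb, e, &ubi->erroneous, u.rb)
		dbg_wl("erroneous PEB %d, erase counter %d", e->pnum, e->ec);
}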
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c index 6b4d1ae891ae..74fdc40c8627 100644 --- a/drivers/mtd/ubi/upd.c +++ b/drivers/mtd/ubi/upd.c | |||
| @@ -68,10 +68,10 @@ static int set_update_marker(struct ubi_device *ubi, struct ubi_volume *vol) | |||
| 68 | sizeof(struct ubi_vtbl_record)); | 68 | sizeof(struct ubi_vtbl_record)); |
| 69 | vtbl_rec.upd_marker = 1; | 69 | vtbl_rec.upd_marker = 1; |
| 70 | 70 | ||
| 71 | mutex_lock(&ubi->volumes_mutex); | 71 | mutex_lock(&ubi->device_mutex); |
| 72 | err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); | 72 | err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); |
| 73 | mutex_unlock(&ubi->volumes_mutex); | ||
| 74 | vol->upd_marker = 1; | 73 | vol->upd_marker = 1; |
| 74 | mutex_unlock(&ubi->device_mutex); | ||
| 75 | return err; | 75 | return err; |
| 76 | } | 76 | } |
| 77 | 77 | ||
| @@ -109,10 +109,10 @@ static int clear_update_marker(struct ubi_device *ubi, struct ubi_volume *vol, | |||
| 109 | vol->last_eb_bytes = vol->usable_leb_size; | 109 | vol->last_eb_bytes = vol->usable_leb_size; |
| 110 | } | 110 | } |
| 111 | 111 | ||
| 112 | mutex_lock(&ubi->volumes_mutex); | 112 | mutex_lock(&ubi->device_mutex); |
| 113 | err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); | 113 | err = ubi_change_vtbl_record(ubi, vol->vol_id, &vtbl_rec); |
| 114 | mutex_unlock(&ubi->volumes_mutex); | ||
| 115 | vol->upd_marker = 0; | 114 | vol->upd_marker = 0; |
| 115 | mutex_unlock(&ubi->device_mutex); | ||
| 116 | return err; | 116 | return err; |
| 117 | } | 117 | } |
| 118 | 118 | ||
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c index df5483562b7a..ab64cb56df6e 100644 --- a/drivers/mtd/ubi/vmt.c +++ b/drivers/mtd/ubi/vmt.c | |||
| @@ -198,7 +198,7 @@ static void volume_sysfs_close(struct ubi_volume *vol) | |||
| 198 | * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume | 198 | * %UBI_VOL_NUM_AUTO, this function automatically assign ID to the new volume |
| 199 | * and saves it in @req->vol_id. Returns zero in case of success and a negative | 199 | * and saves it in @req->vol_id. Returns zero in case of success and a negative |
| 200 | * error code in case of failure. Note, the caller has to have the | 200 | * error code in case of failure. Note, the caller has to have the |
| 201 | * @ubi->volumes_mutex locked. | 201 | * @ubi->device_mutex locked. |
| 202 | */ | 202 | */ |
| 203 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | 203 | int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) |
| 204 | { | 204 | { |
| @@ -232,8 +232,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
| 232 | req->vol_id = vol_id; | 232 | req->vol_id = vol_id; |
| 233 | } | 233 | } |
| 234 | 234 | ||
| 235 | dbg_gen("volume ID %d, %llu bytes, type %d, name %s", | 235 | dbg_gen("create device %d, volume %d, %llu bytes, type %d, name %s", |
| 236 | vol_id, (unsigned long long)req->bytes, | 236 | ubi->ubi_num, vol_id, (unsigned long long)req->bytes, |
| 237 | (int)req->vol_type, req->name); | 237 | (int)req->vol_type, req->name); |
| 238 | 238 | ||
| 239 | /* Ensure that this volume does not exist */ | 239 | /* Ensure that this volume does not exist */ |
| @@ -317,10 +317,6 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
| 317 | goto out_mapping; | 317 | goto out_mapping; |
| 318 | } | 318 | } |
| 319 | 319 | ||
| 320 | err = ubi_create_gluebi(ubi, vol); | ||
| 321 | if (err) | ||
| 322 | goto out_cdev; | ||
| 323 | |||
| 324 | vol->dev.release = vol_release; | 320 | vol->dev.release = vol_release; |
| 325 | vol->dev.parent = &ubi->dev; | 321 | vol->dev.parent = &ubi->dev; |
| 326 | vol->dev.devt = dev; | 322 | vol->dev.devt = dev; |
| @@ -330,7 +326,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
| 330 | err = device_register(&vol->dev); | 326 | err = device_register(&vol->dev); |
| 331 | if (err) { | 327 | if (err) { |
| 332 | ubi_err("cannot register device"); | 328 | ubi_err("cannot register device"); |
| 333 | goto out_gluebi; | 329 | goto out_cdev; |
| 334 | } | 330 | } |
| 335 | 331 | ||
| 336 | err = volume_sysfs_init(ubi, vol); | 332 | err = volume_sysfs_init(ubi, vol); |
| @@ -358,7 +354,9 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req) | |||
| 358 | ubi->vol_count += 1; | 354 | ubi->vol_count += 1; |
| 359 | spin_unlock(&ubi->volumes_lock); | 355 | spin_unlock(&ubi->volumes_lock); |
| 360 | 356 | ||
| 361 | err = paranoid_check_volumes(ubi); | 357 | ubi_volume_notify(ubi, vol, UBI_VOLUME_ADDED); |
| 358 | if (paranoid_check_volumes(ubi)) | ||
| 359 | dbg_err("check failed while creating volume %d", vol_id); | ||
| 362 | return err; | 360 | return err; |
| 363 | 361 | ||
| 364 | out_sysfs: | 362 | out_sysfs: |
| @@ -373,10 +371,6 @@ out_sysfs: | |||
| 373 | do_free = 0; | 371 | do_free = 0; |
| 374 | get_device(&vol->dev); | 372 | get_device(&vol->dev); |
| 375 | volume_sysfs_close(vol); | 373 | volume_sysfs_close(vol); |
| 376 | out_gluebi: | ||
| 377 | if (ubi_destroy_gluebi(vol)) | ||
| 378 | dbg_err("cannot destroy gluebi for volume %d:%d", | ||
| 379 | ubi->ubi_num, vol_id); | ||
| 380 | out_cdev: | 374 | out_cdev: |
| 381 | cdev_del(&vol->cdev); | 375 | cdev_del(&vol->cdev); |
| 382 | out_mapping: | 376 | out_mapping: |
| @@ -403,7 +397,7 @@ out_unlock: | |||
| 403 | * | 397 | * |
| 404 | * This function removes volume described by @desc. The volume has to be opened | 398 | * This function removes volume described by @desc. The volume has to be opened |
| 405 | * in "exclusive" mode. Returns zero in case of success and a negative error | 399 | * in "exclusive" mode. Returns zero in case of success and a negative error |
| 406 | * code in case of failure. The caller has to have the @ubi->volumes_mutex | 400 | * code in case of failure. The caller has to have the @ubi->device_mutex |
| 407 | * locked. | 401 | * locked. |
| 408 | */ | 402 | */ |
| 409 | int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) | 403 | int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) |
| @@ -412,7 +406,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) | |||
| 412 | struct ubi_device *ubi = vol->ubi; | 406 | struct ubi_device *ubi = vol->ubi; |
| 413 | int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; | 407 | int i, err, vol_id = vol->vol_id, reserved_pebs = vol->reserved_pebs; |
| 414 | 408 | ||
| 415 | dbg_gen("remove UBI volume %d", vol_id); | 409 | dbg_gen("remove device %d, volume %d", ubi->ubi_num, vol_id); |
| 416 | ubi_assert(desc->mode == UBI_EXCLUSIVE); | 410 | ubi_assert(desc->mode == UBI_EXCLUSIVE); |
| 417 | ubi_assert(vol == ubi->volumes[vol_id]); | 411 | ubi_assert(vol == ubi->volumes[vol_id]); |
| 418 | 412 | ||
| @@ -431,10 +425,6 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) | |||
| 431 | ubi->volumes[vol_id] = NULL; | 425 | ubi->volumes[vol_id] = NULL; |
| 432 | spin_unlock(&ubi->volumes_lock); | 426 | spin_unlock(&ubi->volumes_lock); |
| 433 | 427 | ||
| 434 | err = ubi_destroy_gluebi(vol); | ||
| 435 | if (err) | ||
| 436 | goto out_err; | ||
| 437 | |||
| 438 | if (!no_vtbl) { | 428 | if (!no_vtbl) { |
| 439 | err = ubi_change_vtbl_record(ubi, vol_id, NULL); | 429 | err = ubi_change_vtbl_record(ubi, vol_id, NULL); |
| 440 | if (err) | 430 | if (err) |
| @@ -465,8 +455,10 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl) | |||
| 465 | ubi->vol_count -= 1; | 455 | ubi->vol_count -= 1; |
| 466 | spin_unlock(&ubi->volumes_lock); | 456 | spin_unlock(&ubi->volumes_lock); |
| 467 | 457 | ||
| 468 | if (!no_vtbl) | 458 | ubi_volume_notify(ubi, vol, UBI_VOLUME_REMOVED); |
| 469 | err = paranoid_check_volumes(ubi); | 459 | if (!no_vtbl && paranoid_check_volumes(ubi)) |
| 460 | dbg_err("check failed while removing volume %d", vol_id); | ||
| 461 | |||
| 470 | return err; | 462 | return err; |
| 471 | 463 | ||
| 472 | out_err: | 464 | out_err: |
| @@ -485,7 +477,7 @@ out_unlock: | |||
| 485 | * | 477 | * |
| 486 | * This function re-sizes the volume and returns zero in case of success, and a | 478 | * This function re-sizes the volume and returns zero in case of success, and a |
| 487 | * negative error code in case of failure. The caller has to have the | 479 | * negative error code in case of failure. The caller has to have the |
| 488 | * @ubi->volumes_mutex locked. | 480 | * @ubi->device_mutex locked. |
| 489 | */ | 481 | */ |
| 490 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | 482 | int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) |
| 491 | { | 483 | { |
| @@ -498,8 +490,8 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | |||
| 498 | if (ubi->ro_mode) | 490 | if (ubi->ro_mode) |
| 499 | return -EROFS; | 491 | return -EROFS; |
| 500 | 492 | ||
| 501 | dbg_gen("re-size volume %d to from %d to %d PEBs", | 493 | dbg_gen("re-size device %d, volume %d to from %d to %d PEBs", |
| 502 | vol_id, vol->reserved_pebs, reserved_pebs); | 494 | ubi->ubi_num, vol_id, vol->reserved_pebs, reserved_pebs); |
| 503 | 495 | ||
| 504 | if (vol->vol_type == UBI_STATIC_VOLUME && | 496 | if (vol->vol_type == UBI_STATIC_VOLUME && |
| 505 | reserved_pebs < vol->used_ebs) { | 497 | reserved_pebs < vol->used_ebs) { |
| @@ -587,7 +579,9 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs) | |||
| 587 | (long long)vol->used_ebs * vol->usable_leb_size; | 579 | (long long)vol->used_ebs * vol->usable_leb_size; |
| 588 | } | 580 | } |
| 589 | 581 | ||
| 590 | err = paranoid_check_volumes(ubi); | 582 | ubi_volume_notify(ubi, vol, UBI_VOLUME_RESIZED); |
| 583 | if (paranoid_check_volumes(ubi)) | ||
| 584 | dbg_err("check failed while re-sizing volume %d", vol_id); | ||
| 591 | return err; | 585 | return err; |
| 592 | 586 | ||
| 593 | out_acc: | 587 | out_acc: |
| @@ -632,11 +626,12 @@ int ubi_rename_volumes(struct ubi_device *ubi, struct list_head *rename_list) | |||
| 632 | vol->name_len = re->new_name_len; | 626 | vol->name_len = re->new_name_len; |
| 633 | memcpy(vol->name, re->new_name, re->new_name_len + 1); | 627 | memcpy(vol->name, re->new_name, re->new_name_len + 1); |
| 634 | spin_unlock(&ubi->volumes_lock); | 628 | spin_unlock(&ubi->volumes_lock); |
| 629 | ubi_volume_notify(ubi, vol, UBI_VOLUME_RENAMED); | ||
| 635 | } | 630 | } |
| 636 | } | 631 | } |
| 637 | 632 | ||
| 638 | if (!err) | 633 | if (!err && paranoid_check_volumes(ubi)) |
| 639 | err = paranoid_check_volumes(ubi); | 634 | ; |
| 640 | return err; | 635 | return err; |
| 641 | } | 636 | } |
| 642 | 637 | ||
| @@ -667,10 +662,6 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) | |||
| 667 | return err; | 662 | return err; |
| 668 | } | 663 | } |
| 669 | 664 | ||
| 670 | err = ubi_create_gluebi(ubi, vol); | ||
| 671 | if (err) | ||
| 672 | goto out_cdev; | ||
| 673 | |||
| 674 | vol->dev.release = vol_release; | 665 | vol->dev.release = vol_release; |
| 675 | vol->dev.parent = &ubi->dev; | 666 | vol->dev.parent = &ubi->dev; |
| 676 | vol->dev.devt = dev; | 667 | vol->dev.devt = dev; |
| @@ -678,21 +669,19 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol) | |||
| 678 | dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); | 669 | dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); |
| 679 | err = device_register(&vol->dev); | 670 | err = device_register(&vol->dev); |
| 680 | if (err) | 671 | if (err) |
| 681 | goto out_gluebi; | 672 | goto out_cdev; |
| 682 | 673 | ||
| 683 | err = volume_sysfs_init(ubi, vol); | 674 | err = volume_sysfs_init(ubi, vol); |
| 684 | if (err) { | 675 | if (err) { |
| 685 | cdev_del(&vol->cdev); | 676 | cdev_del(&vol->cdev); |
| 686 | err = ubi_destroy_gluebi(vol); | ||
| 687 | volume_sysfs_close(vol); | 677 | volume_sysfs_close(vol); |
| 688 | return err; | 678 | return err; |
| 689 | } | 679 | } |
| 690 | 680 | ||
| 691 | err = paranoid_check_volumes(ubi); | 681 | if (paranoid_check_volumes(ubi)) |
| 682 | dbg_err("check failed while adding volume %d", vol_id); | ||
| 692 | return err; | 683 | return err; |
| 693 | 684 | ||
| 694 | out_gluebi: | ||
| 695 | err = ubi_destroy_gluebi(vol); | ||
| 696 | out_cdev: | 685 | out_cdev: |
| 697 | cdev_del(&vol->cdev); | 686 | cdev_del(&vol->cdev); |
| 698 | return err; | 687 | return err; |
| @@ -708,12 +697,9 @@ out_cdev: | |||
| 708 | */ | 697 | */ |
| 709 | void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) | 698 | void ubi_free_volume(struct ubi_device *ubi, struct ubi_volume *vol) |
| 710 | { | 699 | { |
| 711 | int err; | ||
| 712 | |||
| 713 | dbg_gen("free volume %d", vol->vol_id); | 700 | dbg_gen("free volume %d", vol->vol_id); |
| 714 | 701 | ||
| 715 | ubi->volumes[vol->vol_id] = NULL; | 702 | ubi->volumes[vol->vol_id] = NULL; |
| 716 | err = ubi_destroy_gluebi(vol); | ||
| 717 | cdev_del(&vol->cdev); | 703 | cdev_del(&vol->cdev); |
| 718 | volume_sysfs_close(vol); | 704 | volume_sysfs_close(vol); |
| 719 | } | 705 | } |
| @@ -868,6 +854,7 @@ fail: | |||
| 868 | if (vol) | 854 | if (vol) |
| 869 | ubi_dbg_dump_vol_info(vol); | 855 | ubi_dbg_dump_vol_info(vol); |
| 870 | ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); | 856 | ubi_dbg_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); |
| 857 | dump_stack(); | ||
| 871 | spin_unlock(&ubi->volumes_lock); | 858 | spin_unlock(&ubi->volumes_lock); |
| 872 | return -EINVAL; | 859 | return -EINVAL; |
| 873 | } | 860 | } |
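The vmt.c hunks above replace the direct ubi_create_gluebi()/ubi_destroy_gluebi() calls with ubi_volume_notify(). Its body lives in build.c, which is outside this excerpt; conceptually (and this is a reconstruction under that assumption, not a quote from the patch) it fills a struct ubi_notification with the helpers added in kapi.c and runs the blocking chain:

/* Hedged reconstruction of what ubi_volume_notify() is expected to do; the
 * real implementation is in build.c and is not shown in this excerpt. */
#include "ubi.h"

int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
{
	struct ubi_notification nt;

	ubi_do_get_device_info(ubi, &nt.di);
	ubi_do_get_volume_info(ubi, vol, &nt.vi);
	return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}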
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 891534f8210d..2b2472300610 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
| @@ -55,8 +55,8 @@ | |||
| 55 | * | 55 | * |
| 56 | * As it was said, for the UBI sub-system all physical eraseblocks are either | 56 | * As it was said, for the UBI sub-system all physical eraseblocks are either |
| 57 | * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while | 57 | * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while |
| 58 | * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or | 58 | * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub |
| 59 | * (temporarily) in the @wl->pq queue. | 59 | * RB-trees, as well as (temporarily) in the @wl->pq queue. |
| 60 | * | 60 | * |
| 61 | * When the WL sub-system returns a physical eraseblock, the physical | 61 | * When the WL sub-system returns a physical eraseblock, the physical |
| 62 | * eraseblock is protected from being moved for some "time". For this reason, | 62 | * eraseblock is protected from being moved for some "time". For this reason, |
| @@ -83,6 +83,8 @@ | |||
| 83 | * used. The former state corresponds to the @wl->free tree. The latter state | 83 | * used. The former state corresponds to the @wl->free tree. The latter state |
| 84 | * is split up on several sub-states: | 84 | * is split up on several sub-states: |
| 85 | * o the WL movement is allowed (@wl->used tree); | 85 | * o the WL movement is allowed (@wl->used tree); |
| 86 | * o the WL movement is disallowed (@wl->erroneous) because the PEB is | ||
| 87 | * erroneous - e.g., there was a read error; | ||
| 86 | * o the WL movement is temporarily prohibited (@wl->pq queue); | 88 | * o the WL movement is temporarily prohibited (@wl->pq queue); |
| 87 | * o scrubbing is needed (@wl->scrub tree). | 89 | * o scrubbing is needed (@wl->scrub tree). |
| 88 | * | 90 | * |
| @@ -653,7 +655,8 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
| 653 | static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | 655 | static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, |
| 654 | int cancel) | 656 | int cancel) |
| 655 | { | 657 | { |
| 656 | int err, scrubbing = 0, torture = 0; | 658 | int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0; |
| 659 | int vol_id = -1, uninitialized_var(lnum); | ||
| 657 | struct ubi_wl_entry *e1, *e2; | 660 | struct ubi_wl_entry *e1, *e2; |
| 658 | struct ubi_vid_hdr *vid_hdr; | 661 | struct ubi_vid_hdr *vid_hdr; |
| 659 | 662 | ||
| @@ -738,68 +741,78 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
| 738 | /* | 741 | /* |
| 739 | * We are trying to move PEB without a VID header. UBI | 742 | * We are trying to move PEB without a VID header. UBI |
| 740 | * always write VID headers shortly after the PEB was | 743 | * always write VID headers shortly after the PEB was |
| 741 | * given, so we have a situation when it did not have | 744 | * given, so we have a situation when it has not yet |
| 742 | * chance to write it down because it was preempted. | 745 | * had a chance to write it, because it was preempted. |
| 743 | * Just re-schedule the work, so that next time it will | 746 | * So add this PEB to the protection queue for now, |
| 744 | * likely have the VID header in place. | 747 | * because presumably more data will be written there |
| 748 | * (including the missing VID header), and then we'll | ||
| 749 | * move it. | ||
| 745 | */ | 750 | */ |
| 746 | dbg_wl("PEB %d has no VID header", e1->pnum); | 751 | dbg_wl("PEB %d has no VID header", e1->pnum); |
| 752 | protect = 1; | ||
| 747 | goto out_not_moved; | 753 | goto out_not_moved; |
| 748 | } | 754 | } |
| 749 | 755 | ||
| 750 | ubi_err("error %d while reading VID header from PEB %d", | 756 | ubi_err("error %d while reading VID header from PEB %d", |
| 751 | err, e1->pnum); | 757 | err, e1->pnum); |
| 752 | if (err > 0) | ||
| 753 | err = -EIO; | ||
| 754 | goto out_error; | 758 | goto out_error; |
| 755 | } | 759 | } |
| 756 | 760 | ||
| 761 | vol_id = be32_to_cpu(vid_hdr->vol_id); | ||
| 762 | lnum = be32_to_cpu(vid_hdr->lnum); | ||
| 763 | |||
| 757 | err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); | 764 | err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr); |
| 758 | if (err) { | 765 | if (err) { |
| 759 | if (err == -EAGAIN) | 766 | if (err == MOVE_CANCEL_RACE) { |
| 767 | /* | ||
| 768 | * The LEB has not been moved because the volume is | ||
| 769 | * being deleted or the PEB has been put meanwhile. We | ||
| 770 | * should prevent this PEB from being selected for | ||
| 771 | * wear-leveling movement again, so put it to the | ||
| 772 | * protection queue. | ||
| 773 | */ | ||
| 774 | protect = 1; | ||
| 760 | goto out_not_moved; | 775 | goto out_not_moved; |
| 761 | if (err < 0) | 776 | } |
| 762 | goto out_error; | 777 | |
| 763 | if (err == 2) { | 778 | if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR || |
| 764 | /* Target PEB write error, torture it */ | 779 | err == MOVE_TARGET_RD_ERR) { |
| 780 | /* | ||
| 781 | * Target PEB had bit-flips or write error - torture it. | ||
| 782 | */ | ||
| 765 | torture = 1; | 783 | torture = 1; |
| 766 | goto out_not_moved; | 784 | goto out_not_moved; |
| 767 | } | 785 | } |
| 768 | 786 | ||
| 769 | /* | 787 | if (err == MOVE_SOURCE_RD_ERR) { |
| 770 | * The LEB has not been moved because the volume is being | 788 | /* |
| 771 | * deleted or the PEB has been put meanwhile. We should prevent | 789 | * An error happened while reading the source PEB. Do |
| 772 | * this PEB from being selected for wear-leveling movement | 790 | * not switch to R/O mode in this case, and give the |
| 773 | * again, so put it to the protection queue. | 791 | * upper layers a possibility to recover from this, |
| 774 | */ | 792 | * e.g. by unmapping corresponding LEB. Instead, just |
| 775 | 793 | * put this PEB to the @ubi->erroneous list to prevent | |
| 776 | dbg_wl("canceled moving PEB %d", e1->pnum); | 794 | * UBI from trying to move it over and over again. |
| 777 | ubi_assert(err == 1); | 795 | */ |
| 778 | 796 | if (ubi->erroneous_peb_count > ubi->max_erroneous) { | |
| 779 | ubi_free_vid_hdr(ubi, vid_hdr); | 797 | ubi_err("too many erroneous eraseblocks (%d)", |
| 780 | vid_hdr = NULL; | 798 | ubi->erroneous_peb_count); |
| 781 | 799 | goto out_error; | |
| 782 | spin_lock(&ubi->wl_lock); | 800 | } |
| 783 | prot_queue_add(ubi, e1); | 801 | erroneous = 1; |
| 784 | ubi_assert(!ubi->move_to_put); | 802 | goto out_not_moved; |
| 785 | ubi->move_from = ubi->move_to = NULL; | 803 | } |
| 786 | ubi->wl_scheduled = 0; | ||
| 787 | spin_unlock(&ubi->wl_lock); | ||
| 788 | 804 | ||
| 789 | e1 = NULL; | 805 | if (err < 0) |
| 790 | err = schedule_erase(ubi, e2, 0); | ||
| 791 | if (err) | ||
| 792 | goto out_error; | 806 | goto out_error; |
| 793 | mutex_unlock(&ubi->move_mutex); | 807 | |
| 794 | return 0; | 808 | ubi_assert(0); |
| 795 | } | 809 | } |
| 796 | 810 | ||
| 797 | /* The PEB has been successfully moved */ | 811 | /* The PEB has been successfully moved */ |
| 798 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
| 799 | vid_hdr = NULL; | ||
| 800 | if (scrubbing) | 812 | if (scrubbing) |
| 801 | ubi_msg("scrubbed PEB %d, data moved to PEB %d", | 813 | ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", |
| 802 | e1->pnum, e2->pnum); | 814 | e1->pnum, vol_id, lnum, e2->pnum); |
| 815 | ubi_free_vid_hdr(ubi, vid_hdr); | ||
| 803 | 816 | ||
| 804 | spin_lock(&ubi->wl_lock); | 817 | spin_lock(&ubi->wl_lock); |
| 805 | if (!ubi->move_to_put) { | 818 | if (!ubi->move_to_put) { |
| @@ -812,8 +825,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
| 812 | 825 | ||
| 813 | err = schedule_erase(ubi, e1, 0); | 826 | err = schedule_erase(ubi, e1, 0); |
| 814 | if (err) { | 827 | if (err) { |
| 815 | e1 = NULL; | 828 | kmem_cache_free(ubi_wl_entry_slab, e1); |
| 816 | goto out_error; | 829 | if (e2) |
| 830 | kmem_cache_free(ubi_wl_entry_slab, e2); | ||
| 831 | goto out_ro; | ||
| 817 | } | 832 | } |
| 818 | 833 | ||
| 819 | if (e2) { | 834 | if (e2) { |
| @@ -821,10 +836,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
| 821 | * Well, the target PEB was put meanwhile, schedule it for | 836 | * Well, the target PEB was put meanwhile, schedule it for |
| 822 | * erasure. | 837 | * erasure. |
| 823 | */ | 838 | */ |
| 824 | dbg_wl("PEB %d was put meanwhile, erase", e2->pnum); | 839 | dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", |
| 840 | e2->pnum, vol_id, lnum); | ||
| 825 | err = schedule_erase(ubi, e2, 0); | 841 | err = schedule_erase(ubi, e2, 0); |
| 826 | if (err) | 842 | if (err) { |
| 827 | goto out_error; | 843 | kmem_cache_free(ubi_wl_entry_slab, e2); |
| 844 | goto out_ro; | ||
| 845 | } | ||
| 828 | } | 846 | } |
| 829 | 847 | ||
| 830 | dbg_wl("done"); | 848 | dbg_wl("done"); |
| @@ -837,11 +855,19 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
| 837 | * have been changed, schedule it for erasure. | 855 | * have been changed, schedule it for erasure. |
| 838 | */ | 856 | */ |
| 839 | out_not_moved: | 857 | out_not_moved: |
| 840 | dbg_wl("canceled moving PEB %d", e1->pnum); | 858 | if (vol_id != -1) |
| 841 | ubi_free_vid_hdr(ubi, vid_hdr); | 859 | dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)", |
| 842 | vid_hdr = NULL; | 860 | e1->pnum, vol_id, lnum, e2->pnum, err); |
| 861 | else | ||
| 862 | dbg_wl("cancel moving PEB %d to PEB %d (%d)", | ||
| 863 | e1->pnum, e2->pnum, err); | ||
| 843 | spin_lock(&ubi->wl_lock); | 864 | spin_lock(&ubi->wl_lock); |
| 844 | if (scrubbing) | 865 | if (protect) |
| 866 | prot_queue_add(ubi, e1); | ||
| 867 | else if (erroneous) { | ||
| 868 | wl_tree_add(e1, &ubi->erroneous); | ||
| 869 | ubi->erroneous_peb_count += 1; | ||
| 870 | } else if (scrubbing) | ||
| 845 | wl_tree_add(e1, &ubi->scrub); | 871 | wl_tree_add(e1, &ubi->scrub); |
| 846 | else | 872 | else |
| 847 | wl_tree_add(e1, &ubi->used); | 873 | wl_tree_add(e1, &ubi->used); |
| @@ -850,32 +876,36 @@ out_not_moved: | |||
| 850 | ubi->wl_scheduled = 0; | 876 | ubi->wl_scheduled = 0; |
| 851 | spin_unlock(&ubi->wl_lock); | 877 | spin_unlock(&ubi->wl_lock); |
| 852 | 878 | ||
| 853 | e1 = NULL; | 879 | ubi_free_vid_hdr(ubi, vid_hdr); |
| 854 | err = schedule_erase(ubi, e2, torture); | 880 | err = schedule_erase(ubi, e2, torture); |
| 855 | if (err) | 881 | if (err) { |
| 856 | goto out_error; | 882 | kmem_cache_free(ubi_wl_entry_slab, e2); |
| 857 | 883 | goto out_ro; | |
| 884 | } | ||
| 858 | mutex_unlock(&ubi->move_mutex); | 885 | mutex_unlock(&ubi->move_mutex); |
| 859 | return 0; | 886 | return 0; |
| 860 | 887 | ||
| 861 | out_error: | 888 | out_error: |
| 862 | ubi_err("error %d while moving PEB %d to PEB %d", | 889 | if (vol_id != -1) |
| 863 | err, e1->pnum, e2->pnum); | 890 | ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d", |
| 864 | 891 | err, e1->pnum, vol_id, lnum, e2->pnum); | |
| 865 | ubi_free_vid_hdr(ubi, vid_hdr); | 892 | else |
| 893 | ubi_err("error %d while moving PEB %d to PEB %d", | ||
| 894 | err, e1->pnum, e2->pnum); | ||
| 866 | spin_lock(&ubi->wl_lock); | 895 | spin_lock(&ubi->wl_lock); |
| 867 | ubi->move_from = ubi->move_to = NULL; | 896 | ubi->move_from = ubi->move_to = NULL; |
| 868 | ubi->move_to_put = ubi->wl_scheduled = 0; | 897 | ubi->move_to_put = ubi->wl_scheduled = 0; |
| 869 | spin_unlock(&ubi->wl_lock); | 898 | spin_unlock(&ubi->wl_lock); |
| 870 | 899 | ||
| 871 | if (e1) | 900 | ubi_free_vid_hdr(ubi, vid_hdr); |
| 872 | kmem_cache_free(ubi_wl_entry_slab, e1); | 901 | kmem_cache_free(ubi_wl_entry_slab, e1); |
| 873 | if (e2) | 902 | kmem_cache_free(ubi_wl_entry_slab, e2); |
| 874 | kmem_cache_free(ubi_wl_entry_slab, e2); | ||
| 875 | ubi_ro_mode(ubi); | ||
| 876 | 903 | ||
| 904 | out_ro: | ||
| 905 | ubi_ro_mode(ubi); | ||
| 877 | mutex_unlock(&ubi->move_mutex); | 906 | mutex_unlock(&ubi->move_mutex); |
| 878 | return err; | 907 | ubi_assert(err != 0); |
| 908 | return err < 0 ? err : -EIO; | ||
| 879 | 909 | ||
| 880 | out_cancel: | 910 | out_cancel: |
| 881 | ubi->wl_scheduled = 0; | 911 | ubi->wl_scheduled = 0; |
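The comment in the hunk above says that, on such move failures, UBI should not go read-only but instead let the upper layers recover, e.g. by unmapping the corresponding LEB, while the failing PEB is parked on the @ubi->erroneous list. A minimal sketch of that client-side reaction, assuming only ubi_leb_read() and ubi_leb_unmap() from the public kernel API; the function name, buffer handling and the -EBADMSG convention below are illustrative assumptions, not UBI code:

        #include <linux/mtd/ubi.h>

        /*
         * Hedged sketch of an UBI client recovering from a persistently
         * failing LEB: salvage whatever is still readable, then unmap the
         * LEB so UBI releases the underlying PEB for erasure instead of
         * trying to move it over and over again.
         */
        static int salvage_and_unmap(struct ubi_volume_desc *desc, int lnum,
                                     char *buf, int len)
        {
                int err;

                /* Best-effort read; -EBADMSG means corrupted data was returned */
                err = ubi_leb_read(desc, lnum, buf, 0, len, 0);
                if (err && err != -EBADMSG)
                        return err;

                /* Drop the mapping so the bad PEB goes back to UBI */
                return ubi_leb_unmap(desc, lnum);
        }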
| @@ -1015,7 +1045,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
| 1015 | /* | 1045 | /* |
| 1016 | * If this is not %-EIO, we have no idea what to do. Scheduling | 1046 | * If this is not %-EIO, we have no idea what to do. Scheduling |
| 1017 | * this physical eraseblock for erasure again would cause | 1047 | * this physical eraseblock for erasure again would cause |
| 1018 | * errors again and again. Well, lets switch to RO mode. | 1048 | * errors again and again. Well, let's switch to R/O mode. |
| 1019 | */ | 1049 | */ |
| 1020 | goto out_ro; | 1050 | goto out_ro; |
| 1021 | } | 1051 | } |
| @@ -1043,10 +1073,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
| 1043 | ubi_err("no reserved physical eraseblocks"); | 1073 | ubi_err("no reserved physical eraseblocks"); |
| 1044 | goto out_ro; | 1074 | goto out_ro; |
| 1045 | } | 1075 | } |
| 1046 | |||
| 1047 | spin_unlock(&ubi->volumes_lock); | 1076 | spin_unlock(&ubi->volumes_lock); |
| 1048 | ubi_msg("mark PEB %d as bad", pnum); | ||
| 1049 | 1077 | ||
| 1078 | ubi_msg("mark PEB %d as bad", pnum); | ||
| 1050 | err = ubi_io_mark_bad(ubi, pnum); | 1079 | err = ubi_io_mark_bad(ubi, pnum); |
| 1051 | if (err) | 1080 | if (err) |
| 1052 | goto out_ro; | 1081 | goto out_ro; |
| @@ -1056,7 +1085,9 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, | |||
| 1056 | ubi->bad_peb_count += 1; | 1085 | ubi->bad_peb_count += 1; |
| 1057 | ubi->good_peb_count -= 1; | 1086 | ubi->good_peb_count -= 1; |
| 1058 | ubi_calculate_reserved(ubi); | 1087 | ubi_calculate_reserved(ubi); |
| 1059 | if (ubi->beb_rsvd_pebs == 0) | 1088 | if (ubi->beb_rsvd_pebs) |
| 1089 | ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); | ||
| 1090 | else | ||
| 1060 | ubi_warn("last PEB from the reserved pool was used"); | 1091 | ubi_warn("last PEB from the reserved pool was used"); |
| 1061 | spin_unlock(&ubi->volumes_lock); | 1092 | spin_unlock(&ubi->volumes_lock); |
| 1062 | 1093 | ||
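The hunk above only changes how the remaining reserve is reported once a PEB has been marked bad; the reserve target itself comes from ubi_calculate_reserved(), which is roughly a CONFIG_MTD_UBI_BEB_RESERVE percentage of the good PEBs (see the Kconfig help earlier in this patch). A rough sketch of that calculation, with the integer-division order and the minimum floor treated as assumptions:

        /*
         * Approximation of ubi_calculate_reserved(): the bad-PEB reserve
         * target is CONFIG_MTD_UBI_BEB_RESERVE percent of the good PEBs,
         * never allowed to drop to zero (floor of 2 is an assumption).
         */
        static int reserved_target_sketch(int good_peb_count)
        {
                int level = good_peb_count / 100 * CONFIG_MTD_UBI_BEB_RESERVE;

                return level < 2 ? 2 : level;
        }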
| @@ -1125,6 +1156,13 @@ retry: | |||
| 1125 | } else if (in_wl_tree(e, &ubi->scrub)) { | 1156 | } else if (in_wl_tree(e, &ubi->scrub)) { |
| 1126 | paranoid_check_in_wl_tree(e, &ubi->scrub); | 1157 | paranoid_check_in_wl_tree(e, &ubi->scrub); |
| 1127 | rb_erase(&e->u.rb, &ubi->scrub); | 1158 | rb_erase(&e->u.rb, &ubi->scrub); |
| 1159 | } else if (in_wl_tree(e, &ubi->erroneous)) { | ||
| 1160 | paranoid_check_in_wl_tree(e, &ubi->erroneous); | ||
| 1161 | rb_erase(&e->u.rb, &ubi->erroneous); | ||
| 1162 | ubi->erroneous_peb_count -= 1; | ||
| 1163 | ubi_assert(ubi->erroneous_peb_count >= 0); | ||
| 1164 | /* Erroneous PEBs should be tortured */ | ||
| 1165 | torture = 1; | ||
| 1128 | } else { | 1166 | } else { |
| 1129 | err = prot_queue_del(ubi, e->pnum); | 1167 | err = prot_queue_del(ubi, e->pnum); |
| 1130 | if (err) { | 1168 | if (err) { |
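The new branch above sets torture = 1 because a PEB coming back from the erroneous list should be stress-tested before it is trusted again. A condensed, hedged sketch of what such torturing amounts to (erase, write a known pattern, read it back, compare), loosely modelled on torture_peb() in io.c; the ubi_io_* signatures and the pattern values are assumptions, not the real implementation:

        #include <linux/kernel.h>       /* ARRAY_SIZE */
        #include <linux/string.h>       /* memset */
        #include <linux/vmalloc.h>
        #include "ubi.h"                /* struct ubi_device, ubi_io_* (driver-private) */

        static int torture_sketch(struct ubi_device *ubi, int pnum)
        {
                static const uint8_t patterns[] = { 0xa5, 0x5a, 0x00 };
                uint8_t *buf;
                int i, j, err = 0;

                buf = vmalloc(ubi->peb_size);
                if (!buf)
                        return -ENOMEM;

                for (i = 0; i < ARRAY_SIZE(patterns); i++) {
                        /* Erase, fill with a known pattern, read back and verify */
                        err = ubi_io_sync_erase(ubi, pnum, 0);
                        if (err)
                                break;

                        memset(buf, patterns[i], ubi->peb_size);
                        err = ubi_io_write(ubi, buf, pnum, 0, ubi->peb_size);
                        if (err)
                                break;

                        err = ubi_io_read(ubi, buf, pnum, 0, ubi->peb_size);
                        if (err)
                                break;

                        for (j = 0; j < ubi->peb_size; j++)
                                if (buf[j] != patterns[i]) {
                                        err = -EIO;     /* pattern did not stick */
                                        break;
                                }
                        if (err)
                                break;
                }

                vfree(buf);
                return err;
        }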
| @@ -1373,7 +1411,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si) | |||
| 1373 | struct ubi_scan_leb *seb, *tmp; | 1411 | struct ubi_scan_leb *seb, *tmp; |
| 1374 | struct ubi_wl_entry *e; | 1412 | struct ubi_wl_entry *e; |
| 1375 | 1413 | ||
| 1376 | ubi->used = ubi->free = ubi->scrub = RB_ROOT; | 1414 | ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT; |
| 1377 | spin_lock_init(&ubi->wl_lock); | 1415 | spin_lock_init(&ubi->wl_lock); |
| 1378 | mutex_init(&ubi->move_mutex); | 1416 | mutex_init(&ubi->move_mutex); |
| 1379 | init_rwsem(&ubi->work_sem); | 1417 | init_rwsem(&ubi->work_sem); |
| @@ -1511,6 +1549,7 @@ void ubi_wl_close(struct ubi_device *ubi) | |||
| 1511 | cancel_pending(ubi); | 1549 | cancel_pending(ubi); |
| 1512 | protection_queue_destroy(ubi); | 1550 | protection_queue_destroy(ubi); |
| 1513 | tree_destroy(&ubi->used); | 1551 | tree_destroy(&ubi->used); |
| 1552 | tree_destroy(&ubi->erroneous); | ||
| 1514 | tree_destroy(&ubi->free); | 1553 | tree_destroy(&ubi->free); |
| 1515 | tree_destroy(&ubi->scrub); | 1554 | tree_destroy(&ubi->scrub); |
| 1516 | kfree(ubi->lookuptbl); | 1555 | kfree(ubi->lookuptbl); |
diff --git a/include/linux/mtd/ubi.h b/include/linux/mtd/ubi.h index 6316fafe5c2a..6913b71d9ab2 100644 --- a/include/linux/mtd/ubi.h +++ b/include/linux/mtd/ubi.h | |||
| @@ -132,6 +132,39 @@ struct ubi_device_info { | |||
| 132 | dev_t cdev; | 132 | dev_t cdev; |
| 133 | }; | 133 | }; |
| 134 | 134 | ||
| 135 | /* | ||
| 136 | * enum - volume notification types. | ||
| 137 | * @UBI_VOLUME_ADDED: volume has been added | ||
| 138 | * @UBI_VOLUME_REMOVED: volume has been removed | ||
| 139 | * @UBI_VOLUME_RESIZED: volume has been re-sized | ||
| 140 | * @UBI_VOLUME_RENAMED: volume has been re-named | ||
| 141 | * @UBI_VOLUME_UPDATED: volume contents have been updated | ||
| 142 | * | ||
| 143 | * These constants define which type of event has happened when a volume | ||
| 144 | * notification function is invoked. | ||
| 145 | */ | ||
| 146 | enum { | ||
| 147 | UBI_VOLUME_ADDED, | ||
| 148 | UBI_VOLUME_REMOVED, | ||
| 149 | UBI_VOLUME_RESIZED, | ||
| 150 | UBI_VOLUME_RENAMED, | ||
| 151 | UBI_VOLUME_UPDATED, | ||
| 152 | }; | ||
| 153 | |||
| 154 | /* | ||
| 155 | * struct ubi_notification - UBI notification description structure. | ||
| 156 | * @di: UBI device description object | ||
| 157 | * @vi: UBI volume description object | ||
| 158 | * | ||
| 159 | * UBI notifiers are called with a pointer to an object of this type. The | ||
| 160 | * object describes the notification. Namely, it provides a description of the | ||
| 161 | * UBI device and UBI volume the notification informs about. | ||
| 162 | */ | ||
| 163 | struct ubi_notification { | ||
| 164 | struct ubi_device_info di; | ||
| 165 | struct ubi_volume_info vi; | ||
| 166 | }; | ||
| 167 | |||
| 135 | /* UBI descriptor given to users when they open UBI volumes */ | 168 | /* UBI descriptor given to users when they open UBI volumes */ |
| 136 | struct ubi_volume_desc; | 169 | struct ubi_volume_desc; |
| 137 | 170 | ||
| @@ -141,6 +174,10 @@ void ubi_get_volume_info(struct ubi_volume_desc *desc, | |||
| 141 | struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); | 174 | struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode); |
| 142 | struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, | 175 | struct ubi_volume_desc *ubi_open_volume_nm(int ubi_num, const char *name, |
| 143 | int mode); | 176 | int mode); |
| 177 | int ubi_register_volume_notifier(struct notifier_block *nb, | ||
| 178 | int ignore_existing); | ||
| 179 | int ubi_unregister_volume_notifier(struct notifier_block *nb); | ||
| 180 | |||
| 144 | void ubi_close_volume(struct ubi_volume_desc *desc); | 181 | void ubi_close_volume(struct ubi_volume_desc *desc); |
| 145 | int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, | 182 | int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset, |
| 146 | int len, int check); | 183 | int len, int check); |
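The declarations above, together with the notification types and struct ubi_notification, form the new volume notification API. A minimal sketch of a client module that registers a notifier, loosely modelled on how gluebi uses this API in the rest of the patch; the di.ubi_num, vi.vol_id and vi.name fields are assumed from ubi_device_info/ubi_volume_info, and error handling is trimmed:

        #include <linux/module.h>
        #include <linux/notifier.h>
        #include <linux/mtd/ubi.h>

        /* Called with the notification type and a struct ubi_notification */
        static int example_vol_notify(struct notifier_block *nb,
                                      unsigned long notification_type, void *ns_ptr)
        {
                struct ubi_notification *nt = ns_ptr;

                switch (notification_type) {
                case UBI_VOLUME_ADDED:
                        pr_info("UBI device %d, volume %d (\"%s\") added\n",
                                nt->di.ubi_num, nt->vi.vol_id, nt->vi.name);
                        break;
                case UBI_VOLUME_REMOVED:
                        pr_info("volume %d removed\n", nt->vi.vol_id);
                        break;
                case UBI_VOLUME_RESIZED:
                case UBI_VOLUME_RENAMED:
                case UBI_VOLUME_UPDATED:
                default:
                        break;
                }
                return NOTIFY_OK;
        }

        static struct notifier_block example_nb = {
                .notifier_call = example_vol_notify,
        };

        static int __init example_init(void)
        {
                /* 0: also report volumes that already exist at registration time */
                return ubi_register_volume_notifier(&example_nb, 0);
        }

        static void __exit example_exit(void)
        {
                ubi_unregister_volume_notifier(&example_nb);
        }

        module_init(example_init);
        module_exit(example_exit);
        MODULE_LICENSE("GPL");

Passing 0 as the second argument is assumed, from the ignore_existing parameter name, to replay UBI_VOLUME_ADDED for volumes that already exist when the notifier registers, so late-loading clients do not miss them.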
