Diffstat (limited to 'drivers/mtd'):
 drivers/mtd/ubi/eba.c | 48
 drivers/mtd/ubi/ubi.h |  6
 2 files changed, 33 insertions(+), 21 deletions(-)
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 81bb6a33b555..7b7add6da12f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -46,6 +46,9 @@
 #include <linux/err.h>
 #include "ubi.h"
 
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
 /**
  * struct ltree_entry - an entry in the lock tree.
  * @rb: links RB-tree nodes
@@ -827,6 +830,9 @@ write_error:
  * data, which has to be aligned. This function guarantees that in case of an
  * unclean reboot the old contents is preserved. Returns zero in case of
  * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
                               const void *buf, int len, int dtype)
@@ -843,11 +849,10 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
         if (!vid_hdr)
                 return -ENOMEM;
 
+        mutex_lock(&ubi->alc_mutex);
         err = leb_write_lock(ubi, vol_id, lnum);
-        if (err) {
-                ubi_free_vid_hdr(ubi, vid_hdr);
-                return err;
-        }
+        if (err)
+                goto out_mutex;
 
         vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
         vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -864,9 +869,8 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 retry:
         pnum = ubi_wl_get_peb(ubi, dtype);
         if (pnum < 0) {
-                ubi_free_vid_hdr(ubi, vid_hdr);
-                leb_write_unlock(ubi, vol_id, lnum);
-                return pnum;
+                err = pnum;
+                goto out_leb_unlock;
         }
 
         dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
@@ -888,17 +892,18 @@ retry:
 
         if (vol->eba_tbl[lnum] >= 0) {
                 err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
-                if (err) {
-                        ubi_free_vid_hdr(ubi, vid_hdr);
-                        leb_write_unlock(ubi, vol_id, lnum);
-                        return err;
-                }
+                if (err)
+                        goto out_leb_unlock;
         }
 
         vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
         leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+        mutex_unlock(&ubi->alc_mutex);
         ubi_free_vid_hdr(ubi, vid_hdr);
-        return 0;
+        return err;
 
 write_error:
         if (err != -EIO || !ubi->bad_allowed) {
@@ -908,17 +913,13 @@ write_error:
                  * mode just in case.
                  */
                 ubi_ro_mode(ubi);
-                leb_write_unlock(ubi, vol_id, lnum);
-                ubi_free_vid_hdr(ubi, vid_hdr);
-                return err;
+                goto out_leb_unlock;
         }
 
         err = ubi_wl_put_peb(ubi, pnum, 1);
         if (err || ++tries > UBI_IO_RETRIES) {
                 ubi_ro_mode(ubi);
-                leb_write_unlock(ubi, vol_id, lnum);
-                ubi_free_vid_hdr(ubi, vid_hdr);
-                return err;
+                goto out_leb_unlock;
         }
 
         vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
@@ -1122,6 +1123,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
         dbg_eba("initialize EBA unit");
 
         spin_lock_init(&ubi->ltree_lock);
+        mutex_init(&ubi->alc_mutex);
         ubi->ltree = RB_ROOT;
 
         if (ubi_devices_cnt == 0) {
@@ -1183,6 +1185,14 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
         }
 
+        if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+                ubi_err("no enough physical eraseblocks (%d, need %d)",
+                        ubi->avail_pebs, EBA_RESERVED_PEBS);
+                goto out_free;
+        }
+        ubi->avail_pebs -= EBA_RESERVED_PEBS;
+        ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
         dbg_eba("EBA unit is initialized");
         return 0;
 
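The last eba.c hunk reserves one physical eraseblock for the atomic LEB change operation at EBA init time: it refuses to initialize if no PEB can be spared, and otherwise moves EBA_RESERVED_PEBS from the available pool to the reserved pool. A minimal user-space sketch of that accounting follows; struct peb_pools and reserve_atomic_change_peb are illustrative names, not UBI code.

#include <stdio.h>

#define EBA_RESERVED_PEBS 1

/* Hypothetical stand-in for the UBI device's eraseblock accounting. */
struct peb_pools {
        int avail_pebs;         /* PEBs still free for user data */
        int rsvd_pebs;          /* PEBs set aside for internal use */
};

static int reserve_atomic_change_peb(struct peb_pools *p)
{
        if (p->avail_pebs < EBA_RESERVED_PEBS) {
                fprintf(stderr, "not enough eraseblocks (%d, need %d)\n",
                        p->avail_pebs, EBA_RESERVED_PEBS);
                return -1;
        }
        p->avail_pebs -= EBA_RESERVED_PEBS;
        p->rsvd_pebs += EBA_RESERVED_PEBS;
        return 0;
}

int main(void)
{
        struct peb_pools pools = { .avail_pebs = 10, .rsvd_pebs = 2 };

        if (reserve_atomic_change_peb(&pools))
                return 1;
        printf("avail=%d rsvd=%d\n", pools.avail_pebs, pools.rsvd_pebs);
        return 0;
}
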
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index cc010111264f..5e941a633030 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -221,14 +221,15 @@ struct ubi_wl_entry;
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
+ * @vtbl_mutex: protects on-flash volume table
  *
  * @max_ec: current highest erase counter value
  * @mean_ec: current mean erase counter value
  *
- * global_sqnum: global sequence number
+ * @global_sqnum: global sequence number
  * @ltree_lock: protects the lock tree and @global_sqnum
  * @ltree: the lock tree
- * @vtbl_mutex: protects on-flash volume table
+ * @alc_mutex: serializes "atomic LEB change" operations
  *
  * @used: RB-tree of used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
@@ -308,6 +309,7 @@ struct ubi_device {
         unsigned long long global_sqnum;
         spinlock_t ltree_lock;
         struct rb_root ltree;
+        struct mutex alc_mutex;
 
         /* Wear-leveling unit's stuff */
         struct rb_root used;
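
The new @alc_mutex field backs the serialization described in the eba.c kerneldoc comment: only one "atomic LEB change" may run at a time, and every failure path in ubi_eba_atomic_leb_change() now unwinds through the out_leb_unlock/out_mutex labels so the unlock and free happen in exactly one place. A minimal user-space sketch of that lock-then-unwind shape, using pthreads rather than the kernel mutex API (all names are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static pthread_mutex_t alc_mutex = PTHREAD_MUTEX_INITIALIZER;

/* Pretend "LEB change": take the mutex, do the work, unwind through labels. */
static int change_leb(int lnum, const void *buf, size_t len)
{
        int err = 0;
        void *hdr;

        hdr = malloc(64);                       /* stand-in for the VID header */
        if (!hdr)
                return -1;

        pthread_mutex_lock(&alc_mutex);         /* one LEB change at a time */

        if (len == 0) {                         /* a failure after locking */
                err = -1;
                goto out_mutex;
        }
        (void)buf;                              /* real code would write buf to a fresh PEB */
        printf("LEB %d: wrote %zu bytes\n", lnum, len);

out_mutex:
        pthread_mutex_unlock(&alc_mutex);       /* single unlock site */
        free(hdr);                              /* single free site */
        return err;
}

int main(void)
{
        const char data[] = "new contents";

        return change_leb(3, data, strlen(data)) ? EXIT_FAILURE : EXIT_SUCCESS;
}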