author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-09-13 07:28:14 -0400
committer  Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-10-14 06:10:21 -0400
commit     e8823bd63d50bb1f9bd73f1197456a (patch)
tree       3b8b849512686aebda99f83d290c9e09ed983967 /drivers/mtd
parent     6986646ba752fef150286926aa922ef04e9d19dd (diff)
UBI: fix atomic LEB change problems
When the UBI device is nearly full, i.e. all LEBs are mapped, we have only one
spare LEB left - the one we reserved for WL purposes. Well, I do not count the
LEBs which were reserved for bad PEB handling - suppose NOR flash for
simplicity. If an "atomic LEB change" operation is run while the WL unit is
moving a LEB, we have no spare LEBs to finish the operation, and it fails,
which is not good. Moreover, if there are 2 or more simultaneous "atomic LEB
change" requests, only one of them has a chance to succeed; the others will
fail with -ENOSPC. Not good either.

This patch does 2 things:
1. Reserves one PEB for the "atomic LEB change" operation.
2. Serializes the operations so that only one of them may run at a time
   (by means of a mutex).

Pointed-to-by: Brijesh Singh <brijesh.s.singh@gmail.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
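For readers skimming the patch, the core idea is compact: set one physical eraseblock aside for this operation and make the operation exclusive with a mutex, so concurrent callers queue up instead of racing for the last spare block. The fragment below is a simplified, standalone C sketch of that pattern, not UBI code: it uses a pthread mutex in place of ubi->alc_mutex, and the names RESERVED_BLOCKS, do_atomic_change() and atomic_leb_change() are made up for illustration.

#include <pthread.h>
#include <stdio.h>

/* One spare block is set aside at init time, mirroring EBA_RESERVED_PEBS. */
#define RESERVED_BLOCKS 1

static pthread_mutex_t alc_mutex = PTHREAD_MUTEX_INITIALIZER; /* stand-in for ubi->alc_mutex */
static int avail_blocks = 10;   /* pretend pool of free blocks */

/* Hypothetical stand-in for writing the new LEB contents to the spare block
 * and atomically re-mapping the LEB to it. */
static int do_atomic_change(int lnum, const void *buf, int len)
{
        (void)lnum; (void)buf; (void)len;
        return 0;               /* pretend the write always succeeds */
}

/* Serialized entry point: only one atomic change runs at a time,
 * so the single reserved block is always enough to finish it. */
static int atomic_leb_change(int lnum, const void *buf, int len)
{
        int err;

        pthread_mutex_lock(&alc_mutex);
        err = do_atomic_change(lnum, buf, len);
        pthread_mutex_unlock(&alc_mutex);
        return err;
}

int main(void)
{
        /* Init-time accounting, as in the eba.c hunk below: refuse to start
         * if there is no block left to reserve. */
        if (avail_blocks < RESERVED_BLOCKS)
                return 1;       /* the kernel code logs an error and bails out here */
        avail_blocks -= RESERVED_BLOCKS;

        char data[] = "new contents";
        printf("change returned %d\n", atomic_leb_change(3, data, (int)sizeof(data)));
        return 0;
}

With this structure, a second caller that arrives while a change is in flight simply blocks on the mutex until the reserved block is free again, which is the behaviour the commit message describes instead of the earlier -ENOSPC failure.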
Diffstat (limited to 'drivers/mtd')
-rw-r--r--   drivers/mtd/ubi/eba.c   48
-rw-r--r--   drivers/mtd/ubi/ubi.h    6
2 files changed, 33 insertions, 21 deletions
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 81bb6a33b555..7b7add6da12f 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -46,6 +46,9 @@
 #include <linux/err.h>
 #include "ubi.h"
 
+/* Number of physical eraseblocks reserved for atomic LEB change operation */
+#define EBA_RESERVED_PEBS 1
+
 /**
  * struct ltree_entry - an entry in the lock tree.
  * @rb: links RB-tree nodes
@@ -827,6 +830,9 @@ write_error:
  * data, which has to be aligned. This function guarantees that in case of an
  * unclean reboot the old contents is preserved. Returns zero in case of
  * success and a negative error code in case of failure.
+ *
+ * UBI reserves one LEB for the "atomic LEB change" operation, so only one
+ * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  */
 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
                               const void *buf, int len, int dtype)
@@ -843,11 +849,10 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
        if (!vid_hdr)
                return -ENOMEM;
 
+       mutex_lock(&ubi->alc_mutex);
        err = leb_write_lock(ubi, vol_id, lnum);
-       if (err) {
-               ubi_free_vid_hdr(ubi, vid_hdr);
-               return err;
-       }
+       if (err)
+               goto out_mutex;
 
        vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
        vid_hdr->vol_id = cpu_to_be32(vol_id);
@@ -864,9 +869,8 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, int vol_id, int lnum,
 retry:
        pnum = ubi_wl_get_peb(ubi, dtype);
        if (pnum < 0) {
-               ubi_free_vid_hdr(ubi, vid_hdr);
-               leb_write_unlock(ubi, vol_id, lnum);
-               return pnum;
+               err = pnum;
+               goto out_leb_unlock;
        }
 
        dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
@@ -888,17 +892,18 @@ retry:
 
        if (vol->eba_tbl[lnum] >= 0) {
                err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 1);
-               if (err) {
-                       ubi_free_vid_hdr(ubi, vid_hdr);
-                       leb_write_unlock(ubi, vol_id, lnum);
-                       return err;
-               }
+               if (err)
+                       goto out_leb_unlock;
        }
 
        vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
        leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+       mutex_unlock(&ubi->alc_mutex);
        ubi_free_vid_hdr(ubi, vid_hdr);
-       return 0;
+       return err;
 
 write_error:
        if (err != -EIO || !ubi->bad_allowed) {
@@ -908,17 +913,13 @@ write_error:
                 * mode just in case.
                 */
                ubi_ro_mode(ubi);
-               leb_write_unlock(ubi, vol_id, lnum);
-               ubi_free_vid_hdr(ubi, vid_hdr);
-               return err;
+               goto out_leb_unlock;
        }
 
        err = ubi_wl_put_peb(ubi, pnum, 1);
        if (err || ++tries > UBI_IO_RETRIES) {
                ubi_ro_mode(ubi);
-               leb_write_unlock(ubi, vol_id, lnum);
-               ubi_free_vid_hdr(ubi, vid_hdr);
-               return err;
+               goto out_leb_unlock;
        }
 
        vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
@@ -1122,6 +1123,7 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
        dbg_eba("initialize EBA unit");
 
        spin_lock_init(&ubi->ltree_lock);
+       mutex_init(&ubi->alc_mutex);
        ubi->ltree = RB_ROOT;
 
        if (ubi_devices_cnt == 0) {
@@ -1183,6 +1185,14 @@ int ubi_eba_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
                ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
        }
 
+       if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
+               ubi_err("no enough physical eraseblocks (%d, need %d)",
+                       ubi->avail_pebs, EBA_RESERVED_PEBS);
+               goto out_free;
+       }
+       ubi->avail_pebs -= EBA_RESERVED_PEBS;
+       ubi->rsvd_pebs += EBA_RESERVED_PEBS;
+
        dbg_eba("EBA unit is initialized");
        return 0;
 
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index cc010111264f..5e941a633030 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -221,14 +221,15 @@ struct ubi_wl_entry;
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
+ * @vtbl_mutex: protects on-flash volume table
  *
  * @max_ec: current highest erase counter value
  * @mean_ec: current mean erase counter value
  *
- * global_sqnum: global sequence number
+ * @global_sqnum: global sequence number
  * @ltree_lock: protects the lock tree and @global_sqnum
  * @ltree: the lock tree
- * @vtbl_mutex: protects on-flash volume table
+ * @alc_mutex: serializes "atomic LEB change" operations
  *
  * @used: RB-tree of used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
@@ -308,6 +309,7 @@ struct ubi_device {
        unsigned long long global_sqnum;
        spinlock_t ltree_lock;
        struct rb_root ltree;
+       struct mutex alc_mutex;
 
        /* Wear-leveling unit's stuff */
        struct rb_root used;