author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-12-18 08:54:35 -0500
committer Artem Bityutskiy <Artem.Bityutskiy@nokia.com>  2007-12-26 12:15:16 -0500
commit    593dd33c92c6529443d5df1350dc5cc76511232d (patch)
tree      dda360da5a5f66eb36b55a4c2e8eb985e997ffd6 /drivers/mtd/ubi
parent    458dbb3d07574e8fcdcb921ac155ccd81b16b05f (diff)
UBI: fix ubi_wl_flush
The flush function should finish all the pending jobs. But if somebody else is doing a work, this function should wait and let it finish. This patch uses an rw semaphore for synchronization purposes - it just looks quite convenient.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
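[Editor's note, for illustration only - not part of the patch: below is a minimal userspace sketch of the flush-barrier idiom this patch introduces, with POSIX pthread_rwlock_t standing in for the kernel's rw_semaphore. Workers hold the lock in read mode while executing a job; the flusher takes it in write mode once and drops it immediately, which cannot succeed until every in-flight reader has finished. Build with `gcc -pthread`.]

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;

static void *worker(void *arg)
{
	/* Read mode: many workers may execute jobs concurrently. */
	pthread_rwlock_rdlock(&work_sem);
	printf("worker %ld: doing a job\n", (long)arg);
	usleep(100000);		/* stand-in for the real work */
	pthread_rwlock_unlock(&work_sem);
	return NULL;
}

static void flush(void)
{
	/*
	 * Write mode excludes all readers, so by the time we own the
	 * lock, every worker that was mid-job has finished. Merely
	 * acquiring it once is the barrier; nothing is done under it.
	 */
	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);

	/*
	 * Workers that had not yet taken the read lock may still queue
	 * more work afterwards - which is why the patched ubi_wl_flush()
	 * checks ubi->works_count again after the barrier.
	 */
	printf("flush: all in-flight jobs finished\n");
}

int main(void)
{
	pthread_t t[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, worker, (void *)i);
	flush();
	for (i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}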
Diffstat (limited to 'drivers/mtd/ubi')
-rw-r--r--  drivers/mtd/ubi/ubi.h |  1
-rw-r--r--  drivers/mtd/ubi/wl.c  | 39
2 files changed, 33 insertions(+), 7 deletions(-)
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index ea9a6990a4dc..994233d6e1e3 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -355,6 +355,7 @@ struct ubi_device {
 	} prot;
 	spinlock_t wl_lock;
 	struct mutex move_mutex;
+	struct rw_semaphore work_sem;
 	int wl_scheduled;
 	struct ubi_wl_entry **lookuptbl;
 	unsigned long long abs_ec;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index a60f9425ab13..8421c7a9a835 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -251,10 +251,18 @@ static int do_work(struct ubi_device *ubi)
 
 	cond_resched();
 
+	/*
+	 * @ubi->work_sem is used to synchronize with the workers. Workers take
+	 * it in read mode, so many of them may be doing works at a time. But
+	 * the queue flush code has to be sure the whole queue of works is
+	 * done, and it takes the mutex in write mode.
+	 */
+	down_read(&ubi->work_sem);
 	spin_lock(&ubi->wl_lock);
 
 	if (list_empty(&ubi->works)) {
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 		return 0;
 	}
 
@@ -275,6 +283,7 @@ static int do_work(struct ubi_device *ubi)
 	ubi->works_count -= 1;
 	ubi_assert(ubi->works_count >= 0);
 	spin_unlock(&ubi->wl_lock);
+	up_read(&ubi->work_sem);
 	return err;
 }
 
@@ -1173,7 +1182,7 @@ retry:
 	 * the WL unit has not put the PEB to the "used" tree yet, but
 	 * it is about to do this. So we just set a flag which will
 	 * tell the WL worker that the PEB is not needed anymore and
-	 * should be sheduled for erasure.
+	 * should be scheduled for erasure.
 	 */
 	dbg_wl("PEB %d is the target of data moving", pnum);
 	ubi_assert(!ubi->move_to_put);
@@ -1280,17 +1289,32 @@ retry:
  */
 int ubi_wl_flush(struct ubi_device *ubi)
 {
-	int err, pending_count;
-
-	pending_count = ubi->works_count;
-
-	dbg_wl("flush (%d pending works)", pending_count);
+	int err;
 
 	/*
 	 * Erase while the pending works queue is not empty, but not more then
 	 * the number of currently pending works.
 	 */
-	while (pending_count-- > 0) {
+	dbg_wl("flush (%d pending works)", ubi->works_count);
+	while (ubi->works_count) {
+		err = do_work(ubi);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * Make sure all the works which have been done in parallel are
+	 * finished.
+	 */
+	down_write(&ubi->work_sem);
+	up_write(&ubi->work_sem);
+
+	/*
+	 * And in case last was the WL worker and it cancelled the LEB
+	 * movement, flush again.
+	 */
+	while (ubi->works_count) {
+		dbg_wl("flush more (%d pending works)", ubi->works_count);
 		err = do_work(ubi);
 		if (err)
 			return err;
@@ -1426,6 +1450,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
 	mutex_init(&ubi->move_mutex);
+	init_rwsem(&ubi->work_sem);
 	ubi->max_ec = si->max_ec;
 	INIT_LIST_HEAD(&ubi->works);
 