author | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2008-06-16 06:35:23 -0400
committer | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2008-07-24 06:32:56 -0400
commit | 23add7455c42eef63f8719bd268328047d4aed69 (patch)
tree | 22fff61ae989e32987635e334a0b542dd6389433
parent | 472018f73e7308a7f29b753ee8c742b6f45f103f (diff)
UBI: fix LEB locking
leb_read_unlock() may be called simultaneously by several tasks.
They would race at the following code:
        up_read(&le->mutex);
        if (free)
                kfree(le);
And it is possible that one task frees 'le' before the other tasks
do 'up_read()'. Fix this by doing the up_read() and the kfree() while
still holding the 'ubi->ltree_lock' spinlock (an annotated sketch of the
fixed function follows the diff below). Below is the oops we had because
of this:
BUG: spinlock bad magic on CPU#0, integck/7504
BUG: unable to handle kernel paging request at 6b6b6c4f
IP: [<c0211221>] spin_bug+0x5c/0xdb
*pde = 00000000
Oops: 0000 [#1] PREEMPT SMP
Modules linked in: ubifs ubi nandsim nand nand_ids nand_ecc video output
Pid: 7504, comm: integck Not tainted (2.6.26-rc3ubifs26 #8)
EIP: 0060:[<c0211221>] EFLAGS: 00010002 CPU: 0
EIP is at spin_bug+0x5c/0xdb
EAX: 00000032 EBX: 6b6b6b6b ECX: 6b6b6b6b EDX: f7f7ce30
ESI: f76491dc EDI: c044f51f EBP: e8a736cc ESP: e8a736a8
DS: 007b ES: 007b FS: 00d8 GS: 0033 SS: 0068
Process integck (pid: 7504, ti=e8a72000 task=f7f7ce30 task.ti=e8a72000)
Stack: c044f754 c044f51f 00000000 f7f7d024 00001d50 00000001 f76491dc 00000296
       f6df50e0 e8a736d8 c02112f0 f76491dc e8a736e8 c039157a f7d9e830 f76491d8
       e8a7370c c020b975 f76491dc 00000296 f76491f8 00000000 f76491d8 00000000
Call Trace:
[<c02112f0>] ? _raw_spin_unlock+0x50/0x7c
[<c039157a>] ? _spin_unlock_irqrestore+0x20/0x58
[<c020b975>] ? rwsem_wake+0x4b/0x122
[<c0390e0a>] ? call_rwsem_wake+0xa/0xc
[<c0139ee7>] ? up_read+0x28/0x31
[<f8873b3c>] ? leb_read_unlock+0x73/0x7b [ubi]
[<f88742a3>] ? ubi_eba_read_leb+0x195/0x2b0 [ubi]
[<f8872a04>] ? ubi_leb_read+0xaf/0xf8 [ubi]
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
-rw-r--r-- | drivers/mtd/ubi/eba.c | 27
1 file changed, 7 insertions, 20 deletions
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 623d25f4855f..8dc488fc0cdf 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -223,22 +223,18 @@ static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-        int free = 0;
         struct ubi_ltree_entry *le;
 
         spin_lock(&ubi->ltree_lock);
         le = ltree_lookup(ubi, vol_id, lnum);
         le->users -= 1;
         ubi_assert(le->users >= 0);
+        up_read(&le->mutex);
         if (le->users == 0) {
                 rb_erase(&le->rb, &ubi->ltree);
-                free = 1;
+                kfree(le);
         }
         spin_unlock(&ubi->ltree_lock);
-
-        up_read(&le->mutex);
-        if (free)
-                kfree(le);
 }
 
 /**
@@ -274,7 +270,6 @@ static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-        int free;
         struct ubi_ltree_entry *le;
 
         le = ltree_add_entry(ubi, vol_id, lnum);
@@ -289,12 +284,9 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
         ubi_assert(le->users >= 0);
         if (le->users == 0) {
                 rb_erase(&le->rb, &ubi->ltree);
-                free = 1;
-        } else
-                free = 0;
-        spin_unlock(&ubi->ltree_lock);
-        if (free)
                 kfree(le);
+        }
+        spin_unlock(&ubi->ltree_lock);
 
         return 1;
 }
@@ -307,23 +299,18 @@ static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
  */
 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
 {
-        int free;
         struct ubi_ltree_entry *le;
 
         spin_lock(&ubi->ltree_lock);
         le = ltree_lookup(ubi, vol_id, lnum);
         le->users -= 1;
         ubi_assert(le->users >= 0);
+        up_write(&le->mutex);
         if (le->users == 0) {
                 rb_erase(&le->rb, &ubi->ltree);
-                free = 1;
-        } else
-                free = 0;
-        spin_unlock(&ubi->ltree_lock);
-
-        up_write(&le->mutex);
-        if (free)
                 kfree(le);
+        }
+        spin_unlock(&ubi->ltree_lock);
 }
 
 /**
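
For anyone reading the change without applying it, here is a sketch of
leb_read_unlock() as it looks after this patch. It matches the new side of
the first hunk above; the comments are added for this write-up and are not
part of eba.c.

/* Sketch of leb_read_unlock() after this fix (comments added here). */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
        struct ubi_ltree_entry *le;

        spin_lock(&ubi->ltree_lock);
        le = ltree_lookup(ubi, vol_id, lnum);
        le->users -= 1;
        ubi_assert(le->users >= 0);
        /*
         * Release the rwsem while still holding ltree_lock.  The task that
         * eventually frees 'le' must take ltree_lock first and only frees
         * the entry once 'users' has reached zero, i.e. after every
         * unlocker (including us) has already done its up_read() here.
         */
        up_read(&le->mutex);
        if (le->users == 0) {
                /* Last user: drop the entry from the tree and free it. */
                rb_erase(&le->rb, &ubi->ltree);
                kfree(le);
        }
        spin_unlock(&ubi->ltree_lock);
}

leb_write_unlock() is changed the same way with up_write(), and
leb_write_trylock() now frees the entry while the spinlock is still held,
so none of the three paths touches 'le' after another task may have freed
it.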