diff options
author | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2007-08-28 14:29:32 -0400 |
---|---|---|
committer | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2007-10-14 06:10:20 -0400 |
commit | 33818bbb84cd371b63ed8849cc5264d24c8b3aa2 (patch) | |
tree | ecee6a8f02d2d103c6eb33cc6173e74343b1861b /drivers/mtd/ubi/wl.c | |
parent | ef6075fbfca9139f428d52d60e671da38aa2e212 (diff) |
UBI: allocate memory with GFP_NOFS
Use GFP_NOFS flag when allocating memory on I/O path, because otherwise
we may deadlock the filesystem which works on top of us. We observed
the deadlocks with UBIFS. Example:
VFS->FS locks a lock->UBI->kmalloc()->VFS writeback->FS locks the same
lock again.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Diffstat (limited to 'drivers/mtd/ubi/wl.c')
-rw-r--r-- | drivers/mtd/ubi/wl.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c index 12b25e5b7b81..248ff9e477e7 100644 --- a/drivers/mtd/ubi/wl.c +++ b/drivers/mtd/ubi/wl.c | |||
@@ -508,7 +508,7 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype) | |||
508 | ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || | 508 | ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM || |
509 | dtype == UBI_UNKNOWN); | 509 | dtype == UBI_UNKNOWN); |
510 | 510 | ||
511 | pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_KERNEL); | 511 | pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS); |
512 | if (!pe) | 512 | if (!pe) |
513 | return -ENOMEM; | 513 | return -ENOMEM; |
514 | 514 | ||
@@ -645,7 +645,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, int tortur | |||
645 | if (err > 0) | 645 | if (err > 0) |
646 | return -EINVAL; | 646 | return -EINVAL; |
647 | 647 | ||
648 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 648 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); |
649 | if (!ec_hdr) | 649 | if (!ec_hdr) |
650 | return -ENOMEM; | 650 | return -ENOMEM; |
651 | 651 | ||
@@ -768,7 +768,7 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e, | |||
768 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", | 768 | dbg_wl("schedule erasure of PEB %d, EC %d, torture %d", |
769 | e->pnum, e->ec, torture); | 769 | e->pnum, e->ec, torture); |
770 | 770 | ||
771 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); | 771 | wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); |
772 | if (!wl_wrk) | 772 | if (!wl_wrk) |
773 | return -ENOMEM; | 773 | return -ENOMEM; |
774 | 774 | ||
@@ -802,7 +802,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, | |||
802 | if (cancel) | 802 | if (cancel) |
803 | return 0; | 803 | return 0; |
804 | 804 | ||
805 | vid_hdr = ubi_zalloc_vid_hdr(ubi); | 805 | vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS); |
806 | if (!vid_hdr) | 806 | if (!vid_hdr) |
807 | return -ENOMEM; | 807 | return -ENOMEM; |
808 | 808 | ||
@@ -1028,7 +1028,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi) | |||
1028 | ubi->wl_scheduled = 1; | 1028 | ubi->wl_scheduled = 1; |
1029 | spin_unlock(&ubi->wl_lock); | 1029 | spin_unlock(&ubi->wl_lock); |
1030 | 1030 | ||
1031 | wrk = kmalloc(sizeof(struct ubi_work), GFP_KERNEL); | 1031 | wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS); |
1032 | if (!wrk) { | 1032 | if (!wrk) { |
1033 | err = -ENOMEM; | 1033 | err = -ENOMEM; |
1034 | goto out_cancel; | 1034 | goto out_cancel; |
@@ -1631,7 +1631,7 @@ static int paranoid_check_ec(const struct ubi_device *ubi, int pnum, int ec) | |||
1631 | long long read_ec; | 1631 | long long read_ec; |
1632 | struct ubi_ec_hdr *ec_hdr; | 1632 | struct ubi_ec_hdr *ec_hdr; |
1633 | 1633 | ||
1634 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL); | 1634 | ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); |
1635 | if (!ec_hdr) | 1635 | if (!ec_hdr) |
1636 | return -ENOMEM; | 1636 | return -ENOMEM; |
1637 | 1637 | ||