author:    Nicolas Pitre <nico@cam.org>              2005-05-19 13:05:47 -0400
committer: Thomas Gleixner <tglx@mtd.linutronix.de>  2005-05-23 07:21:35 -0400
commit:    6da70124a1cc05bdbd7c847901964edc6f634a91
tree:      3cd1afd4b23b8ed31b2b92b9d11c67a854a7be63 /drivers
parent:    fff7afd791f6a685b3ddedb8cfb152aed85f3cf8
[MTD] CFI flash locking reorg for XIP
This reworks the XIP locking to make sure no lock primitive is ever
called from XIP-disabled paths, even if in theory they should not cause
any reschedule. Relying on the current spinlock implementation is rather
fragile and not especially clean from an abstraction point of view. The
recent RT work makes it even more obvious.

Signed-off-by: Nicolas Pitre <nico@cam.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/mtd/chips/cfi_cmdset_0001.c	90
1 file changed, 40 insertions(+), 50 deletions(-)
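In short: the old XIP code bumped the preempt count around XIP-disabled
windows so that a spin_unlock() elsewhere could not drop the count to
zero and schedule the CPU away while the flash was out of array mode.
The patch replaces that trick with explicit handling of the per-chip
spinlock. A simplified before/after sketch, distilled from the hunks
below (illustrative only, not verbatim driver code):

	/* Before: pin the CPU via the preempt count and trust the
	 * spinlock implementation never to reschedule underneath us. */
	preempt_disable();
	local_irq_disable();	/* flash leaves array mode; XIP must not run */
	/* ... poll the flash ... */
	local_irq_enable();
	preempt_enable();	/* fragile; breaks with the RT spinlock work */

	/* After: hold chip->mutex across the XIP-disabled window and only
	 * drop it once the flash is back in array mode, so no lock
	 * primitive is ever called from an XIP-disabled path. */
	local_irq_disable();
	/* ... poll the flash, suspend it back to array mode ... */
	local_irq_enable();
	spin_unlock(chip->mutex);	/* safe: XIP may run again */
	cond_resched();
	spin_lock(chip->mutex);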
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 71fad1601444..8b1304531d8f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -4,7 +4,7 @@
  *
  * (C) 2000 Red Hat. GPL'd
  *
- * $Id: cfi_cmdset_0001.c,v 1.176 2005/04/27 20:01:49 tpoynor Exp $
+ * $Id: cfi_cmdset_0001.c,v 1.178 2005/05/19 17:05:43 nico Exp $
  *
  *
  * 10/10/2000	Nicolas Pitre <nico@cam.org>
@@ -826,10 +826,6 @@ static void put_chip(struct map_info *map, struct flchip *chip, unsigned long ad
  * assembly to make sure inline functions were actually inlined and that gcc
  * didn't emit calls to its own support functions). Also configuring MTD CFI
  * support to a single buswidth and a single interleave is also recommended.
- * Note that not only IRQs are disabled but the preemption count is also
- * increased to prevent other locking primitives (namely spin_unlock) from
- * decrementing the preempt count to zero and scheduling the CPU away while
- * not in array mode.
  */
 
 static void xip_disable(struct map_info *map, struct flchip *chip,
@@ -837,7 +833,6 @@ static void xip_disable(struct map_info *map, struct flchip *chip,
 {
 	/* TODO: chips with no XIP use should ignore and return */
 	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
-	preempt_disable();
 	local_irq_disable();
 }
 
@@ -852,7 +847,6 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
 	(void) map_read(map, adr);
 	asm volatile (".rep 8; nop; .endr"); /* fill instruction prefetch */
 	local_irq_enable();
-	preempt_enable();
 }
 
 /*
@@ -928,7 +922,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 			(void) map_read(map, adr);
 			asm volatile (".rep 8; nop; .endr");
 			local_irq_enable();
-			preempt_enable();
+			spin_unlock(chip->mutex);
 			asm volatile (".rep 8; nop; .endr");
 			cond_resched();
 
@@ -938,15 +932,15 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 			 * a suspended erase state. If so let's wait
 			 * until it's done.
 			 */
-			preempt_disable();
+			spin_lock(chip->mutex);
 			while (chip->state != newstate) {
 				DECLARE_WAITQUEUE(wait, current);
 				set_current_state(TASK_UNINTERRUPTIBLE);
 				add_wait_queue(&chip->wq, &wait);
-				preempt_enable();
+				spin_unlock(chip->mutex);
 				schedule();
 				remove_wait_queue(&chip->wq, &wait);
-				preempt_disable();
+				spin_lock(chip->mutex);
 			}
 			/* Disallow XIP again */
 			local_irq_disable();
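The wait loop above follows the canonical wait-queue idiom; spelled out
with the reasoning (a simplified restatement of the lines added here):

	/* Queue the task and mark it sleeping *before* dropping the
	 * lock: a wake_up(&chip->wq) arriving between spin_unlock()
	 * and schedule() then just makes schedule() return at once,
	 * so no wakeup can be lost. */
	DECLARE_WAITQUEUE(wait, current);
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&chip->wq, &wait);
	spin_unlock(chip->mutex);
	schedule();		/* woken by wake_up(&chip->wq) elsewhere */
	remove_wait_queue(&chip->wq, &wait);
	spin_lock(chip->mutex);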
@@ -975,12 +969,14 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
  * the flash is actively programming or erasing since we have to poll for
  * the operation to complete anyway. We can't do that in a generic way with
- * a XIP setup so do it before the actual flash operation in this case.
+ * a XIP setup so do it before the actual flash operation in this case
+ * and stub it out from INVALIDATE_CACHE_UDELAY.
  */
-#undef INVALIDATE_CACHED_RANGE
-#define INVALIDATE_CACHED_RANGE(x...)
-#define XIP_INVAL_CACHED_RANGE(map, from, size) \
-	do { if(map->inval_cache) map->inval_cache(map, from, size); } while(0)
+#define XIP_INVAL_CACHED_RANGE(map, from, size) \
+	INVALIDATE_CACHED_RANGE(map, from, size)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
+	UDELAY(map, chip, adr, usec)
 
 /*
  * Extra notes:
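For the XIP build, cache invalidation is thus hoisted in front of the
flash operation via XIP_INVAL_CACHED_RANGE(), and the combined macro
degenerates to a plain delay. Roughly (assuming the XIP-side UDELAY()
wraps xip_udelay(), a definition this excerpt does not show):

	/* INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) expands to */
	UDELAY(map, chip, adr, usec);	/* i.e. xip_udelay(map, chip, adr, usec) */
	/* ...the range was already flushed by XIP_INVAL_CACHED_RANGE(). */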
@@ -1003,11 +999,23 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
 
 #define xip_disable(map, chip, adr)
 #define xip_enable(map, chip, adr)
-
-#define UDELAY(map, chip, adr, usec)  cfi_udelay(usec)
-
 #define XIP_INVAL_CACHED_RANGE(x...)
 
+#define UDELAY(map, chip, adr, usec)  \
+do {  \
+	spin_unlock(chip->mutex);  \
+	cfi_udelay(usec);  \
+	spin_lock(chip->mutex);  \
+} while (0)
+
+#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
+do {  \
+	spin_unlock(chip->mutex);  \
+	INVALIDATE_CACHED_RANGE(map, adr, len);  \
+	cfi_udelay(usec);  \
+	spin_lock(chip->mutex);  \
+} while (0)
+
 #endif
 
 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
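In the non-XIP build the new macros localize the unlock/delay/relock
idiom that was previously open-coded at every call site. A call such as
the one in do_write_oneword() below expands roughly to (illustrative
expansion from the definitions above):

	/* INVALIDATE_CACHE_UDELAY(map, chip, adr,
	 *                         map_bankwidth(map),
	 *                         chip->word_write_time) becomes: */
	do {
		spin_unlock(chip->mutex);	/* drop the chip lock ...     */
		INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
		cfi_udelay(chip->word_write_time); /* ... wait for the flash ... */
		spin_lock(chip->mutex);		/* ... and retake the lock    */
	} while (0);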
@@ -1227,10 +1235,9 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 	map_write(map, datum, adr);
 	chip->state = mode;
 
-	spin_unlock(chip->mutex);
-	INVALIDATE_CACHED_RANGE(map, adr, map_bankwidth(map));
-	UDELAY(map, chip, adr, chip->word_write_time);
-	spin_lock(chip->mutex);
+	INVALIDATE_CACHE_UDELAY(map, chip,
+				adr, map_bankwidth(map),
+				chip->word_write_time);
 
 	timeo = jiffies + (HZ/2);
 	z = 0;
@@ -1263,10 +1270,8 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock(chip->mutex);
 		z++;
 		UDELAY(map, chip, adr, 1);
-		spin_lock(chip->mutex);
 	}
 	if (!z) {
 		chip->word_write_time--;
@@ -1430,9 +1435,7 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 		if (map_word_andequal(map, status, status_OK, status_OK))
 			break;
 
-		spin_unlock(chip->mutex);
 		UDELAY(map, chip, cmd_adr, 1);
-		spin_lock(chip->mutex);
 
 		if (++z > 20) {
 			/* Argh. Not ready for write to buffer */
@@ -1478,10 +1481,9 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 	map_write(map, CMD(0xd0), cmd_adr);
 	chip->state = FL_WRITING;
 
-	spin_unlock(chip->mutex);
-	INVALIDATE_CACHED_RANGE(map, adr, len);
-	UDELAY(map, chip, cmd_adr, chip->buffer_write_time);
-	spin_lock(chip->mutex);
+	INVALIDATE_CACHE_UDELAY(map, chip,
+				cmd_adr, len,
+				chip->buffer_write_time);
 
 	timeo = jiffies + (HZ/2);
 	z = 0;
@@ -1513,10 +1515,8 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock(chip->mutex);
-		UDELAY(map, chip, cmd_adr, 1);
 		z++;
-		spin_lock(chip->mutex);
+		UDELAY(map, chip, cmd_adr, 1);
 	}
 	if (!z) {
 		chip->buffer_write_time--;
@@ -1644,10 +1644,9 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 	chip->state = FL_ERASING;
 	chip->erase_suspended = 0;
 
-	spin_unlock(chip->mutex);
-	INVALIDATE_CACHED_RANGE(map, adr, len);
-	UDELAY(map, chip, adr, chip->erase_time*1000/2);
-	spin_lock(chip->mutex);
+	INVALIDATE_CACHE_UDELAY(map, chip,
+				adr, len,
+				chip->erase_time*1000/2);
 
 	/* FIXME. Use a timer to check this, and return immediately. */
 	/* Once the state machine's known to be working I'll do that */
@@ -1692,9 +1691,7 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock(chip->mutex);
 		UDELAY(map, chip, adr, 1000000/HZ);
-		spin_lock(chip->mutex);
 	}
 
 	/* We've broken this before. It doesn't hurt to be safe */
@@ -1866,11 +1863,8 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
 	 * to delay.
 	 */
 
-	if (!extp || !(extp->FeatureSupport & (1 << 5))) {
-		spin_unlock(chip->mutex);
+	if (!extp || !(extp->FeatureSupport & (1 << 5)))
 		UDELAY(map, chip, adr, 1000000/HZ);
-		spin_lock(chip->mutex);
-	}
 
 	/* FIXME. Use a timer to check this, and return immediately. */
 	/* Once the state machine's known to be working I'll do that */
@@ -1897,9 +1891,7 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip
 		}
 
 		/* Latency issues. Drop the lock, wait a while and retry */
-		spin_unlock(chip->mutex);
 		UDELAY(map, chip, adr, 1);
-		spin_lock(chip->mutex);
 	}
 
 	/* Done and happy. */
@@ -1979,8 +1971,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
 	}
 
 	/* let's ensure we're not reading back cached data from array mode */
-	if (map->inval_cache)
-		map->inval_cache(map, chip->start + offset, size);
+	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
 
 	xip_disable(map, chip, chip->start);
 	if (chip->state != FL_JEDEC_QUERY) {
@@ -1991,8 +1982,7 @@ do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
 	xip_enable(map, chip, chip->start);
 
 	/* then ensure we don't keep OTP data in the cache */
-	if (map->inval_cache)
-		map->inval_cache(map, chip->start + offset, size);
+	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
 
 	put_chip(map, chip, chip->start);
 	spin_unlock(chip->mutex);