Diffstat (limited to 'drivers/mtd/chips')
-rw-r--r--  drivers/mtd/chips/Kconfig            |   1
-rw-r--r--  drivers/mtd/chips/Makefile           |   7
-rw-r--r--  drivers/mtd/chips/amd_flash.c        |   8
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0001.c  | 474
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0002.c  |  22
-rw-r--r--  drivers/mtd/chips/cfi_cmdset_0020.c  |  22
-rw-r--r--  drivers/mtd/chips/cfi_probe.c        |   8
-rw-r--r--  drivers/mtd/chips/gen_probe.c        |  45
-rw-r--r--  drivers/mtd/chips/map_ram.c          |   2
-rw-r--r--  drivers/mtd/chips/map_rom.c          |   4
-rw-r--r--  drivers/mtd/chips/sharp.c            |   1
11 files changed, 217 insertions, 377 deletions
diff --git a/drivers/mtd/chips/Kconfig b/drivers/mtd/chips/Kconfig
index a7ec5954caf5..6d8f30deb868 100644
--- a/drivers/mtd/chips/Kconfig
+++ b/drivers/mtd/chips/Kconfig
@@ -30,7 +30,6 @@ config MTD_JEDECPROBE | |||
30 | 30 | ||
31 | config MTD_GEN_PROBE | 31 | config MTD_GEN_PROBE |
32 | tristate | 32 | tristate |
33 | select OBSOLETE_INTERMODULE | ||
34 | 33 | ||
35 | config MTD_CFI_ADV_OPTIONS | 34 | config MTD_CFI_ADV_OPTIONS |
36 | bool "Flash chip driver advanced configuration options" | 35 | bool "Flash chip driver advanced configuration options" |
diff --git a/drivers/mtd/chips/Makefile b/drivers/mtd/chips/Makefile
index 8afe3092c4e3..75bc1c2a0f43 100644
--- a/drivers/mtd/chips/Makefile
+++ b/drivers/mtd/chips/Makefile
@@ -3,13 +3,6 @@ | |||
3 | # | 3 | # |
4 | # $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $ | 4 | # $Id: Makefile.common,v 1.5 2005/11/07 11:14:22 gleixner Exp $ |
5 | 5 | ||
6 | # *** BIG UGLY NOTE *** | ||
7 | # | ||
8 | # The removal of get_module_symbol() and replacement with | ||
9 | # inter_module_register() et al has introduced a link order dependency | ||
10 | # here where previously there was none. We now have to ensure that | ||
11 | # the CFI command set drivers are linked before gen_probe.o | ||
12 | |||
13 | obj-$(CONFIG_MTD) += chipreg.o | 6 | obj-$(CONFIG_MTD) += chipreg.o |
14 | obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o | 7 | obj-$(CONFIG_MTD_AMDSTD) += amd_flash.o |
15 | obj-$(CONFIG_MTD_CFI) += cfi_probe.o | 8 | obj-$(CONFIG_MTD_CFI) += cfi_probe.o |
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index 57115618c496..16eaca69fb5a 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -97,7 +97,6 @@ struct amd_flash_private { | |||
97 | int interleave; | 97 | int interleave; |
98 | int numchips; | 98 | int numchips; |
99 | unsigned long chipshift; | 99 | unsigned long chipshift; |
100 | // const char *im_name; | ||
101 | struct flchip chips[0]; | 100 | struct flchip chips[0]; |
102 | }; | 101 | }; |
103 | 102 | ||
@@ -131,12 +130,6 @@ static struct mtd_chip_driver amd_flash_chipdrv = { | |||
131 | .module = THIS_MODULE | 130 | .module = THIS_MODULE |
132 | }; | 131 | }; |
133 | 132 | ||
134 | |||
135 | |||
136 | static const char im_name[] = "amd_flash"; | ||
137 | |||
138 | |||
139 | |||
140 | static inline __u32 wide_read(struct map_info *map, __u32 addr) | 133 | static inline __u32 wide_read(struct map_info *map, __u32 addr) |
141 | { | 134 | { |
142 | if (map->buswidth == 1) { | 135 | if (map->buswidth == 1) { |
@@ -737,6 +730,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map) | |||
737 | offset += dev_size; | 730 | offset += dev_size; |
738 | } | 731 | } |
739 | mtd->type = MTD_NORFLASH; | 732 | mtd->type = MTD_NORFLASH; |
733 | mtd->writesize = 1; | ||
740 | mtd->flags = MTD_CAP_NORFLASH; | 734 | mtd->flags = MTD_CAP_NORFLASH; |
741 | mtd->name = map->name; | 735 | mtd->name = map->name; |
742 | mtd->erase = amd_flash_erase; | 736 | mtd->erase = amd_flash_erase; |
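The new mtd->writesize = 1 line mirrors what the cfi_cmdset_0002.c and sharp.c hunks below do: a NOR map-chip probe now declares its program granularity explicitly instead of leaving the field zero. A minimal sketch of the fields involved, using a hypothetical probe function (the name and the allocation are illustrative, not taken from amd_flash.c):

	#include <linux/slab.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/map.h>

	/* Hypothetical probe fragment showing the mtd_info fields this
	 * diff touches.  writesize = 1 advertises byte-programmable NOR
	 * to users of the field. */
	static struct mtd_info *example_nor_probe(struct map_info *map)
	{
		struct mtd_info *mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);

		if (!mtd)
			return NULL;

		mtd->type      = MTD_NORFLASH;
		mtd->flags     = MTD_CAP_NORFLASH;
		mtd->writesize = 1;	/* single-byte program granularity */
		mtd->name      = map->name;
		mtd->size      = map->size;
		return mtd;
	}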
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index 1c074d63ff3a..0d435814aaa1 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -331,13 +331,6 @@ read_pri_intelext(struct map_info *map, __u16 adr) | |||
331 | return extp; | 331 | return extp; |
332 | } | 332 | } |
333 | 333 | ||
334 | /* This routine is made available to other mtd code via | ||
335 | * inter_module_register. It must only be accessed through | ||
336 | * inter_module_get which will bump the use count of this module. The | ||
337 | * addresses passed back in cfi are valid as long as the use count of | ||
338 | * this module is non-zero, i.e. between inter_module_get and | ||
339 | * inter_module_put. Keith Owens <kaos@ocs.com.au> 29 Oct 2000. | ||
340 | */ | ||
341 | struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | 334 | struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) |
342 | { | 335 | { |
343 | struct cfi_private *cfi = map->fldrv_priv; | 336 | struct cfi_private *cfi = map->fldrv_priv; |
@@ -406,7 +399,7 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | |||
406 | for (i=0; i< cfi->numchips; i++) { | 399 | for (i=0; i< cfi->numchips; i++) { |
407 | cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; | 400 | cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp; |
408 | cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; | 401 | cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp; |
409 | cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp; | 402 | cfi->chips[i].erase_time = 1000<<cfi->cfiq->BlockEraseTimeoutTyp; |
410 | cfi->chips[i].ref_point_counter = 0; | 403 | cfi->chips[i].ref_point_counter = 0; |
411 | init_waitqueue_head(&(cfi->chips[i].wq)); | 404 | init_waitqueue_head(&(cfi->chips[i].wq)); |
412 | } | 405 | } |
@@ -415,6 +408,11 @@ struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary) | |||
415 | 408 | ||
416 | return cfi_intelext_setup(mtd); | 409 | return cfi_intelext_setup(mtd); |
417 | } | 410 | } |
411 | struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001"))); | ||
412 | struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001"))); | ||
413 | EXPORT_SYMBOL_GPL(cfi_cmdset_0001); | ||
414 | EXPORT_SYMBOL_GPL(cfi_cmdset_0003); | ||
415 | EXPORT_SYMBOL_GPL(cfi_cmdset_0200); | ||
418 | 416 | ||
419 | static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) | 417 | static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd) |
420 | { | 418 | { |
@@ -547,12 +545,12 @@ static int cfi_intelext_partition_fixup(struct mtd_info *mtd, | |||
547 | if (extp->MinorVersion >= '4') { | 545 | if (extp->MinorVersion >= '4') { |
548 | struct cfi_intelext_programming_regioninfo *prinfo; | 546 | struct cfi_intelext_programming_regioninfo *prinfo; |
549 | prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs]; | 547 | prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs]; |
550 | MTD_PROGREGION_SIZE(mtd) = cfi->interleave << prinfo->ProgRegShift; | 548 | mtd->writesize = cfi->interleave << prinfo->ProgRegShift; |
551 | MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid; | 549 | MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid; |
552 | MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid; | 550 | MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid; |
553 | mtd->flags |= MTD_PROGRAM_REGIONS; | 551 | mtd->flags &= ~MTD_BIT_WRITEABLE; |
554 | printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n", | 552 | printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n", |
555 | map->name, MTD_PROGREGION_SIZE(mtd), | 553 | map->name, mtd->writesize, |
556 | MTD_PROGREGION_CTRLMODE_VALID(mtd), | 554 | MTD_PROGREGION_CTRLMODE_VALID(mtd), |
557 | MTD_PROGREGION_CTRLMODE_INVALID(mtd)); | 555 | MTD_PROGREGION_CTRLMODE_INVALID(mtd)); |
558 | } | 556 | } |
@@ -896,26 +894,33 @@ static void __xipram xip_enable(struct map_info *map, struct flchip *chip, | |||
896 | 894 | ||
897 | /* | 895 | /* |
898 | * When a delay is required for the flash operation to complete, the | 896 | * When a delay is required for the flash operation to complete, the |
899 | * xip_udelay() function is polling for both the given timeout and pending | 897 | * xip_wait_for_operation() function is polling for both the given timeout |
900 | * (but still masked) hardware interrupts. Whenever there is an interrupt | 898 | * and pending (but still masked) hardware interrupts. Whenever there is an |
901 | * pending then the flash erase or write operation is suspended, array mode | 899 | * interrupt pending then the flash erase or write operation is suspended, |
902 | * restored and interrupts unmasked. Task scheduling might also happen at that | 900 | * array mode restored and interrupts unmasked. Task scheduling might also |
903 | * point. The CPU eventually returns from the interrupt or the call to | 901 | * happen at that point. The CPU eventually returns from the interrupt or |
904 | * schedule() and the suspended flash operation is resumed for the remaining | 902 | * the call to schedule() and the suspended flash operation is resumed for |
905 | * of the delay period. | 903 | * the remaining of the delay period. |
906 | * | 904 | * |
907 | * Warning: this function _will_ fool interrupt latency tracing tools. | 905 | * Warning: this function _will_ fool interrupt latency tracing tools. |
908 | */ | 906 | */ |
909 | 907 | ||
910 | static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | 908 | static int __xipram xip_wait_for_operation( |
911 | unsigned long adr, int usec) | 909 | struct map_info *map, struct flchip *chip, |
910 | unsigned long adr, int *chip_op_time ) | ||
912 | { | 911 | { |
913 | struct cfi_private *cfi = map->fldrv_priv; | 912 | struct cfi_private *cfi = map->fldrv_priv; |
914 | struct cfi_pri_intelext *cfip = cfi->cmdset_priv; | 913 | struct cfi_pri_intelext *cfip = cfi->cmdset_priv; |
915 | map_word status, OK = CMD(0x80); | 914 | map_word status, OK = CMD(0x80); |
916 | unsigned long suspended, start = xip_currtime(); | 915 | unsigned long usec, suspended, start, done; |
917 | flstate_t oldstate, newstate; | 916 | flstate_t oldstate, newstate; |
918 | 917 | ||
918 | start = xip_currtime(); | ||
919 | usec = *chip_op_time * 8; | ||
920 | if (usec == 0) | ||
921 | usec = 500000; | ||
922 | done = 0; | ||
923 | |||
919 | do { | 924 | do { |
920 | cpu_relax(); | 925 | cpu_relax(); |
921 | if (xip_irqpending() && cfip && | 926 | if (xip_irqpending() && cfip && |
@@ -932,9 +937,9 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | |||
932 | * we resume the whole thing at once). Yes, it | 937 | * we resume the whole thing at once). Yes, it |
933 | * can happen! | 938 | * can happen! |
934 | */ | 939 | */ |
940 | usec -= done; | ||
935 | map_write(map, CMD(0xb0), adr); | 941 | map_write(map, CMD(0xb0), adr); |
936 | map_write(map, CMD(0x70), adr); | 942 | map_write(map, CMD(0x70), adr); |
937 | usec -= xip_elapsed_since(start); | ||
938 | suspended = xip_currtime(); | 943 | suspended = xip_currtime(); |
939 | do { | 944 | do { |
940 | if (xip_elapsed_since(suspended) > 100000) { | 945 | if (xip_elapsed_since(suspended) > 100000) { |
@@ -944,7 +949,7 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | |||
944 | * This is a critical error but there | 949 | * This is a critical error but there |
945 | * is not much we can do here. | 950 | * is not much we can do here. |
946 | */ | 951 | */ |
947 | return; | 952 | return -EIO; |
948 | } | 953 | } |
949 | status = map_read(map, adr); | 954 | status = map_read(map, adr); |
950 | } while (!map_word_andequal(map, status, OK, OK)); | 955 | } while (!map_word_andequal(map, status, OK, OK)); |
@@ -1004,65 +1009,107 @@ static void __xipram xip_udelay(struct map_info *map, struct flchip *chip, | |||
1004 | xip_cpu_idle(); | 1009 | xip_cpu_idle(); |
1005 | } | 1010 | } |
1006 | status = map_read(map, adr); | 1011 | status = map_read(map, adr); |
1012 | done = xip_elapsed_since(start); | ||
1007 | } while (!map_word_andequal(map, status, OK, OK) | 1013 | } while (!map_word_andequal(map, status, OK, OK) |
1008 | && xip_elapsed_since(start) < usec); | 1014 | && done < usec); |
1009 | } | ||
1010 | 1015 | ||
1011 | #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec) | 1016 | return (done >= usec) ? -ETIME : 0; |
1017 | } | ||
1012 | 1018 | ||
1013 | /* | 1019 | /* |
1014 | * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while | 1020 | * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while |
1015 | * the flash is actively programming or erasing since we have to poll for | 1021 | * the flash is actively programming or erasing since we have to poll for |
1016 | * the operation to complete anyway. We can't do that in a generic way with | 1022 | * the operation to complete anyway. We can't do that in a generic way with |
1017 | * a XIP setup so do it before the actual flash operation in this case | 1023 | * a XIP setup so do it before the actual flash operation in this case |
1018 | * and stub it out from INVALIDATE_CACHE_UDELAY. | 1024 | * and stub it out from INVAL_CACHE_AND_WAIT. |
1019 | */ | 1025 | */ |
1020 | #define XIP_INVAL_CACHED_RANGE(map, from, size) \ | 1026 | #define XIP_INVAL_CACHED_RANGE(map, from, size) \ |
1021 | INVALIDATE_CACHED_RANGE(map, from, size) | 1027 | INVALIDATE_CACHED_RANGE(map, from, size) |
1022 | 1028 | ||
1023 | #define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \ | 1029 | #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \ |
1024 | UDELAY(map, chip, cmd_adr, usec) | 1030 | xip_wait_for_operation(map, chip, cmd_adr, p_usec) |
1025 | |||
1026 | /* | ||
1027 | * Extra notes: | ||
1028 | * | ||
1029 | * Activating this XIP support changes the way the code works a bit. For | ||
1030 | * example the code to suspend the current process when concurrent access | ||
1031 | * happens is never executed because xip_udelay() will always return with the | ||
1032 | * same chip state as it was entered with. This is why there is no care for | ||
1033 | * the presence of add_wait_queue() or schedule() calls from within a couple | ||
1034 | * xip_disable()'d areas of code, like in do_erase_oneblock for example. | ||
1035 | * The queueing and scheduling are always happening within xip_udelay(). | ||
1036 | * | ||
1037 | * Similarly, get_chip() and put_chip() just happen to always be executed | ||
1038 | * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state | ||
1039 | * is in array mode, therefore never executing many cases therein and not | ||
1040 | * causing any problem with XIP. | ||
1041 | */ | ||
1042 | 1031 | ||
1043 | #else | 1032 | #else |
1044 | 1033 | ||
1045 | #define xip_disable(map, chip, adr) | 1034 | #define xip_disable(map, chip, adr) |
1046 | #define xip_enable(map, chip, adr) | 1035 | #define xip_enable(map, chip, adr) |
1047 | #define XIP_INVAL_CACHED_RANGE(x...) | 1036 | #define XIP_INVAL_CACHED_RANGE(x...) |
1037 | #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation | ||
1038 | |||
1039 | static int inval_cache_and_wait_for_operation( | ||
1040 | struct map_info *map, struct flchip *chip, | ||
1041 | unsigned long cmd_adr, unsigned long inval_adr, int inval_len, | ||
1042 | int *chip_op_time ) | ||
1043 | { | ||
1044 | struct cfi_private *cfi = map->fldrv_priv; | ||
1045 | map_word status, status_OK = CMD(0x80); | ||
1046 | int z, chip_state = chip->state; | ||
1047 | unsigned long timeo; | ||
1048 | |||
1049 | spin_unlock(chip->mutex); | ||
1050 | if (inval_len) | ||
1051 | INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len); | ||
1052 | if (*chip_op_time) | ||
1053 | cfi_udelay(*chip_op_time); | ||
1054 | spin_lock(chip->mutex); | ||
1055 | |||
1056 | timeo = *chip_op_time * 8 * HZ / 1000000; | ||
1057 | if (timeo < HZ/2) | ||
1058 | timeo = HZ/2; | ||
1059 | timeo += jiffies; | ||
1060 | |||
1061 | z = 0; | ||
1062 | for (;;) { | ||
1063 | if (chip->state != chip_state) { | ||
1064 | /* Someone's suspended the operation: sleep */ | ||
1065 | DECLARE_WAITQUEUE(wait, current); | ||
1066 | |||
1067 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1068 | add_wait_queue(&chip->wq, &wait); | ||
1069 | spin_unlock(chip->mutex); | ||
1070 | schedule(); | ||
1071 | remove_wait_queue(&chip->wq, &wait); | ||
1072 | timeo = jiffies + (HZ / 2); /* FIXME */ | ||
1073 | spin_lock(chip->mutex); | ||
1074 | continue; | ||
1075 | } | ||
1048 | 1076 | ||
1049 | #define UDELAY(map, chip, adr, usec) \ | 1077 | status = map_read(map, cmd_adr); |
1050 | do { \ | 1078 | if (map_word_andequal(map, status, status_OK, status_OK)) |
1051 | spin_unlock(chip->mutex); \ | 1079 | break; |
1052 | cfi_udelay(usec); \ | 1080 | |
1053 | spin_lock(chip->mutex); \ | 1081 | /* OK Still waiting */ |
1054 | } while (0) | 1082 | if (time_after(jiffies, timeo)) { |
1055 | 1083 | map_write(map, CMD(0x70), cmd_adr); | |
1056 | #define INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, adr, len, usec) \ | 1084 | chip->state = FL_STATUS; |
1057 | do { \ | 1085 | return -ETIME; |
1058 | spin_unlock(chip->mutex); \ | 1086 | } |
1059 | INVALIDATE_CACHED_RANGE(map, adr, len); \ | 1087 | |
1060 | cfi_udelay(usec); \ | 1088 | /* Latency issues. Drop the lock, wait a while and retry */ |
1061 | spin_lock(chip->mutex); \ | 1089 | z++; |
1062 | } while (0) | 1090 | spin_unlock(chip->mutex); |
1091 | cfi_udelay(1); | ||
1092 | spin_lock(chip->mutex); | ||
1093 | } | ||
1094 | |||
1095 | if (!z) { | ||
1096 | if (!--(*chip_op_time)) | ||
1097 | *chip_op_time = 1; | ||
1098 | } else if (z > 1) | ||
1099 | ++(*chip_op_time); | ||
1100 | |||
1101 | /* Done and happy. */ | ||
1102 | chip->state = FL_STATUS; | ||
1103 | return 0; | ||
1104 | } | ||
1063 | 1105 | ||
1064 | #endif | 1106 | #endif |
1065 | 1107 | ||
1108 | #define WAIT_TIMEOUT(map, chip, adr, udelay) \ | ||
1109 | ({ int __udelay = (udelay); \ | ||
1110 | INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); }) | ||
1111 | |||
1112 | |||
1066 | static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) | 1113 | static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len) |
1067 | { | 1114 | { |
1068 | unsigned long cmd_addr; | 1115 | unsigned long cmd_addr; |
@@ -1252,14 +1299,11 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1252 | unsigned long adr, map_word datum, int mode) | 1299 | unsigned long adr, map_word datum, int mode) |
1253 | { | 1300 | { |
1254 | struct cfi_private *cfi = map->fldrv_priv; | 1301 | struct cfi_private *cfi = map->fldrv_priv; |
1255 | map_word status, status_OK, write_cmd; | 1302 | map_word status, write_cmd; |
1256 | unsigned long timeo; | 1303 | int ret=0; |
1257 | int z, ret=0; | ||
1258 | 1304 | ||
1259 | adr += chip->start; | 1305 | adr += chip->start; |
1260 | 1306 | ||
1261 | /* Let's determine those according to the interleave only once */ | ||
1262 | status_OK = CMD(0x80); | ||
1263 | switch (mode) { | 1307 | switch (mode) { |
1264 | case FL_WRITING: | 1308 | case FL_WRITING: |
1265 | write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41); | 1309 | write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41); |
@@ -1285,57 +1329,17 @@ static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, | |||
1285 | map_write(map, datum, adr); | 1329 | map_write(map, datum, adr); |
1286 | chip->state = mode; | 1330 | chip->state = mode; |
1287 | 1331 | ||
1288 | INVALIDATE_CACHE_UDELAY(map, chip, adr, | 1332 | ret = INVAL_CACHE_AND_WAIT(map, chip, adr, |
1289 | adr, map_bankwidth(map), | 1333 | adr, map_bankwidth(map), |
1290 | chip->word_write_time); | 1334 | &chip->word_write_time); |
1291 | 1335 | if (ret) { | |
1292 | timeo = jiffies + (HZ/2); | 1336 | xip_enable(map, chip, adr); |
1293 | z = 0; | 1337 | printk(KERN_ERR "%s: word write error (status timeout)\n", map->name); |
1294 | for (;;) { | 1338 | goto out; |
1295 | if (chip->state != mode) { | ||
1296 | /* Someone's suspended the write. Sleep */ | ||
1297 | DECLARE_WAITQUEUE(wait, current); | ||
1298 | |||
1299 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1300 | add_wait_queue(&chip->wq, &wait); | ||
1301 | spin_unlock(chip->mutex); | ||
1302 | schedule(); | ||
1303 | remove_wait_queue(&chip->wq, &wait); | ||
1304 | timeo = jiffies + (HZ / 2); /* FIXME */ | ||
1305 | spin_lock(chip->mutex); | ||
1306 | continue; | ||
1307 | } | ||
1308 | |||
1309 | status = map_read(map, adr); | ||
1310 | if (map_word_andequal(map, status, status_OK, status_OK)) | ||
1311 | break; | ||
1312 | |||
1313 | /* OK Still waiting */ | ||
1314 | if (time_after(jiffies, timeo)) { | ||
1315 | map_write(map, CMD(0x70), adr); | ||
1316 | chip->state = FL_STATUS; | ||
1317 | xip_enable(map, chip, adr); | ||
1318 | printk(KERN_ERR "%s: word write error (status timeout)\n", map->name); | ||
1319 | ret = -EIO; | ||
1320 | goto out; | ||
1321 | } | ||
1322 | |||
1323 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
1324 | z++; | ||
1325 | UDELAY(map, chip, adr, 1); | ||
1326 | } | ||
1327 | if (!z) { | ||
1328 | chip->word_write_time--; | ||
1329 | if (!chip->word_write_time) | ||
1330 | chip->word_write_time = 1; | ||
1331 | } | 1339 | } |
1332 | if (z > 1) | ||
1333 | chip->word_write_time++; | ||
1334 | |||
1335 | /* Done and happy. */ | ||
1336 | chip->state = FL_STATUS; | ||
1337 | 1340 | ||
1338 | /* check for errors */ | 1341 | /* check for errors */ |
1342 | status = map_read(map, adr); | ||
1339 | if (map_word_bitsset(map, status, CMD(0x1a))) { | 1343 | if (map_word_bitsset(map, status, CMD(0x1a))) { |
1340 | unsigned long chipstatus = MERGESTATUS(status); | 1344 | unsigned long chipstatus = MERGESTATUS(status); |
1341 | 1345 | ||
@@ -1452,9 +1456,9 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1452 | unsigned long *pvec_seek, int len) | 1456 | unsigned long *pvec_seek, int len) |
1453 | { | 1457 | { |
1454 | struct cfi_private *cfi = map->fldrv_priv; | 1458 | struct cfi_private *cfi = map->fldrv_priv; |
1455 | map_word status, status_OK, write_cmd, datum; | 1459 | map_word status, write_cmd, datum; |
1456 | unsigned long cmd_adr, timeo; | 1460 | unsigned long cmd_adr; |
1457 | int wbufsize, z, ret=0, word_gap, words; | 1461 | int ret, wbufsize, word_gap, words; |
1458 | const struct kvec *vec; | 1462 | const struct kvec *vec; |
1459 | unsigned long vec_seek; | 1463 | unsigned long vec_seek; |
1460 | 1464 | ||
@@ -1463,7 +1467,6 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1463 | cmd_adr = adr & ~(wbufsize-1); | 1467 | cmd_adr = adr & ~(wbufsize-1); |
1464 | 1468 | ||
1465 | /* Let's determine this according to the interleave only once */ | 1469 | /* Let's determine this according to the interleave only once */ |
1466 | status_OK = CMD(0x80); | ||
1467 | write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9); | 1470 | write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9); |
1468 | 1471 | ||
1469 | spin_lock(chip->mutex); | 1472 | spin_lock(chip->mutex); |
@@ -1477,12 +1480,14 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1477 | ENABLE_VPP(map); | 1480 | ENABLE_VPP(map); |
1478 | xip_disable(map, chip, cmd_adr); | 1481 | xip_disable(map, chip, cmd_adr); |
1479 | 1482 | ||
1480 | /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set | 1483 | /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set |
1481 | [...], the device will not accept any more Write to Buffer commands". | 1484 | [...], the device will not accept any more Write to Buffer commands". |
1482 | So we must check here and reset those bits if they're set. Otherwise | 1485 | So we must check here and reset those bits if they're set. Otherwise |
1483 | we're just pissing in the wind */ | 1486 | we're just pissing in the wind */ |
1484 | if (chip->state != FL_STATUS) | 1487 | if (chip->state != FL_STATUS) { |
1485 | map_write(map, CMD(0x70), cmd_adr); | 1488 | map_write(map, CMD(0x70), cmd_adr); |
1489 | chip->state = FL_STATUS; | ||
1490 | } | ||
1486 | status = map_read(map, cmd_adr); | 1491 | status = map_read(map, cmd_adr); |
1487 | if (map_word_bitsset(map, status, CMD(0x30))) { | 1492 | if (map_word_bitsset(map, status, CMD(0x30))) { |
1488 | xip_enable(map, chip, cmd_adr); | 1493 | xip_enable(map, chip, cmd_adr); |
@@ -1493,32 +1498,20 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1493 | } | 1498 | } |
1494 | 1499 | ||
1495 | chip->state = FL_WRITING_TO_BUFFER; | 1500 | chip->state = FL_WRITING_TO_BUFFER; |
1496 | 1501 | map_write(map, write_cmd, cmd_adr); | |
1497 | z = 0; | 1502 | ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0); |
1498 | for (;;) { | 1503 | if (ret) { |
1499 | map_write(map, write_cmd, cmd_adr); | 1504 | /* Argh. Not ready for write to buffer */ |
1500 | 1505 | map_word Xstatus = map_read(map, cmd_adr); | |
1506 | map_write(map, CMD(0x70), cmd_adr); | ||
1507 | chip->state = FL_STATUS; | ||
1501 | status = map_read(map, cmd_adr); | 1508 | status = map_read(map, cmd_adr); |
1502 | if (map_word_andequal(map, status, status_OK, status_OK)) | 1509 | map_write(map, CMD(0x50), cmd_adr); |
1503 | break; | 1510 | map_write(map, CMD(0x70), cmd_adr); |
1504 | 1511 | xip_enable(map, chip, cmd_adr); | |
1505 | UDELAY(map, chip, cmd_adr, 1); | 1512 | printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n", |
1506 | 1513 | map->name, Xstatus.x[0], status.x[0]); | |
1507 | if (++z > 20) { | 1514 | goto out; |
1508 | /* Argh. Not ready for write to buffer */ | ||
1509 | map_word Xstatus; | ||
1510 | map_write(map, CMD(0x70), cmd_adr); | ||
1511 | chip->state = FL_STATUS; | ||
1512 | Xstatus = map_read(map, cmd_adr); | ||
1513 | /* Odd. Clear status bits */ | ||
1514 | map_write(map, CMD(0x50), cmd_adr); | ||
1515 | map_write(map, CMD(0x70), cmd_adr); | ||
1516 | xip_enable(map, chip, cmd_adr); | ||
1517 | printk(KERN_ERR "%s: Chip not ready for buffer write. status = %lx, Xstatus = %lx\n", | ||
1518 | map->name, status.x[0], Xstatus.x[0]); | ||
1519 | ret = -EIO; | ||
1520 | goto out; | ||
1521 | } | ||
1522 | } | 1515 | } |
1523 | 1516 | ||
1524 | /* Figure out the number of words to write */ | 1517 | /* Figure out the number of words to write */ |
@@ -1573,56 +1566,19 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip, | |||
1573 | map_write(map, CMD(0xd0), cmd_adr); | 1566 | map_write(map, CMD(0xd0), cmd_adr); |
1574 | chip->state = FL_WRITING; | 1567 | chip->state = FL_WRITING; |
1575 | 1568 | ||
1576 | INVALIDATE_CACHE_UDELAY(map, chip, cmd_adr, | 1569 | ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, |
1577 | adr, len, | 1570 | adr, len, |
1578 | chip->buffer_write_time); | 1571 | &chip->buffer_write_time); |
1579 | 1572 | if (ret) { | |
1580 | timeo = jiffies + (HZ/2); | 1573 | map_write(map, CMD(0x70), cmd_adr); |
1581 | z = 0; | 1574 | chip->state = FL_STATUS; |
1582 | for (;;) { | 1575 | xip_enable(map, chip, cmd_adr); |
1583 | if (chip->state != FL_WRITING) { | 1576 | printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name); |
1584 | /* Someone's suspended the write. Sleep */ | 1577 | goto out; |
1585 | DECLARE_WAITQUEUE(wait, current); | ||
1586 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1587 | add_wait_queue(&chip->wq, &wait); | ||
1588 | spin_unlock(chip->mutex); | ||
1589 | schedule(); | ||
1590 | remove_wait_queue(&chip->wq, &wait); | ||
1591 | timeo = jiffies + (HZ / 2); /* FIXME */ | ||
1592 | spin_lock(chip->mutex); | ||
1593 | continue; | ||
1594 | } | ||
1595 | |||
1596 | status = map_read(map, cmd_adr); | ||
1597 | if (map_word_andequal(map, status, status_OK, status_OK)) | ||
1598 | break; | ||
1599 | |||
1600 | /* OK Still waiting */ | ||
1601 | if (time_after(jiffies, timeo)) { | ||
1602 | map_write(map, CMD(0x70), cmd_adr); | ||
1603 | chip->state = FL_STATUS; | ||
1604 | xip_enable(map, chip, cmd_adr); | ||
1605 | printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name); | ||
1606 | ret = -EIO; | ||
1607 | goto out; | ||
1608 | } | ||
1609 | |||
1610 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
1611 | z++; | ||
1612 | UDELAY(map, chip, cmd_adr, 1); | ||
1613 | } | ||
1614 | if (!z) { | ||
1615 | chip->buffer_write_time--; | ||
1616 | if (!chip->buffer_write_time) | ||
1617 | chip->buffer_write_time = 1; | ||
1618 | } | 1578 | } |
1619 | if (z > 1) | ||
1620 | chip->buffer_write_time++; | ||
1621 | |||
1622 | /* Done and happy. */ | ||
1623 | chip->state = FL_STATUS; | ||
1624 | 1579 | ||
1625 | /* check for errors */ | 1580 | /* check for errors */ |
1581 | status = map_read(map, cmd_adr); | ||
1626 | if (map_word_bitsset(map, status, CMD(0x1a))) { | 1582 | if (map_word_bitsset(map, status, CMD(0x1a))) { |
1627 | unsigned long chipstatus = MERGESTATUS(status); | 1583 | unsigned long chipstatus = MERGESTATUS(status); |
1628 | 1584 | ||
@@ -1693,6 +1649,11 @@ static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs, | |||
1693 | if (chipnum == cfi->numchips) | 1649 | if (chipnum == cfi->numchips) |
1694 | return 0; | 1650 | return 0; |
1695 | } | 1651 | } |
1652 | |||
1653 | /* Be nice and reschedule with the chip in a usable state for other | ||
1654 | processes. */ | ||
1655 | cond_resched(); | ||
1656 | |||
1696 | } while (len); | 1657 | } while (len); |
1697 | 1658 | ||
1698 | return 0; | 1659 | return 0; |
@@ -1713,17 +1674,12 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1713 | unsigned long adr, int len, void *thunk) | 1674 | unsigned long adr, int len, void *thunk) |
1714 | { | 1675 | { |
1715 | struct cfi_private *cfi = map->fldrv_priv; | 1676 | struct cfi_private *cfi = map->fldrv_priv; |
1716 | map_word status, status_OK; | 1677 | map_word status; |
1717 | unsigned long timeo; | ||
1718 | int retries = 3; | 1678 | int retries = 3; |
1719 | DECLARE_WAITQUEUE(wait, current); | 1679 | int ret; |
1720 | int ret = 0; | ||
1721 | 1680 | ||
1722 | adr += chip->start; | 1681 | adr += chip->start; |
1723 | 1682 | ||
1724 | /* Let's determine this according to the interleave only once */ | ||
1725 | status_OK = CMD(0x80); | ||
1726 | |||
1727 | retry: | 1683 | retry: |
1728 | spin_lock(chip->mutex); | 1684 | spin_lock(chip->mutex); |
1729 | ret = get_chip(map, chip, adr, FL_ERASING); | 1685 | ret = get_chip(map, chip, adr, FL_ERASING); |
@@ -1745,48 +1701,15 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1745 | chip->state = FL_ERASING; | 1701 | chip->state = FL_ERASING; |
1746 | chip->erase_suspended = 0; | 1702 | chip->erase_suspended = 0; |
1747 | 1703 | ||
1748 | INVALIDATE_CACHE_UDELAY(map, chip, adr, | 1704 | ret = INVAL_CACHE_AND_WAIT(map, chip, adr, |
1749 | adr, len, | 1705 | adr, len, |
1750 | chip->erase_time*1000/2); | 1706 | &chip->erase_time); |
1751 | 1707 | if (ret) { | |
1752 | /* FIXME. Use a timer to check this, and return immediately. */ | 1708 | map_write(map, CMD(0x70), adr); |
1753 | /* Once the state machine's known to be working I'll do that */ | 1709 | chip->state = FL_STATUS; |
1754 | 1710 | xip_enable(map, chip, adr); | |
1755 | timeo = jiffies + (HZ*20); | 1711 | printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name); |
1756 | for (;;) { | 1712 | goto out; |
1757 | if (chip->state != FL_ERASING) { | ||
1758 | /* Someone's suspended the erase. Sleep */ | ||
1759 | set_current_state(TASK_UNINTERRUPTIBLE); | ||
1760 | add_wait_queue(&chip->wq, &wait); | ||
1761 | spin_unlock(chip->mutex); | ||
1762 | schedule(); | ||
1763 | remove_wait_queue(&chip->wq, &wait); | ||
1764 | spin_lock(chip->mutex); | ||
1765 | continue; | ||
1766 | } | ||
1767 | if (chip->erase_suspended) { | ||
1768 | /* This erase was suspended and resumed. | ||
1769 | Adjust the timeout */ | ||
1770 | timeo = jiffies + (HZ*20); /* FIXME */ | ||
1771 | chip->erase_suspended = 0; | ||
1772 | } | ||
1773 | |||
1774 | status = map_read(map, adr); | ||
1775 | if (map_word_andequal(map, status, status_OK, status_OK)) | ||
1776 | break; | ||
1777 | |||
1778 | /* OK Still waiting */ | ||
1779 | if (time_after(jiffies, timeo)) { | ||
1780 | map_write(map, CMD(0x70), adr); | ||
1781 | chip->state = FL_STATUS; | ||
1782 | xip_enable(map, chip, adr); | ||
1783 | printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name); | ||
1784 | ret = -EIO; | ||
1785 | goto out; | ||
1786 | } | ||
1787 | |||
1788 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
1789 | UDELAY(map, chip, adr, 1000000/HZ); | ||
1790 | } | 1713 | } |
1791 | 1714 | ||
1792 | /* We've broken this before. It doesn't hurt to be safe */ | 1715 | /* We've broken this before. It doesn't hurt to be safe */ |
@@ -1815,7 +1738,6 @@ static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, | |||
1815 | ret = -EIO; | 1738 | ret = -EIO; |
1816 | } else if (chipstatus & 0x20 && retries--) { | 1739 | } else if (chipstatus & 0x20 && retries--) { |
1817 | printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); | 1740 | printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus); |
1818 | timeo = jiffies + HZ; | ||
1819 | put_chip(map, chip, adr); | 1741 | put_chip(map, chip, adr); |
1820 | spin_unlock(chip->mutex); | 1742 | spin_unlock(chip->mutex); |
1821 | goto retry; | 1743 | goto retry; |
@@ -1921,15 +1843,11 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip | |||
1921 | { | 1843 | { |
1922 | struct cfi_private *cfi = map->fldrv_priv; | 1844 | struct cfi_private *cfi = map->fldrv_priv; |
1923 | struct cfi_pri_intelext *extp = cfi->cmdset_priv; | 1845 | struct cfi_pri_intelext *extp = cfi->cmdset_priv; |
1924 | map_word status, status_OK; | 1846 | int udelay; |
1925 | unsigned long timeo = jiffies + HZ; | ||
1926 | int ret; | 1847 | int ret; |
1927 | 1848 | ||
1928 | adr += chip->start; | 1849 | adr += chip->start; |
1929 | 1850 | ||
1930 | /* Let's determine this according to the interleave only once */ | ||
1931 | status_OK = CMD(0x80); | ||
1932 | |||
1933 | spin_lock(chip->mutex); | 1851 | spin_lock(chip->mutex); |
1934 | ret = get_chip(map, chip, adr, FL_LOCKING); | 1852 | ret = get_chip(map, chip, adr, FL_LOCKING); |
1935 | if (ret) { | 1853 | if (ret) { |
@@ -1954,41 +1872,21 @@ static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip | |||
1954 | * If Instant Individual Block Locking supported then no need | 1872 | * If Instant Individual Block Locking supported then no need |
1955 | * to delay. | 1873 | * to delay. |
1956 | */ | 1874 | */ |
1875 | udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0; | ||
1957 | 1876 | ||
1958 | if (!extp || !(extp->FeatureSupport & (1 << 5))) | 1877 | ret = WAIT_TIMEOUT(map, chip, adr, udelay); |
1959 | UDELAY(map, chip, adr, 1000000/HZ); | 1878 | if (ret) { |
1960 | 1879 | map_write(map, CMD(0x70), adr); | |
1961 | /* FIXME. Use a timer to check this, and return immediately. */ | 1880 | chip->state = FL_STATUS; |
1962 | /* Once the state machine's known to be working I'll do that */ | 1881 | xip_enable(map, chip, adr); |
1963 | 1882 | printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name); | |
1964 | timeo = jiffies + (HZ*20); | 1883 | goto out; |
1965 | for (;;) { | ||
1966 | |||
1967 | status = map_read(map, adr); | ||
1968 | if (map_word_andequal(map, status, status_OK, status_OK)) | ||
1969 | break; | ||
1970 | |||
1971 | /* OK Still waiting */ | ||
1972 | if (time_after(jiffies, timeo)) { | ||
1973 | map_write(map, CMD(0x70), adr); | ||
1974 | chip->state = FL_STATUS; | ||
1975 | xip_enable(map, chip, adr); | ||
1976 | printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name); | ||
1977 | put_chip(map, chip, adr); | ||
1978 | spin_unlock(chip->mutex); | ||
1979 | return -EIO; | ||
1980 | } | ||
1981 | |||
1982 | /* Latency issues. Drop the lock, wait a while and retry */ | ||
1983 | UDELAY(map, chip, adr, 1); | ||
1984 | } | 1884 | } |
1985 | 1885 | ||
1986 | /* Done and happy. */ | ||
1987 | chip->state = FL_STATUS; | ||
1988 | xip_enable(map, chip, adr); | 1886 | xip_enable(map, chip, adr); |
1989 | put_chip(map, chip, adr); | 1887 | out: put_chip(map, chip, adr); |
1990 | spin_unlock(chip->mutex); | 1888 | spin_unlock(chip->mutex); |
1991 | return 0; | 1889 | return ret; |
1992 | } | 1890 | } |
1993 | 1891 | ||
1994 | static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) | 1892 | static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len) |
@@ -2445,28 +2343,8 @@ static void cfi_intelext_destroy(struct mtd_info *mtd) | |||
2445 | kfree(mtd->eraseregions); | 2343 | kfree(mtd->eraseregions); |
2446 | } | 2344 | } |
2447 | 2345 | ||
2448 | static char im_name_0001[] = "cfi_cmdset_0001"; | ||
2449 | static char im_name_0003[] = "cfi_cmdset_0003"; | ||
2450 | static char im_name_0200[] = "cfi_cmdset_0200"; | ||
2451 | |||
2452 | static int __init cfi_intelext_init(void) | ||
2453 | { | ||
2454 | inter_module_register(im_name_0001, THIS_MODULE, &cfi_cmdset_0001); | ||
2455 | inter_module_register(im_name_0003, THIS_MODULE, &cfi_cmdset_0001); | ||
2456 | inter_module_register(im_name_0200, THIS_MODULE, &cfi_cmdset_0001); | ||
2457 | return 0; | ||
2458 | } | ||
2459 | |||
2460 | static void __exit cfi_intelext_exit(void) | ||
2461 | { | ||
2462 | inter_module_unregister(im_name_0001); | ||
2463 | inter_module_unregister(im_name_0003); | ||
2464 | inter_module_unregister(im_name_0200); | ||
2465 | } | ||
2466 | |||
2467 | module_init(cfi_intelext_init); | ||
2468 | module_exit(cfi_intelext_exit); | ||
2469 | |||
2470 | MODULE_LICENSE("GPL"); | 2346 | MODULE_LICENSE("GPL"); |
2471 | MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); | 2347 | MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al."); |
2472 | MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips"); | 2348 | MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips"); |
2349 | MODULE_ALIAS("cfi_cmdset_0003"); | ||
2350 | MODULE_ALIAS("cfi_cmdset_0200"); | ||
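The module_init()/inter_module_register() boilerplate deleted at the end of this file is what the exported aliases added after cfi_cmdset_0001() and the MODULE_ALIAS lines above replace. Reduced to its essentials the pattern looks roughly like this; the stub body stands in for the real probe entry point:

	#include <linux/module.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/map.h>

	/* Stand-in body; in the driver this is the real command-set probe. */
	struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
	{
		return NULL;
	}

	/* One implementation exported under every command-set ID it handles.
	 * gen_probe can resolve "cfi_cmdset_0003"/"cfi_cmdset_0200" with
	 * symbol_get(), and the MODULE_ALIAS entries let request_module()
	 * load this module on demand. */
	struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary)
		__attribute__((alias("cfi_cmdset_0001")));
	struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary)
		__attribute__((alias("cfi_cmdset_0001")));

	EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
	EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
	EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

	MODULE_ALIAS("cfi_cmdset_0003");
	MODULE_ALIAS("cfi_cmdset_0200");
	MODULE_LICENSE("GPL");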
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index aed10bd5c3c3..1e01ad38b26e 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -236,6 +236,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
236 | mtd->resume = cfi_amdstd_resume; | 236 | mtd->resume = cfi_amdstd_resume; |
237 | mtd->flags = MTD_CAP_NORFLASH; | 237 | mtd->flags = MTD_CAP_NORFLASH; |
238 | mtd->name = map->name; | 238 | mtd->name = map->name; |
239 | mtd->writesize = 1; | ||
239 | 240 | ||
240 | if (cfi->cfi_mode==CFI_MODE_CFI){ | 241 | if (cfi->cfi_mode==CFI_MODE_CFI){ |
241 | unsigned char bootloc; | 242 | unsigned char bootloc; |
@@ -326,7 +327,7 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary) | |||
326 | 327 | ||
327 | return cfi_amdstd_setup(mtd); | 328 | return cfi_amdstd_setup(mtd); |
328 | } | 329 | } |
329 | 330 | EXPORT_SYMBOL_GPL(cfi_cmdset_0002); | |
330 | 331 | ||
331 | static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) | 332 | static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd) |
332 | { | 333 | { |
@@ -1758,25 +1759,6 @@ static void cfi_amdstd_destroy(struct mtd_info *mtd) | |||
1758 | kfree(mtd->eraseregions); | 1759 | kfree(mtd->eraseregions); |
1759 | } | 1760 | } |
1760 | 1761 | ||
1761 | static char im_name[]="cfi_cmdset_0002"; | ||
1762 | |||
1763 | |||
1764 | static int __init cfi_amdstd_init(void) | ||
1765 | { | ||
1766 | inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0002); | ||
1767 | return 0; | ||
1768 | } | ||
1769 | |||
1770 | |||
1771 | static void __exit cfi_amdstd_exit(void) | ||
1772 | { | ||
1773 | inter_module_unregister(im_name); | ||
1774 | } | ||
1775 | |||
1776 | |||
1777 | module_init(cfi_amdstd_init); | ||
1778 | module_exit(cfi_amdstd_exit); | ||
1779 | |||
1780 | MODULE_LICENSE("GPL"); | 1762 | MODULE_LICENSE("GPL"); |
1781 | MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); | 1763 | MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al."); |
1782 | MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); | 1764 | MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips"); |
diff --git a/drivers/mtd/chips/cfi_cmdset_0020.c b/drivers/mtd/chips/cfi_cmdset_0020.c
index 0807c1c91e55..fae70a5db540 100644
--- a/drivers/mtd/chips/cfi_cmdset_0020.c
+++ b/drivers/mtd/chips/cfi_cmdset_0020.c
@@ -162,6 +162,7 @@ struct mtd_info *cfi_cmdset_0020(struct map_info *map, int primary) | |||
162 | 162 | ||
163 | return cfi_staa_setup(map); | 163 | return cfi_staa_setup(map); |
164 | } | 164 | } |
165 | EXPORT_SYMBOL_GPL(cfi_cmdset_0020); | ||
165 | 166 | ||
166 | static struct mtd_info *cfi_staa_setup(struct map_info *map) | 167 | static struct mtd_info *cfi_staa_setup(struct map_info *map) |
167 | { | 168 | { |
@@ -237,9 +238,8 @@ static struct mtd_info *cfi_staa_setup(struct map_info *map) | |||
237 | mtd->unlock = cfi_staa_unlock; | 238 | mtd->unlock = cfi_staa_unlock; |
238 | mtd->suspend = cfi_staa_suspend; | 239 | mtd->suspend = cfi_staa_suspend; |
239 | mtd->resume = cfi_staa_resume; | 240 | mtd->resume = cfi_staa_resume; |
240 | mtd->flags = MTD_CAP_NORFLASH; | 241 | mtd->flags = MTD_CAP_NORFLASH & ~MTD_BIT_WRITEABLE; |
241 | mtd->flags |= MTD_ECC; /* FIXME: Not all STMicro flashes have this */ | 242 | mtd->writesize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ |
242 | mtd->eccsize = 8; /* FIXME: Should be 0 for STMicro flashes w/out ECC */ | ||
243 | map->fldrv = &cfi_staa_chipdrv; | 243 | map->fldrv = &cfi_staa_chipdrv; |
244 | __module_get(THIS_MODULE); | 244 | __module_get(THIS_MODULE); |
245 | mtd->name = map->name; | 245 | mtd->name = map->name; |
@@ -1410,20 +1410,4 @@ static void cfi_staa_destroy(struct mtd_info *mtd) | |||
1410 | kfree(cfi); | 1410 | kfree(cfi); |
1411 | } | 1411 | } |
1412 | 1412 | ||
1413 | static char im_name[]="cfi_cmdset_0020"; | ||
1414 | |||
1415 | static int __init cfi_staa_init(void) | ||
1416 | { | ||
1417 | inter_module_register(im_name, THIS_MODULE, &cfi_cmdset_0020); | ||
1418 | return 0; | ||
1419 | } | ||
1420 | |||
1421 | static void __exit cfi_staa_exit(void) | ||
1422 | { | ||
1423 | inter_module_unregister(im_name); | ||
1424 | } | ||
1425 | |||
1426 | module_init(cfi_staa_init); | ||
1427 | module_exit(cfi_staa_exit); | ||
1428 | |||
1429 | MODULE_LICENSE("GPL"); | 1413 | MODULE_LICENSE("GPL"); |
diff --git a/drivers/mtd/chips/cfi_probe.c b/drivers/mtd/chips/cfi_probe.c
index e636aa86bc24..4bf9f8cac0dd 100644
--- a/drivers/mtd/chips/cfi_probe.c
+++ b/drivers/mtd/chips/cfi_probe.c
@@ -349,12 +349,12 @@ static void print_cfi_ident(struct cfi_ident *cfip) | |||
349 | else | 349 | else |
350 | printk("No Vpp line\n"); | 350 | printk("No Vpp line\n"); |
351 | 351 | ||
352 | printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); | 352 | printk("Typical byte/word write timeout: %d µs\n", 1<<cfip->WordWriteTimeoutTyp); |
353 | printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); | 353 | printk("Maximum byte/word write timeout: %d µs\n", (1<<cfip->WordWriteTimeoutMax) * (1<<cfip->WordWriteTimeoutTyp)); |
354 | 354 | ||
355 | if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { | 355 | if (cfip->BufWriteTimeoutTyp || cfip->BufWriteTimeoutMax) { |
356 | printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); | 356 | printk("Typical full buffer write timeout: %d µs\n", 1<<cfip->BufWriteTimeoutTyp); |
357 | printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); | 357 | printk("Maximum full buffer write timeout: %d µs\n", (1<<cfip->BufWriteTimeoutMax) * (1<<cfip->BufWriteTimeoutTyp)); |
358 | } | 358 | } |
359 | else | 359 | else |
360 | printk("Full buffer write not supported\n"); | 360 | printk("Full buffer write not supported\n"); |
diff --git a/drivers/mtd/chips/gen_probe.c b/drivers/mtd/chips/gen_probe.c
index 41bd59d20d85..cdb0f590b40c 100644
--- a/drivers/mtd/chips/gen_probe.c
+++ b/drivers/mtd/chips/gen_probe.c
@@ -37,8 +37,15 @@ struct mtd_info *mtd_do_chip_probe(struct map_info *map, struct chip_probe *cp) | |||
37 | if (!mtd) | 37 | if (!mtd) |
38 | mtd = check_cmd_set(map, 0); /* Then the secondary */ | 38 | mtd = check_cmd_set(map, 0); /* Then the secondary */ |
39 | 39 | ||
40 | if (mtd) | 40 | if (mtd) { |
41 | if (mtd->size > map->size) { | ||
42 | printk(KERN_WARNING "Reducing visibility of %ldKiB chip to %ldKiB\n", | ||
43 | (unsigned long)mtd->size >> 10, | ||
44 | (unsigned long)map->size >> 10); | ||
45 | mtd->size = map->size; | ||
46 | } | ||
41 | return mtd; | 47 | return mtd; |
48 | } | ||
42 | 49 | ||
43 | printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n"); | 50 | printk(KERN_WARNING"gen_probe: No supported Vendor Command Set found\n"); |
44 | 51 | ||
@@ -100,7 +107,12 @@ static struct cfi_private *genprobe_ident_chips(struct map_info *map, struct chi | |||
100 | * Align bitmap storage size to full byte. | 107 | * Align bitmap storage size to full byte. |
101 | */ | 108 | */ |
102 | max_chips = map->size >> cfi.chipshift; | 109 | max_chips = map->size >> cfi.chipshift; |
103 | mapsize = (max_chips / 8) + ((max_chips % 8) ? 1 : 0); | 110 | if (!max_chips) { |
111 | printk(KERN_WARNING "NOR chip too large to fit in mapping. Attempting to cope...\n"); | ||
112 | max_chips = 1; | ||
113 | } | ||
114 | |||
115 | mapsize = (max_chips + BITS_PER_LONG-1) / BITS_PER_LONG; | ||
104 | chip_map = kmalloc(mapsize, GFP_KERNEL); | 116 | chip_map = kmalloc(mapsize, GFP_KERNEL); |
105 | if (!chip_map) { | 117 | if (!chip_map) { |
106 | printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name); | 118 | printk(KERN_WARNING "%s: kmalloc failed for CFI chip map\n", map->name); |
@@ -194,25 +206,28 @@ static inline struct mtd_info *cfi_cmdset_unknown(struct map_info *map, | |||
194 | { | 206 | { |
195 | struct cfi_private *cfi = map->fldrv_priv; | 207 | struct cfi_private *cfi = map->fldrv_priv; |
196 | __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID; | 208 | __u16 type = primary?cfi->cfiq->P_ID:cfi->cfiq->A_ID; |
197 | #if defined(CONFIG_MODULES) && defined(HAVE_INTER_MODULE) | 209 | #ifdef CONFIG_MODULES |
198 | char probename[32]; | 210 | char probename[16+sizeof(MODULE_SYMBOL_PREFIX)]; |
199 | cfi_cmdset_fn_t *probe_function; | 211 | cfi_cmdset_fn_t *probe_function; |
200 | 212 | ||
201 | sprintf(probename, "cfi_cmdset_%4.4X", type); | 213 | sprintf(probename, MODULE_SYMBOL_PREFIX "cfi_cmdset_%4.4X", type); |
202 | 214 | ||
203 | probe_function = inter_module_get_request(probename, probename); | 215 | probe_function = __symbol_get(probename); |
216 | if (!probe_function) { | ||
217 | request_module(probename + sizeof(MODULE_SYMBOL_PREFIX) - 1); | ||
218 | probe_function = __symbol_get(probename); | ||
219 | } | ||
204 | 220 | ||
205 | if (probe_function) { | 221 | if (probe_function) { |
206 | struct mtd_info *mtd; | 222 | struct mtd_info *mtd; |
207 | 223 | ||
208 | mtd = (*probe_function)(map, primary); | 224 | mtd = (*probe_function)(map, primary); |
209 | /* If it was happy, it'll have increased its own use count */ | 225 | /* If it was happy, it'll have increased its own use count */ |
210 | inter_module_put(probename); | 226 | symbol_put_addr(probe_function); |
211 | return mtd; | 227 | return mtd; |
212 | } | 228 | } |
213 | #endif | 229 | #endif |
214 | printk(KERN_NOTICE "Support for command set %04X not present\n", | 230 | printk(KERN_NOTICE "Support for command set %04X not present\n", type); |
215 | type); | ||
216 | 231 | ||
217 | return NULL; | 232 | return NULL; |
218 | } | 233 | } |
@@ -226,12 +241,8 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary) | |||
226 | return NULL; | 241 | return NULL; |
227 | 242 | ||
228 | switch(type){ | 243 | switch(type){ |
229 | /* Urgh. Ifdefs. The version with weak symbols was | 244 | /* We need these for the !CONFIG_MODULES case, |
230 | * _much_ nicer. Shame it didn't seem to work on | 245 | because symbol_get() doesn't work there */ |
231 | * anything but x86, really. | ||
232 | * But we can't rely in inter_module_get() because | ||
233 | * that'd mean we depend on link order. | ||
234 | */ | ||
235 | #ifdef CONFIG_MTD_CFI_INTELEXT | 246 | #ifdef CONFIG_MTD_CFI_INTELEXT |
236 | case 0x0001: | 247 | case 0x0001: |
237 | case 0x0003: | 248 | case 0x0003: |
@@ -246,9 +257,9 @@ static struct mtd_info *check_cmd_set(struct map_info *map, int primary) | |||
246 | case 0x0020: | 257 | case 0x0020: |
247 | return cfi_cmdset_0020(map, primary); | 258 | return cfi_cmdset_0020(map, primary); |
248 | #endif | 259 | #endif |
260 | default: | ||
261 | return cfi_cmdset_unknown(map, primary); | ||
249 | } | 262 | } |
250 | |||
251 | return cfi_cmdset_unknown(map, primary); | ||
252 | } | 263 | } |
253 | 264 | ||
254 | MODULE_LICENSE("GPL"); | 265 | MODULE_LICENSE("GPL"); |
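With inter_module gone, a command-set handler only needs to export a symbol following the cfi_cmdset_XXXX naming convention and provide a matching module alias; the request_module()/__symbol_get() sequence above then finds it whether it is built in or modular. A hypothetical handler for a made-up vendor ID 0x0042 (both the ID and the function are invented for illustration):

	#include <linux/module.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/map.h>

	/* Hypothetical command set 0x0042: only the naming convention matters. */
	struct mtd_info *cfi_cmdset_0042(struct map_info *map, int primary)
	{
		/* probe the chip, allocate and fill in an mtd_info ... */
		return NULL;
	}
	EXPORT_SYMBOL_GPL(cfi_cmdset_0042);

	MODULE_ALIAS("cfi_cmdset_0042");
	MODULE_LICENSE("GPL");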
diff --git a/drivers/mtd/chips/map_ram.c b/drivers/mtd/chips/map_ram.c
index bd2e876a814b..763925747db6 100644
--- a/drivers/mtd/chips/map_ram.c
+++ b/drivers/mtd/chips/map_ram.c
@@ -70,7 +70,7 @@ static struct mtd_info *map_ram_probe(struct map_info *map) | |||
70 | mtd->read = mapram_read; | 70 | mtd->read = mapram_read; |
71 | mtd->write = mapram_write; | 71 | mtd->write = mapram_write; |
72 | mtd->sync = mapram_nop; | 72 | mtd->sync = mapram_nop; |
73 | mtd->flags = MTD_CAP_RAM | MTD_VOLATILE; | 73 | mtd->flags = MTD_CAP_RAM; |
74 | 74 | ||
75 | mtd->erasesize = PAGE_SIZE; | 75 | mtd->erasesize = PAGE_SIZE; |
76 | while(mtd->size & (mtd->erasesize - 1)) | 76 | while(mtd->size & (mtd->erasesize - 1)) |
diff --git a/drivers/mtd/chips/map_rom.c b/drivers/mtd/chips/map_rom.c
index 624c12c232c8..bc6ee9ef8a31 100644
--- a/drivers/mtd/chips/map_rom.c
+++ b/drivers/mtd/chips/map_rom.c
@@ -46,9 +46,7 @@ static struct mtd_info *map_rom_probe(struct map_info *map) | |||
46 | mtd->write = maprom_write; | 46 | mtd->write = maprom_write; |
47 | mtd->sync = maprom_nop; | 47 | mtd->sync = maprom_nop; |
48 | mtd->flags = MTD_CAP_ROM; | 48 | mtd->flags = MTD_CAP_ROM; |
49 | mtd->erasesize = 131072; | 49 | mtd->erasesize = map->size; |
50 | while(mtd->size & (mtd->erasesize - 1)) | ||
51 | mtd->erasesize >>= 1; | ||
52 | 50 | ||
53 | __module_get(THIS_MODULE); | 51 | __module_get(THIS_MODULE); |
54 | return mtd; | 52 | return mtd; |
diff --git a/drivers/mtd/chips/sharp.c b/drivers/mtd/chips/sharp.c
index 3cc0b23c5865..967abbecdff9 100644
--- a/drivers/mtd/chips/sharp.c
+++ b/drivers/mtd/chips/sharp.c
@@ -140,6 +140,7 @@ static struct mtd_info *sharp_probe(struct map_info *map) | |||
140 | mtd->suspend = sharp_suspend; | 140 | mtd->suspend = sharp_suspend; |
141 | mtd->resume = sharp_resume; | 141 | mtd->resume = sharp_resume; |
142 | mtd->flags = MTD_CAP_NORFLASH; | 142 | mtd->flags = MTD_CAP_NORFLASH; |
143 | mtd->writesize = 1; | ||
143 | mtd->name = map->name; | 144 | mtd->name = map->name; |
144 | 145 | ||
145 | memset(sharp, 0, sizeof(*sharp)); | 146 | memset(sharp, 0, sizeof(*sharp)); |