Diffstat (limited to 'drivers/mtd/chips/amd_flash.c')
-rw-r--r--  drivers/mtd/chips/amd_flash.c | 80
1 file changed, 40 insertions(+), 40 deletions(-)
diff --git a/drivers/mtd/chips/amd_flash.c b/drivers/mtd/chips/amd_flash.c
index 2dafeba3f3d5..fdb91b6f1d97 100644
--- a/drivers/mtd/chips/amd_flash.c
+++ b/drivers/mtd/chips/amd_flash.c
@@ -3,7 +3,7 @@
3	 *
4	 * Author: Jonas Holmberg <jonas.holmberg@axis.com>
5	 *
6	- * $Id: amd_flash.c,v 1.27 2005/02/04 07:43:09 jonashg Exp $
6	+ * $Id: amd_flash.c,v 1.28 2005/11/07 11:14:22 gleixner Exp $
7	 *
8	 * Copyright (c) 2001 Axis Communications AB
9	 *
@@ -93,9 +93,9 @@
93	#define D6_MASK 0x40
94
95	struct amd_flash_private {
96		int device_type;
97		int interleave;
98		int numchips;
99		unsigned long chipshift;
100	//	const char *im_name;
101		struct flchip chips[0];
@@ -253,7 +253,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
253		int i;
254		int retval = 0;
255		int lock_status;
256
257		map = mtd->priv;
258
259		/* Pass the whole chip through sector by sector and check for each
@@ -273,7 +273,7 @@ static int amd_flash_do_unlock(struct mtd_info *mtd, loff_t ofs, size_t len,
273				unlock_sector(map, eraseoffset, is_unlock);
274
275				lock_status = is_sector_locked(map, eraseoffset);
276
277				if (is_unlock && lock_status) {
278					printk("Cannot unlock sector at address %x length %xx\n",
279						eraseoffset, merip->erasesize);
@@ -305,7 +305,7 @@ static int amd_flash_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
305	/*
306	 * Reads JEDEC manufacturer ID and device ID and returns the index of the first
307	 * matching table entry (-1 if not found or alias for already found chip).
308	 */
309	static int probe_new_chip(struct mtd_info *mtd, __u32 base,
310				  struct flchip *chips,
311				  struct amd_flash_private *private,
@@ -636,7 +636,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
636				{ .offset = 0x000000, .erasesize = 0x10000, .numblocks = 31 },
637				{ .offset = 0x1F0000, .erasesize = 0x02000, .numblocks = 8 }
638			}
639		}
640		};
641
642		struct mtd_info *mtd;
@@ -701,7 +701,7 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
701
702		mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info) *
703					    mtd->numeraseregions, GFP_KERNEL);
704		if (!mtd->eraseregions) {
705			printk(KERN_WARNING "%s: Failed to allocate "
706			       "memory for MTD erase region info\n", map->name);
707			kfree(mtd);
@@ -739,12 +739,12 @@ static struct mtd_info *amd_flash_probe(struct map_info *map)
739		mtd->type = MTD_NORFLASH;
740		mtd->flags = MTD_CAP_NORFLASH;
741		mtd->name = map->name;
742		mtd->erase = amd_flash_erase;
743		mtd->read = amd_flash_read;
744		mtd->write = amd_flash_write;
745		mtd->sync = amd_flash_sync;
746		mtd->suspend = amd_flash_suspend;
747		mtd->resume = amd_flash_resume;
748		mtd->lock = amd_flash_lock;
749		mtd->unlock = amd_flash_unlock;
750
@@ -789,7 +789,7 @@ retry:
789				map->name, chip->state);
790			set_current_state(TASK_UNINTERRUPTIBLE);
791			add_wait_queue(&chip->wq, &wait);
792
793			spin_unlock_bh(chip->mutex);
794
795			schedule();
@@ -802,7 +802,7 @@ retry:
802			timeo = jiffies + HZ;
803
804			goto retry;
805		}
806
807		adr += chip->start;
808
@@ -889,7 +889,7 @@ retry:
889				map->name, chip->state);
890			set_current_state(TASK_UNINTERRUPTIBLE);
891			add_wait_queue(&chip->wq, &wait);
892
893			spin_unlock_bh(chip->mutex);
894
895			schedule();
@@ -901,7 +901,7 @@ retry:
901			timeo = jiffies + HZ;
902
903			goto retry;
904		}
905
906		chip->state = FL_WRITING;
907
@@ -911,7 +911,7 @@ retry:
911		wide_write(map, datum, adr);
912
913		times_left = 500000;
914		while (times_left-- && flash_is_busy(map, adr, private->interleave)) {
915			if (need_resched()) {
916				spin_unlock_bh(chip->mutex);
917				schedule();
@@ -989,7 +989,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
989			if (ret) {
990				return ret;
991			}
992
993			ofs += n;
994			buf += n;
995			(*retlen) += n;
@@ -1002,7 +1002,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
1002				}
1003			}
1004		}
1005
1006		/* We are now aligned, write as much as possible. */
1007		while(len >= map->buswidth) {
1008			__u32 datum;
@@ -1063,7 +1063,7 @@ static int amd_flash_write(struct mtd_info *mtd, loff_t to , size_t len,
1063			if (ret) {
1064				return ret;
1065			}
1066
1067			(*retlen) += n;
1068		}
1069
@@ -1085,7 +1085,7 @@ retry:
1085		if (chip->state != FL_READY){
1086			set_current_state(TASK_UNINTERRUPTIBLE);
1087			add_wait_queue(&chip->wq, &wait);
1088
1089			spin_unlock_bh(chip->mutex);
1090
1091			schedule();
@@ -1098,7 +1098,7 @@ retry:
1098			timeo = jiffies + HZ;
1099
1100			goto retry;
1101		}
1102
1103		chip->state = FL_ERASING;
1104
@@ -1106,30 +1106,30 @@ retry:
1106		ENABLE_VPP(map);
1107		send_cmd(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA);
1108		send_cmd_to_addr(map, chip->start, CMD_SECTOR_ERASE_UNLOCK_DATA_2, adr);
1109
1110		timeo = jiffies + (HZ * 20);
1111
1112		spin_unlock_bh(chip->mutex);
1113		msleep(1000);
1114		spin_lock_bh(chip->mutex);
1115
1116		while (flash_is_busy(map, adr, private->interleave)) {
1117
1118			if (chip->state != FL_ERASING) {
1119				/* Someone's suspended the erase. Sleep */
1120				set_current_state(TASK_UNINTERRUPTIBLE);
1121				add_wait_queue(&chip->wq, &wait);
1122
1123				spin_unlock_bh(chip->mutex);
1124				printk(KERN_INFO "%s: erase suspended. Sleeping\n",
1125					map->name);
1126				schedule();
1127				remove_wait_queue(&chip->wq, &wait);
1128
1129				if (signal_pending(current)) {
1130					return -EINTR;
1131				}
1132
1133				timeo = jiffies + (HZ*2); /* FIXME */
1134				spin_lock_bh(chip->mutex);
1135				continue;
@@ -1145,7 +1145,7 @@ retry:
1145
1146				return -EIO;
1147			}
1148
1149			/* Latency issues. Drop the lock, wait a while and retry */
1150			spin_unlock_bh(chip->mutex);
1151
@@ -1153,7 +1153,7 @@ retry:
1153			schedule();
1154		else
1155			udelay(1);
1156
1157		spin_lock_bh(chip->mutex);
1158	}
1159
@@ -1180,7 +1180,7 @@ retry:
1180				return -EIO;
1181			}
1182		}
1183
1184		DISABLE_VPP(map);
1185		chip->state = FL_READY;
1186		wake_up(&chip->wq);
@@ -1246,7 +1246,7 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
1246	 * with the erase region at that address.
1247	 */
1248
1249		while ((i < mtd->numeraseregions) &&
1250		       ((instr->addr + instr->len) >= regions[i].offset)) {
1251			i++;
1252		}
@@ -1293,10 +1293,10 @@ static int amd_flash_erase(struct mtd_info *mtd, struct erase_info *instr)
1293				}
1294			}
1295		}
1296
1297		instr->state = MTD_ERASE_DONE;
1298		mtd_erase_callback(instr);
1299
1300		return 0;
1301	}
1302
@@ -1324,7 +1324,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
1324		case FL_JEDEC_QUERY:
1325			chip->oldstate = chip->state;
1326			chip->state = FL_SYNCING;
1327			/* No need to wake_up() on this state change -
1328			 * as the whole point is that nobody can do anything
1329			 * with the chip now anyway.
1330			 */
@@ -1335,13 +1335,13 @@ static void amd_flash_sync(struct mtd_info *mtd)
1335		default:
1336			/* Not an idle state */
1337			add_wait_queue(&chip->wq, &wait);
1338
1339			spin_unlock_bh(chip->mutex);
1340
1341			schedule();
1342
1343			remove_wait_queue(&chip->wq, &wait);
1344
1345			goto retry;
1346		}
1347	}
@@ -1351,7 +1351,7 @@ static void amd_flash_sync(struct mtd_info *mtd)
1351		chip = &private->chips[i];
1352
1353		spin_lock_bh(chip->mutex);
1354
1355		if (chip->state == FL_SYNCING) {
1356			chip->state = chip->oldstate;
1357			wake_up(&chip->wq);
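
The hunks above repeatedly touch the same wait/retry idiom: when the chip is not in FL_READY, the caller puts itself on chip->wq, drops the spinlock, sleeps, and jumps back to the retry: label to re-check the state. What follows is a minimal userspace sketch of that pattern, using a pthread mutex and condition variable in place of the kernel spinlock and wait queue; the chip/claim_for_write/release_chip names are illustrative only and are not the driver's API.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's chip states. */
enum chip_state { FL_READY, FL_WRITING, FL_ERASING };

struct chip {
	pthread_mutex_t lock;   /* plays the role of chip->mutex */
	pthread_cond_t  wq;     /* plays the role of chip->wq    */
	enum chip_state state;
};

/* Wait until the chip is FL_READY, then claim it for writing.
 * Mirrors the retry: loop seen in the hunks: check the state,
 * sleep with the lock dropped, re-check after waking. */
static void claim_for_write(struct chip *c)
{
	pthread_mutex_lock(&c->lock);
	while (c->state != FL_READY)               /* "goto retry" becomes a loop  */
		pthread_cond_wait(&c->wq, &c->lock);   /* releases the lock while asleep */
	c->state = FL_WRITING;
	pthread_mutex_unlock(&c->lock);
}

/* Return the chip to FL_READY and wake any sleepers, as the driver
 * does with wake_up(&chip->wq) once an operation completes. */
static void release_chip(struct chip *c)
{
	pthread_mutex_lock(&c->lock);
	c->state = FL_READY;
	pthread_cond_broadcast(&c->wq);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct chip c = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, FL_READY };

	claim_for_write(&c);
	printf("chip claimed, state=%d\n", c.state);
	release_chip(&c);
	printf("chip released, state=%d\n", c.state);
	return 0;
}

The kernel code additionally bails out with -EINTR when signal_pending() is true while waiting, which this condition-variable analogue omits for brevity.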