Diffstat (limited to 'drivers/mtd/mtdblock.c')
-rw-r--r--  drivers/mtd/mtdblock.c  44
1 file changed, 22 insertions(+), 22 deletions(-)
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c
index bee8aba9e5bb..e84756644fd1 100644
--- a/drivers/mtd/mtdblock.c
+++ b/drivers/mtd/mtdblock.c
@@ -1,7 +1,7 @@
 /*
  * Direct MTD block device access
  *
- * $Id: mtdblock.c,v 1.67 2005/11/06 10:04:37 gleixner Exp $
+ * $Id: mtdblock.c,v 1.68 2005/11/07 11:14:20 gleixner Exp $
  *
  * (C) 2000-2003 Nicolas Pitre <nico@cam.org>
  * (C) 1999-2003 David Woodhouse <dwmw2@infradead.org>
@@ -32,7 +32,7 @@ static struct mtdblk_dev {
 
 /*
  * Cache stuff...
  *
  * Since typical flash erasable sectors are much larger than what Linux's
  * buffer cache can handle, we must implement read-modify-write on flash
  * sectors for each block write requests. To avoid over-erasing flash sectors
@@ -46,7 +46,7 @@ static void erase_callback(struct erase_info *done)
 	wake_up(wait_q);
 }
 
 static int erase_write (struct mtd_info *mtd, unsigned long pos,
 			int len, const char *buf)
 {
 	struct erase_info erase;
@@ -104,18 +104,18 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
 		return 0;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: writing cached data for \"%s\" "
 			"at 0x%lx, size 0x%x\n", mtd->name,
 			mtdblk->cache_offset, mtdblk->cache_size);
 
 	ret = erase_write (mtd, mtdblk->cache_offset,
 			   mtdblk->cache_size, mtdblk->cache_data);
 	if (ret)
 		return ret;
 
 	/*
 	 * Here we could argubly set the cache state to STATE_CLEAN.
 	 * However this could lead to inconsistency since we will not
 	 * be notified if this content is altered on the flash by other
 	 * means. Let's declare it empty and leave buffering tasks to
 	 * the buffer cache instead.
 	 */
@@ -124,7 +124,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk)
 }
 
 
 static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
 			    int len, const char *buf)
 {
 	struct mtd_info *mtd = mtdblk->mtd;
@@ -134,7 +134,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
 
 	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: write on \"%s\" at 0x%lx, size 0x%x\n",
 		mtd->name, pos, len);
 
 	if (!sect_size)
 		return MTD_WRITE (mtd, pos, len, &retlen, buf);
 
@@ -142,11 +142,11 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
 		unsigned long sect_start = (pos/sect_size)*sect_size;
 		unsigned int offset = pos - sect_start;
 		unsigned int size = sect_size - offset;
 		if( size > len )
 			size = len;
 
 		if (size == sect_size) {
 			/*
 			 * We are covering a whole sector. Thus there is no
 			 * need to bother with the cache while it may still be
 			 * useful for other partial writes.
@@ -160,7 +160,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
 			if (mtdblk->cache_state == STATE_DIRTY &&
 			    mtdblk->cache_offset != sect_start) {
 				ret = write_cached_data(mtdblk);
 				if (ret)
 					return ret;
 			}
 
@@ -193,7 +193,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos,
 }
 
 
 static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
 			   int len, char *buf)
 {
 	struct mtd_info *mtd = mtdblk->mtd;
@@ -201,9 +201,9 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
 	size_t retlen;
 	int ret;
 
 	DEBUG(MTD_DEBUG_LEVEL2, "mtdblock: read on \"%s\" at 0x%lx, size 0x%x\n",
 			mtd->name, pos, len);
 
 	if (!sect_size)
 		return MTD_READ (mtd, pos, len, &retlen, buf);
 
@@ -211,7 +211,7 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos,
 		unsigned long sect_start = (pos/sect_size)*sect_size;
 		unsigned int offset = pos - sect_start;
 		unsigned int size = sect_size - offset;
 		if (size > len)
 			size = len;
 
 		/*
@@ -269,12 +269,12 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
 	int dev = mbd->devnum;
 
 	DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n");
 
 	if (mtdblks[dev]) {
 		mtdblks[dev]->count++;
 		return 0;
 	}
 
 	/* OK, it's not open. Create cache info for it */
 	mtdblk = kmalloc(sizeof(struct mtdblk_dev), GFP_KERNEL);
 	if (!mtdblk)
@@ -293,7 +293,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd)
 	}
 
 	mtdblks[dev] = mtdblk;
 
 	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
 
 	return 0;
@@ -321,7 +321,7 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd)
 	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
 
 	return 0;
 }
 
 static int mtdblock_flush(struct mtd_blktrans_dev *dev)
 {
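
Note: the comment quoted in the hunks above describes why mtdblock caches writes: flash erase sectors are much larger than the blocks the Linux buffer cache issues, so partial writes are gathered in a one-sector cache and flushed with a single erase+program when the write moves to a different sector. The following standalone userspace C sketch is written for this page, not taken from the kernel; names such as cached_write, flush_cache, and the simulated flash array are hypothetical and only illustrate the same read-modify-write idea.

/* Minimal sketch of sector-cached writes against a simulated flash. */
#include <stdio.h>
#include <string.h>

#define SECT_SIZE   4096u            /* assumed erase-sector size   */
#define FLASH_SIZE  (4 * SECT_SIZE)  /* tiny simulated flash device */

static unsigned char flash[FLASH_SIZE];    /* simulated flash array   */
static unsigned char cache[SECT_SIZE];     /* one-sector write cache  */
static unsigned long cache_offset = ~0ul;  /* sector currently cached */
static int cache_dirty;

/* Simulate "erase then program" of one whole sector. */
static void erase_write(unsigned long pos, const unsigned char *buf)
{
	memset(flash + pos, 0xff, SECT_SIZE);  /* erase sets bits to 1 */
	memcpy(flash + pos, buf, SECT_SIZE);   /* program new contents */
}

static void flush_cache(void)
{
	if (cache_dirty) {
		erase_write(cache_offset, cache);
		cache_dirty = 0;
	}
}

/* Partial writes go through the sector cache (read-modify-write). */
static void cached_write(unsigned long pos, const void *buf, size_t len)
{
	while (len) {
		unsigned long sect_start = (pos / SECT_SIZE) * SECT_SIZE;
		size_t offset = pos - sect_start;
		size_t size = SECT_SIZE - offset;

		if (size > len)
			size = len;

		if (size == SECT_SIZE) {
			/* Whole sector covered: bypass the cache. */
			erase_write(sect_start, buf);
		} else {
			/* Moving to a different sector? Flush the old one. */
			if (cache_dirty && cache_offset != sect_start)
				flush_cache();
			/* Load the sector into the cache if not already there. */
			if (cache_offset != sect_start) {
				memcpy(cache, flash + sect_start, SECT_SIZE);
				cache_offset = sect_start;
			}
			memcpy(cache + offset, buf, size);
			cache_dirty = 1;
		}
		buf = (const unsigned char *)buf + size;
		pos += size;
		len -= size;
	}
}

int main(void)
{
	memset(flash, 0xff, sizeof(flash));
	cached_write(10, "hello", 5);  /* partial write, lands in cache */
	cached_write(20, "world", 5);  /* same sector, no flush needed  */
	flush_cache();                 /* one erase+program for both    */
	printf("%.5s %.5s\n", flash + 10, flash + 20);
	return 0;
}

As in the driver, two small writes to the same sector cost a single erase cycle once the cache is flushed, which is the over-erase avoidance the file's comment refers to.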