diff options
author | Ben Hutchings <bhutchings@solarflare.com> | 2010-01-29 15:58:37 -0500 |
---|---|---|
committer | David Woodhouse <David.Woodhouse@intel.com> | 2010-02-25 06:53:00 -0500 |
commit | cbfe93e9cedfcd59689bad9e67f57ef67545e5a0 (patch) | |
tree | 32c6c213dc377f10198988c1e226a59ae22d974d /drivers/mtd/mtdblock.c | |
parent | 24c15496771ea1f3902dee23f746042ba34dc2b8 (diff) |
mtd: mtdblock: Dynamically allocate cache info structures
Since we allocate struct mtd_blktrans_dev for each block device, we
can add our own structure members to the end. Therefore embed
struct mtd_blktrans_dev in struct mtdblk_dev and remove the static
array of struct mtdblk_dev. Also remove the redundant pointer to
struct mtd_info.
This is preparation for removing the static limit on the number of MTD
devices.
Signed-off-by: Ben Hutchings <bhutchings@solarflare.com>
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
Diffstat (limited to 'drivers/mtd/mtdblock.c')
-rw-r--r-- | drivers/mtd/mtdblock.c | 74 |
1 file changed, 31 insertions, 43 deletions
diff --git a/drivers/mtd/mtdblock.c b/drivers/mtd/mtdblock.c index 9f41b1a853c1..69f6bf2e0a8c 100644 --- a/drivers/mtd/mtdblock.c +++ b/drivers/mtd/mtdblock.c | |||
@@ -19,15 +19,15 @@ | |||
19 | #include <linux/mutex.h> | 19 | #include <linux/mutex.h> |
20 | 20 | ||
21 | 21 | ||
22 | static struct mtdblk_dev { | 22 | struct mtdblk_dev { |
23 | struct mtd_info *mtd; | 23 | struct mtd_blktrans_dev mbd; |
24 | int count; | 24 | int count; |
25 | struct mutex cache_mutex; | 25 | struct mutex cache_mutex; |
26 | unsigned char *cache_data; | 26 | unsigned char *cache_data; |
27 | unsigned long cache_offset; | 27 | unsigned long cache_offset; |
28 | unsigned int cache_size; | 28 | unsigned int cache_size; |
29 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; | 29 | enum { STATE_EMPTY, STATE_CLEAN, STATE_DIRTY } cache_state; |
30 | } *mtdblks[MAX_MTD_DEVICES]; | 30 | }; |
31 | 31 | ||
32 | static struct mutex mtdblks_lock; | 32 | static struct mutex mtdblks_lock; |
33 | 33 | ||
@@ -98,7 +98,7 @@ static int erase_write (struct mtd_info *mtd, unsigned long pos, | |||
98 | 98 | ||
99 | static int write_cached_data (struct mtdblk_dev *mtdblk) | 99 | static int write_cached_data (struct mtdblk_dev *mtdblk) |
100 | { | 100 | { |
101 | struct mtd_info *mtd = mtdblk->mtd; | 101 | struct mtd_info *mtd = mtdblk->mbd.mtd; |
102 | int ret; | 102 | int ret; |
103 | 103 | ||
104 | if (mtdblk->cache_state != STATE_DIRTY) | 104 | if (mtdblk->cache_state != STATE_DIRTY) |
@@ -128,7 +128,7 @@ static int write_cached_data (struct mtdblk_dev *mtdblk) | |||
128 | static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, | 128 | static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, |
129 | int len, const char *buf) | 129 | int len, const char *buf) |
130 | { | 130 | { |
131 | struct mtd_info *mtd = mtdblk->mtd; | 131 | struct mtd_info *mtd = mtdblk->mbd.mtd; |
132 | unsigned int sect_size = mtdblk->cache_size; | 132 | unsigned int sect_size = mtdblk->cache_size; |
133 | size_t retlen; | 133 | size_t retlen; |
134 | int ret; | 134 | int ret; |
@@ -198,7 +198,7 @@ static int do_cached_write (struct mtdblk_dev *mtdblk, unsigned long pos, | |||
198 | static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, | 198 | static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, |
199 | int len, char *buf) | 199 | int len, char *buf) |
200 | { | 200 | { |
201 | struct mtd_info *mtd = mtdblk->mtd; | 201 | struct mtd_info *mtd = mtdblk->mbd.mtd; |
202 | unsigned int sect_size = mtdblk->cache_size; | 202 | unsigned int sect_size = mtdblk->cache_size; |
203 | size_t retlen; | 203 | size_t retlen; |
204 | int ret; | 204 | int ret; |
@@ -244,16 +244,16 @@ static int do_cached_read (struct mtdblk_dev *mtdblk, unsigned long pos, | |||
244 | static int mtdblock_readsect(struct mtd_blktrans_dev *dev, | 244 | static int mtdblock_readsect(struct mtd_blktrans_dev *dev, |
245 | unsigned long block, char *buf) | 245 | unsigned long block, char *buf) |
246 | { | 246 | { |
247 | struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; | 247 | struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); |
248 | return do_cached_read(mtdblk, block<<9, 512, buf); | 248 | return do_cached_read(mtdblk, block<<9, 512, buf); |
249 | } | 249 | } |
250 | 250 | ||
251 | static int mtdblock_writesect(struct mtd_blktrans_dev *dev, | 251 | static int mtdblock_writesect(struct mtd_blktrans_dev *dev, |
252 | unsigned long block, char *buf) | 252 | unsigned long block, char *buf) |
253 | { | 253 | { |
254 | struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; | 254 | struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); |
255 | if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { | 255 | if (unlikely(!mtdblk->cache_data && mtdblk->cache_size)) { |
256 | mtdblk->cache_data = vmalloc(mtdblk->mtd->erasesize); | 256 | mtdblk->cache_data = vmalloc(mtdblk->mbd.mtd->erasesize); |
257 | if (!mtdblk->cache_data) | 257 | if (!mtdblk->cache_data) |
258 | return -EINTR; | 258 | return -EINTR; |
259 | /* -EINTR is not really correct, but it is the best match | 259 | /* -EINTR is not really correct, but it is the best match |
@@ -266,37 +266,26 @@ static int mtdblock_writesect(struct mtd_blktrans_dev *dev, | |||
266 | 266 | ||
267 | static int mtdblock_open(struct mtd_blktrans_dev *mbd) | 267 | static int mtdblock_open(struct mtd_blktrans_dev *mbd) |
268 | { | 268 | { |
269 | struct mtdblk_dev *mtdblk; | 269 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); |
270 | struct mtd_info *mtd = mbd->mtd; | ||
271 | int dev = mbd->devnum; | ||
272 | 270 | ||
273 | DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); | 271 | DEBUG(MTD_DEBUG_LEVEL1,"mtdblock_open\n"); |
274 | 272 | ||
275 | mutex_lock(&mtdblks_lock); | 273 | mutex_lock(&mtdblks_lock); |
276 | if (mtdblks[dev]) { | 274 | if (mtdblk->count) { |
277 | mtdblks[dev]->count++; | 275 | mtdblk->count++; |
278 | mutex_unlock(&mtdblks_lock); | 276 | mutex_unlock(&mtdblks_lock); |
279 | return 0; | 277 | return 0; |
280 | } | 278 | } |
281 | 279 | ||
282 | /* OK, it's not open. Create cache info for it */ | 280 | /* OK, it's not open. Create cache info for it */ |
283 | mtdblk = kzalloc(sizeof(struct mtdblk_dev), GFP_KERNEL); | ||
284 | if (!mtdblk) { | ||
285 | mutex_unlock(&mtdblks_lock); | ||
286 | return -ENOMEM; | ||
287 | } | ||
288 | |||
289 | mtdblk->count = 1; | 281 | mtdblk->count = 1; |
290 | mtdblk->mtd = mtd; | ||
291 | |||
292 | mutex_init(&mtdblk->cache_mutex); | 282 | mutex_init(&mtdblk->cache_mutex); |
293 | mtdblk->cache_state = STATE_EMPTY; | 283 | mtdblk->cache_state = STATE_EMPTY; |
294 | if ( !(mtdblk->mtd->flags & MTD_NO_ERASE) && mtdblk->mtd->erasesize) { | 284 | if (!(mbd->mtd->flags & MTD_NO_ERASE) && mbd->mtd->erasesize) { |
295 | mtdblk->cache_size = mtdblk->mtd->erasesize; | 285 | mtdblk->cache_size = mbd->mtd->erasesize; |
296 | mtdblk->cache_data = NULL; | 286 | mtdblk->cache_data = NULL; |
297 | } | 287 | } |
298 | 288 | ||
299 | mtdblks[dev] = mtdblk; | ||
300 | mutex_unlock(&mtdblks_lock); | 289 | mutex_unlock(&mtdblks_lock); |
301 | 290 | ||
302 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); | 291 | DEBUG(MTD_DEBUG_LEVEL1, "ok\n"); |
@@ -306,8 +295,7 @@ static int mtdblock_open(struct mtd_blktrans_dev *mbd) | |||
306 | 295 | ||
307 | static int mtdblock_release(struct mtd_blktrans_dev *mbd) | 296 | static int mtdblock_release(struct mtd_blktrans_dev *mbd) |
308 | { | 297 | { |
309 | int dev = mbd->devnum; | 298 | struct mtdblk_dev *mtdblk = container_of(mbd, struct mtdblk_dev, mbd); |
310 | struct mtdblk_dev *mtdblk = mtdblks[dev]; | ||
311 | 299 | ||
312 | DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); | 300 | DEBUG(MTD_DEBUG_LEVEL1, "mtdblock_release\n"); |
313 | 301 | ||
@@ -318,12 +306,10 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
318 | mutex_unlock(&mtdblk->cache_mutex); | 306 | mutex_unlock(&mtdblk->cache_mutex); |
319 | 307 | ||
320 | if (!--mtdblk->count) { | 308 | if (!--mtdblk->count) { |
321 | /* It was the last usage. Free the device */ | 309 | /* It was the last usage. Free the cache */ |
322 | mtdblks[dev] = NULL; | 310 | if (mbd->mtd->sync) |
323 | if (mtdblk->mtd->sync) | 311 | mbd->mtd->sync(mbd->mtd); |
324 | mtdblk->mtd->sync(mtdblk->mtd); | ||
325 | vfree(mtdblk->cache_data); | 312 | vfree(mtdblk->cache_data); |
326 | kfree(mtdblk); | ||
327 | } | 313 | } |
328 | 314 | ||
329 | mutex_unlock(&mtdblks_lock); | 315 | mutex_unlock(&mtdblks_lock); |
@@ -335,40 +321,42 @@ static int mtdblock_release(struct mtd_blktrans_dev *mbd) | |||
335 | 321 | ||
336 | static int mtdblock_flush(struct mtd_blktrans_dev *dev) | 322 | static int mtdblock_flush(struct mtd_blktrans_dev *dev) |
337 | { | 323 | { |
338 | struct mtdblk_dev *mtdblk = mtdblks[dev->devnum]; | 324 | struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); |
339 | 325 | ||
340 | mutex_lock(&mtdblk->cache_mutex); | 326 | mutex_lock(&mtdblk->cache_mutex); |
341 | write_cached_data(mtdblk); | 327 | write_cached_data(mtdblk); |
342 | mutex_unlock(&mtdblk->cache_mutex); | 328 | mutex_unlock(&mtdblk->cache_mutex); |
343 | 329 | ||
344 | if (mtdblk->mtd->sync) | 330 | if (dev->mtd->sync) |
345 | mtdblk->mtd->sync(mtdblk->mtd); | 331 | dev->mtd->sync(dev->mtd); |
346 | return 0; | 332 | return 0; |
347 | } | 333 | } |
348 | 334 | ||
349 | static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) | 335 | static void mtdblock_add_mtd(struct mtd_blktrans_ops *tr, struct mtd_info *mtd) |
350 | { | 336 | { |
351 | struct mtd_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); | 337 | struct mtdblk_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL); |
352 | 338 | ||
353 | if (!dev) | 339 | if (!dev) |
354 | return; | 340 | return; |
355 | 341 | ||
356 | dev->mtd = mtd; | 342 | dev->mbd.mtd = mtd; |
357 | dev->devnum = mtd->index; | 343 | dev->mbd.devnum = mtd->index; |
358 | 344 | ||
359 | dev->size = mtd->size >> 9; | 345 | dev->mbd.size = mtd->size >> 9; |
360 | dev->tr = tr; | 346 | dev->mbd.tr = tr; |
361 | 347 | ||
362 | if (!(mtd->flags & MTD_WRITEABLE)) | 348 | if (!(mtd->flags & MTD_WRITEABLE)) |
363 | dev->readonly = 1; | 349 | dev->mbd.readonly = 1; |
364 | 350 | ||
365 | add_mtd_blktrans_dev(dev); | 351 | add_mtd_blktrans_dev(&dev->mbd); |
366 | } | 352 | } |
367 | 353 | ||
368 | static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) | 354 | static void mtdblock_remove_dev(struct mtd_blktrans_dev *dev) |
369 | { | 355 | { |
356 | struct mtdblk_dev *mtdblk = container_of(dev, struct mtdblk_dev, mbd); | ||
357 | |||
370 | del_mtd_blktrans_dev(dev); | 358 | del_mtd_blktrans_dev(dev); |
371 | kfree(dev); | 359 | kfree(mtdblk); |
372 | } | 360 | } |
373 | 361 | ||
374 | static struct mtd_blktrans_ops mtdblock_tr = { | 362 | static struct mtd_blktrans_ops mtdblock_tr = { |