/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

/*
 * The preallocated reserve is kept as a simple stack in
 * pool->elements[0..curr_nr-1]; these two helpers push and pop it.
 */
static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

static void free_pool(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * This function creates and allocates a guaranteed-size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and
 * mempool_free() functions. This function might sleep. Both alloc_fn()
 * and free_fn() may themselves sleep, as long as mempool_alloc() is
 * never called from IRQ context.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
EXPORT_SYMBOL(mempool_create);
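
/*
 * Usage sketch (editor's illustration, not part of mempool.c): creating
 * a pool of guaranteed slab objects with the mempool_alloc_slab() and
 * mempool_free_slab() helpers defined later in this file.  The cache
 * name, object size and the io_cachep/io_pool identifiers are
 * hypothetical.
 */
#if 0
static struct kmem_cache *io_cachep;
static mempool_t *io_pool;

static int example_setup(void)
{
	io_cachep = kmem_cache_create("example_io", 256, 0, 0, NULL);
	if (!io_cachep)
		return -ENOMEM;

	/* guarantee at least 16 preallocated objects for the I/O path */
	io_pool = mempool_create(16, mempool_alloc_slab,
				 mempool_free_slab, io_cachep);
	if (!io_pool) {
		kmem_cache_destroy(io_cachep);
		return -ENOMEM;
	}
	return 0;
}
#endif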

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data, int node_id)
{
	mempool_t *pool;
	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
					GFP_KERNEL, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
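
/*
 * Usage sketch (editor's illustration): the NUMA-aware variant lets the
 * preallocated elements be placed on a specific node.  "cachep" and
 * "nid" are hypothetical; mempool_create() above simply passes -1,
 * meaning "any node".
 */
#if 0
static mempool_t *example_node_pool(struct kmem_cache *cachep, int nid)
{
	return mempool_create_node(16, mempool_alloc_slab,
				   mempool_free_slab, cachep, nid);
}
#endif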

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note: the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() and mempool_free()
 * might be called (e.g. from IRQ context) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
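
/*
 * Usage sketch (editor's illustration): growing a pool ahead of a burst
 * of activity and shrinking it back afterwards.  Growing can fail with
 * -ENOMEM; shrinking frees the surplus reserve immediately.
 */
#if 0
static int example_resize(mempool_t *pool)
{
	int rc;

	rc = mempool_resize(pool, 64, GFP_KERNEL);	/* grow */
	if (rc)
		return rc;

	return mempool_resize(pool, 16, GFP_KERNEL);	/* shrink back */
}
#endif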

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() callback sleeps. The caller
 * has to guarantee that all elements have been returned to the pool (i.e.
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	/* Check for outstanding elements */
	BUG_ON(pool->curr_nr != pool->min_nr);
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);
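
/*
 * Usage sketch (editor's illustration): tearing down the hypothetical
 * slab-backed pool from the sketch after mempool_create() above.  Every
 * element must have been mempool_free()d first, or the BUG_ON() in
 * mempool_destroy() fires.
 */
#if 0
static void example_teardown(void)
{
	mempool_destroy(io_pool);
	kmem_cache_destroy(io_cachep);
}
#endif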

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * This function only sleeps if the alloc_fn() callback sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process context. (It might
 * fail if called from IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr) {
		/*
		 * FIXME: this should be io_schedule().  The timeout is there
		 * as a workaround for some DM problems in 2.6.18.
		 */
		io_schedule_timeout(5*HZ);
	}
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
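
/*
 * Usage sketch (editor's illustration): a round trip through the pool
 * on a writeback-style path.  GFP_NOIO includes __GFP_WAIT, so the
 * allocation below never returns NULL; it sleeps until an element is
 * returned to the pool if both the allocator and the reserve are empty.
 */
#if 0
static void example_round_trip(mempool_t *pool)
{
	void *element;

	element = mempool_alloc(pool, GFP_NOIO);	/* cannot fail */

	/* ... use the element ... */

	mempool_free(element, pool);	/* refills the reserve first */
}
#endif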

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * This function only sleeps if the free_fn() callback sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	smp_mb();
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn pair that kmallocs/kfrees the amount
 * of memory specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)(long)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)(long)pool_data;
	return kzalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kzalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
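
/*
 * Usage sketch (editor's illustration): a kmalloc-backed pool, with the
 * element size smuggled through pool_data as an integer, matching the
 * (size_t)(long) decoding in mempool_kmalloc() above.
 */
#if 0
static mempool_t *example_kmalloc_pool(void)
{
	/* 8 guaranteed elements of 512 bytes each */
	return mempool_create(8, mempool_kmalloc, mempool_kfree,
			      (void *)(unsigned long)512);
}
#endif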

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
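
/*
 * Usage sketch (editor's illustration): a pool of higher-order page
 * allocations, with pool_data carrying the order as an integer, matching
 * the (int)(long) decoding above.
 */
#if 0
static mempool_t *example_page_pool(void)
{
	/* 4 guaranteed order-1 (two-page) allocations */
	return mempool_create(4, mempool_alloc_pages, mempool_free_pages,
			      (void *)(long)1);
}
#endif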