/*
 * drivers/mtd/nand/ts7250.c
 *
 * Copyright (C) 2004 Technologic Systems (support@embeddedARM.com)
 *
 * Derived from drivers/mtd/nand/edb7312.c
 *   Copyright (C) 2004 Marius Gröger (mag@sysgo.de)
 *
 * Derived from drivers/mtd/nand/autcpu12.c
 *   Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Overview:
 *   This is a device driver for the NAND flash device found on the
 *   TS-7250 board which utilizes a Samsung 32 Mbyte part.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <asm/arch/hardware.h>
#include <asm/sizes.h>
#include <asm/mach-types.h>

/*
 * MTD structure for TS7250 board
 */
static struct mtd_info *ts7250_mtd = NULL;

#ifdef CONFIG_MTD_PARTITIONS
static const char *part_probes[] = { "cmdlinepart", NULL };

#define NUM_PARTITIONS 3

/*
 * Static partition layout for the 32 MiB flash device
 */
static struct mtd_partition partition_info32[] = {
	{
		.name		= "TS-BOOTROM",
		.offset		= 0x00000000,
		.size		= 0x00004000,
	}, {
		.name		= "Linux",
		.offset		= 0x00004000,
		.size		= 0x01d00000,
	}, {
		.name		= "RedBoot",
		.offset		= 0x01d04000,
		.size		= 0x002fc000,
	},
};

/*
 * Static partition layout for the 128 MiB flash device
 */
static struct mtd_partition partition_info128[] = {
	{
		.name		= "TS-BOOTROM",
		.offset		= 0x00000000,
		.size		= 0x00004000,
	}, {
		.name		= "Linux",
		.offset		= 0x00004000,
		.size		= 0x07d00000,
	}, {
		.name		= "RedBoot",
		.offset		= 0x07d04000,
		.size		= 0x002fc000,
	},
};
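
/*
 * Sanity check on the tables above: in each layout the three partitions
 * are contiguous and cover the full device, i.e. for the 32 MiB part
 * 0x4000 + 0x1d00000 + 0x2fc000 == 0x2000000, and for the 128 MiB part
 * 0x4000 + 0x7d00000 + 0x2fc000 == 0x8000000.
 */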
#endif


/*
 *	hardware specific access to control-lines
 *
 *	ctrl:
 *	NAND_NCE: bit 0 -> bit 2
 *	NAND_CLE: bit 1 -> bit 1
 *	NAND_ALE: bit 2 -> bit 0
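 *
 *	e.g. ctrl = NAND_NCE | NAND_CLE (0x03) reaches the latch as 0x06:
 *	nCE driven on bit 2, CLE on bit 1, ALE (bit 0) deasserted.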
 */
static void ts7250_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (ctrl & NAND_CTRL_CHANGE) {
		unsigned long addr = TS72XX_NAND_CONTROL_VIRT_BASE;
		unsigned char bits;

		bits = (ctrl & NAND_NCE) << 2;
		bits |= ctrl & NAND_CLE;
		bits |= (ctrl & NAND_ALE) >> 2;

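		/* read-modify-write: touch only the low three control bits,
		 * leaving the rest of the latch register as it was */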
		__raw_writeb((__raw_readb(addr) & ~0x7) | bits, addr);
	}

	if (cmd != NAND_CMD_NONE)
		writeb(cmd, chip->IO_ADDR_W);
}

/*
 *	read device ready pin
 */
static int ts7250_device_ready(struct mtd_info *mtd)
{
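	/*
	 * Bit 5 of the busy register follows the chip's R/B# line;
	 * returning nonzero tells the NAND core the device is ready.
	 */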
	return __raw_readb(TS72XX_NAND_BUSY_VIRT_BASE) & 0x20;
}

/*
 * Main initialization routine
 */
static int __init ts7250_init(void)
{
	struct nand_chip *this;
	const char *part_type = NULL;
	int mtd_parts_nb = 0;
	struct mtd_partition *mtd_parts = NULL;

	if (!machine_is_ts72xx() || board_is_ts7200())
		return -ENXIO;

	/* Allocate zeroed memory for MTD device structure and private data */
	ts7250_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL);
	if (!ts7250_mtd) {
		printk(KERN_ERR "Unable to allocate TS7250 NAND MTD device structure.\n");
		return -ENOMEM;
	}

	/* Get pointer to private data, laid out directly after the mtd_info */
	this = (struct nand_chip *)(&ts7250_mtd[1]);

	/* Link the private data with the MTD structure */
	ts7250_mtd->priv = this;
	ts7250_mtd->owner = THIS_MODULE;

	/* insert callbacks */
	this->IO_ADDR_R = (void *)TS72XX_NAND_DATA_VIRT_BASE;
	this->IO_ADDR_W = (void *)TS72XX_NAND_DATA_VIRT_BASE;
	this->cmd_ctrl = ts7250_hwcontrol;
	this->dev_ready = ts7250_device_ready;
	this->chip_delay = 15;
	this->ecc.mode = NAND_ECC_SOFT;

	printk("Searching for NAND flash...\n");
	/* Scan to find existence of the device */
	if (nand_scan(ts7250_mtd, 1)) {
		kfree(ts7250_mtd);
		return -ENXIO;
	}
#ifdef CONFIG_MTD_PARTITIONS
	ts7250_mtd->name = "ts7250-nand";
	mtd_parts_nb = parse_mtd_partitions(ts7250_mtd, part_probes, &mtd_parts, 0);
	if (mtd_parts_nb > 0)
		part_type = "command line";
	else
		mtd_parts_nb = 0;
#endif
	if (mtd_parts_nb == 0) {
		mtd_parts = partition_info32;
		if (ts7250_mtd->size >= (128 * 0x100000))
			mtd_parts = partition_info128;
		mtd_parts_nb = NUM_PARTITIONS;
		part_type = "static";
	}

	/* Register the partitions */
	printk(KERN_NOTICE "Using %s partition definition\n", part_type);
	add_mtd_partitions(ts7250_mtd, mtd_parts, mtd_parts_nb);

	/* Return happy */
	return 0;
}

module_init(ts7250_init);

/*
 * Clean up routine
 */
static void __exit ts7250_cleanup(void)
{
	/* Unregister partitions and the NAND device */
	nand_release(ts7250_mtd);

	/* Free the MTD device structure */
	kfree(ts7250_mtd);
}

module_exit(ts7250_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jesse Off <joff@embeddedARM.com>");
MODULE_DESCRIPTION("MTD map driver for Technologic Systems TS-7250 board");
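
/*
 * Illustrative only (assumes the 32 MiB Samsung part with its usual
 * 16 KiB erase block): after a successful probe with the static layout
 * above, the partitions would typically show up in /proc/mtd as:
 *
 *   dev:    size   erasesize  name
 *   mtd0: 00004000 00004000 "TS-BOOTROM"
 *   mtd1: 01d00000 00004000 "Linux"
 *   mtd2: 002fc000 00004000 "RedBoot"
 */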