-rw-r--r--   Documentation/blockdev/00-INDEX    |    2
-rw-r--r--   Documentation/blockdev/mflash.txt  |   84
-rw-r--r--   block/blk-core.c                   |   15
-rw-r--r--   block/blk-merge.c                  |   29
-rw-r--r--   block/blk-sysfs.c                  |    4
-rw-r--r--   block/blk.h                        |   14
-rw-r--r--   block/cfq-iosched.c                |  202
-rw-r--r--   block/elevator.c                   |   42
-rw-r--r--   drivers/block/Kconfig              |   17
-rw-r--r--   drivers/block/Makefile             |    1
-rw-r--r--   drivers/block/cciss.c              |  117
-rw-r--r--   drivers/block/cciss.h              |    2
-rw-r--r--   drivers/block/cciss_cmd.h          |   23
-rw-r--r--   drivers/block/loop.c               |    7
-rw-r--r--   drivers/block/mg_disk.c            | 1005
-rw-r--r--   include/linux/blkdev.h             |    2
-rw-r--r--   include/linux/elevator.h           |    1
-rw-r--r--   include/linux/mg_disk.h            |  206
18 files changed, 1624 insertions(+), 149 deletions(-)
diff --git a/Documentation/blockdev/00-INDEX b/Documentation/blockdev/00-INDEX
index 86f054c47013..c08df56dd91b 100644
--- a/Documentation/blockdev/00-INDEX
+++ b/Documentation/blockdev/00-INDEX
@@ -8,6 +8,8 @@ cpqarray.txt
 	- info on using Compaq's SMART2 Intelligent Disk Array Controllers.
 floppy.txt
 	- notes and driver options for the floppy disk driver.
+mflash.txt
+	- info on the mGine m[g]flash driver for Linux.
 nbd.txt
 	- info on a TCP implementation of a network block device.
 paride.txt
diff --git a/Documentation/blockdev/mflash.txt b/Documentation/blockdev/mflash.txt
new file mode 100644
index 000000000000..1f610ecf698a
--- /dev/null
+++ b/Documentation/blockdev/mflash.txt
@@ -0,0 +1,84 @@
+This document describes m[g]flash support in Linux.
+
+Contents
+  1. Overview
+  2. Reserved area configuration
+  3. Example of mflash platform driver registration
+
+1. Overview
+
+Mflash and gflash are embedded flash drives. The only difference is that
+mflash is an MCP (Multi Chip Package) device. The two devices operate in
+exactly the same way, so the rest of this document uses mflash to mean both.
+
+Internally, mflash contains NAND flash plus additional hardware logic and
+supports two different operation modes (ATA and IO). ATA mode needs no new
+driver and currently works well under the standard IDE subsystem; in effect
+it is a one-chip SSD. IO mode is an ATA-like custom mode for hosts that
+have no IDE interface.
+
+Brief characteristics of IO mode:
+A. IO mode is based on the ATA protocol and uses some custom commands
+   (read confirm, write confirm).
+B. IO mode uses an SRAM bus interface.
+C. IO mode supports a 4kB boot area, so the host can boot from mflash.
+
+2. Reserved area configuration
+If the host boots from mflash, it usually needs a raw area for the boot
+loader image. All of mflash's block device operations take this value as
+their start offset. Note that the size of the boot loader's reserved area
+and the kernel configuration value must be the same.
+
+3. Example of mflash platform driver registration
+Getting mflash working is very straightforward: adding the platform device
+to the board configuration file is all that is needed. Here is a pseudo example.
+
+static struct mg_drv_data mflash_drv_data = {
+	/* set to 1 to use the polling driver */
+	.use_polling = 0,
+	/* device attributes */
+	.dev_attr = MG_BOOT_DEV
+};
+
+static struct resource mg_mflash_rsc[] = {
+	/* base address of mflash */
+	[0] = {
+		.start = 0x08000000,
+		.end = 0x08000000 + SZ_64K - 1,
+		.flags = IORESOURCE_MEM
+	},
+	/* mflash interrupt pin */
+	[1] = {
+		.start = IRQ_GPIO(84),
+		.end = IRQ_GPIO(84),
+		.flags = IORESOURCE_IRQ
+	},
+	/* mflash reset pin */
+	[2] = {
+		.start = 43,
+		.end = 43,
+		.name = MG_RST_PIN,
+		.flags = IORESOURCE_IO
+	},
+	/* mflash reset-out pin
+	 * If you use mflash as a storage device (i.e. other than MG_BOOT_DEV),
+	 * you should assign this. */
+	[3] = {
+		.start = 51,
+		.end = 51,
+		.name = MG_RSTOUT_PIN,
+		.flags = IORESOURCE_IO
+	}
+};
+
+static struct platform_device mflash_dev = {
+	.name = MG_DEV_NAME,
+	.id = -1,
+	.dev = {
+		.platform_data = &mflash_drv_data,
+	},
+	.num_resources = ARRAY_SIZE(mg_mflash_rsc),
+	.resource = mg_mflash_rsc
+};
+
+platform_device_register(&mflash_dev);
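
In the driver added below, the reserved-area size is converted from this
kilobyte value into 512-byte sectors (1 KB equals two sectors). A minimal
sketch of the arithmetic, mirroring the MG_RES_SEC definition in mg_disk.c;
the MY_* names and the 1024 KB value are hypothetical, not part of the patch:

#define MY_MG_DISK_RES_KB	1024				/* assumed Kconfig value, in KB */
#define MY_MG_RES_SEC		(MY_MG_DISK_RES_KB << 1)	/* = 2048 sectors of 512 bytes */
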
diff --git a/block/blk-core.c b/block/blk-core.c
index 25572802dac2..43fdedc524ee 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -64,12 +64,11 @@ static struct workqueue_struct *kblockd_workqueue;
 
 static void drive_stat_acct(struct request *rq, int new_io)
 {
-	struct gendisk *disk = rq->rq_disk;
 	struct hd_struct *part;
 	int rw = rq_data_dir(rq);
 	int cpu;
 
-	if (!blk_fs_request(rq) || !disk || !blk_do_io_stat(disk->queue))
+	if (!blk_fs_request(rq) || !blk_do_io_stat(rq))
 		return;
 
 	cpu = part_stat_lock();
@@ -1124,8 +1123,6 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 
 	if (bio_sync(bio))
 		req->cmd_flags |= REQ_RW_SYNC;
-	if (bio_unplug(bio))
-		req->cmd_flags |= REQ_UNPLUG;
 	if (bio_rw_meta(bio))
 		req->cmd_flags |= REQ_RW_META;
 	if (bio_noidle(bio))
@@ -1675,9 +1672,7 @@ EXPORT_SYMBOL(blkdev_dequeue_request);
 
 static void blk_account_io_completion(struct request *req, unsigned int bytes)
 {
-	struct gendisk *disk = req->rq_disk;
-
-	if (!disk || !blk_do_io_stat(disk->queue))
+	if (!blk_do_io_stat(req))
 		return;
 
 	if (blk_fs_request(req)) {
@@ -1694,9 +1689,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes)
 
 static void blk_account_io_done(struct request *req)
 {
-	struct gendisk *disk = req->rq_disk;
-
-	if (!disk || !blk_do_io_stat(disk->queue))
+	if (!blk_do_io_stat(req))
 		return;
 
 	/*
@@ -1711,7 +1704,7 @@ static void blk_account_io_done(struct request *req)
 		int cpu;
 
 		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(disk, req->sector);
+		part = disk_map_sector_rcu(req->rq_disk, req->sector);
 
 		part_stat_inc(cpu, part, ios[rw]);
 		part_stat_add(cpu, part, ticks[rw], duration);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index e39cb24b7679..63760ca3da0f 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -338,6 +338,22 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 	return 1;
 }
 
+static void blk_account_io_merge(struct request *req)
+{
+	if (blk_do_io_stat(req)) {
+		struct hd_struct *part;
+		int cpu;
+
+		cpu = part_stat_lock();
+		part = disk_map_sector_rcu(req->rq_disk, req->sector);
+
+		part_round_stats(cpu, part);
+		part_dec_in_flight(part);
+
+		part_stat_unlock();
+	}
+}
+
 /*
  * Has to be called with the request spinlock acquired
  */
@@ -386,18 +402,7 @@ static int attempt_merge(struct request_queue *q, struct request *req,
 
 	elv_merge_requests(q, req, next);
 
-	if (req->rq_disk) {
-		struct hd_struct *part;
-		int cpu;
-
-		cpu = part_stat_lock();
-		part = disk_map_sector_rcu(req->rq_disk, req->sector);
-
-		part_round_stats(cpu, part);
-		part_dec_in_flight(part);
-
-		part_stat_unlock();
-	}
+	blk_account_io_merge(req);
 
 	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
 	if (blk_rq_cpu_valid(next))
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 3ff9bba3379a..73f36beff5cd 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -209,10 +209,14 @@ static ssize_t queue_iostats_store(struct request_queue *q, const char *page,
 	ssize_t ret = queue_var_store(&stats, page, count);
 
 	spin_lock_irq(q->queue_lock);
+	elv_quisce_start(q);
+
 	if (stats)
 		queue_flag_set(QUEUE_FLAG_IO_STAT, q);
 	else
 		queue_flag_clear(QUEUE_FLAG_IO_STAT, q);
+
+	elv_quisce_end(q);
 	spin_unlock_irq(q->queue_lock);
 
 	return ret;
diff --git a/block/blk.h b/block/blk.h
index 3ee94358b43d..24fcaeeaf620 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -70,6 +70,10 @@ void blk_queue_congestion_threshold(struct request_queue *q);
 
 int blk_dev_init(void);
 
+void elv_quisce_start(struct request_queue *q);
+void elv_quisce_end(struct request_queue *q);
+
+
 /*
  * Return the threshold (number of used requests) at which the queue is
  * considered to be congested. It include a little hysteresis to keep the
@@ -108,12 +112,14 @@ static inline int blk_cpu_to_group(int cpu)
 #endif
 }
 
-static inline int blk_do_io_stat(struct request_queue *q)
+static inline int blk_do_io_stat(struct request *rq)
 {
-	if (q)
-		return blk_queue_io_stat(q);
+	struct gendisk *disk = rq->rq_disk;
 
-	return 0;
+	if (!disk || !disk->queue)
+		return 0;
+
+	return blk_queue_io_stat(disk->queue) && (rq->cmd_flags & REQ_ELVPRIV);
 }
 
 #endif
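
A minimal usage sketch of the reworked gate (my_account_something() is a
hypothetical caller, not from the patch): the helper now takes the request
itself, so the NULL checks on rq_disk and the queue live in one place, and
requests that never went through the elevator (no REQ_ELVPRIV) are skipped
as well.

static void my_account_something(struct request *rq)
{
	/* one call covers: disk present, queue present, iostat enabled,
	 * and the request carries elevator private data */
	if (!blk_do_io_stat(rq))
		return;

	/* ... per-partition stats under part_stat_lock() ... */
}
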
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 9e809345f71a..a4809de6fea6 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -160,6 +160,7 @@ struct cfq_queue {
 
 	unsigned long slice_end;
 	long slice_resid;
+	unsigned int slice_dispatch;
 
 	/* pending metadata requests */
 	int meta_pending;
@@ -176,13 +177,12 @@ struct cfq_queue {
 enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_on_rr = 0,	/* on round-robin busy list */
 	CFQ_CFQQ_FLAG_wait_request,	/* waiting for a request */
+	CFQ_CFQQ_FLAG_must_dispatch,	/* must be allowed a dispatch */
 	CFQ_CFQQ_FLAG_must_alloc,	/* must be allowed rq alloc */
 	CFQ_CFQQ_FLAG_must_alloc_slice,	/* per-slice must_alloc flag */
-	CFQ_CFQQ_FLAG_must_dispatch,	/* must dispatch, even if expired */
 	CFQ_CFQQ_FLAG_fifo_expire,	/* FIFO checked in this slice */
 	CFQ_CFQQ_FLAG_idle_window,	/* slice idling enabled */
 	CFQ_CFQQ_FLAG_prio_changed,	/* task priority has changed */
-	CFQ_CFQQ_FLAG_queue_new,	/* queue never been serviced */
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 };
@@ -203,13 +203,12 @@ static inline int cfq_cfqq_##name(const struct cfq_queue *cfqq) \
 
 CFQ_CFQQ_FNS(on_rr);
 CFQ_CFQQ_FNS(wait_request);
+CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(must_alloc);
 CFQ_CFQQ_FNS(must_alloc_slice);
-CFQ_CFQQ_FNS(must_dispatch);
 CFQ_CFQQ_FNS(fifo_expire);
 CFQ_CFQQ_FNS(idle_window);
 CFQ_CFQQ_FNS(prio_changed);
-CFQ_CFQQ_FNS(queue_new);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 #undef CFQ_CFQQ_FNS
@@ -774,10 +773,15 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 	if (cfqq) {
 		cfq_log_cfqq(cfqd, cfqq, "set_active");
 		cfqq->slice_end = 0;
+		cfqq->slice_dispatch = 0;
+
+		cfq_clear_cfqq_wait_request(cfqq);
+		cfq_clear_cfqq_must_dispatch(cfqq);
 		cfq_clear_cfqq_must_alloc_slice(cfqq);
 		cfq_clear_cfqq_fifo_expire(cfqq);
 		cfq_mark_cfqq_slice_new(cfqq);
-		cfq_clear_cfqq_queue_new(cfqq);
+
+		del_timer(&cfqd->idle_slice_timer);
 	}
 
 	cfqd->active_queue = cfqq;
@@ -795,7 +799,6 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	if (cfq_cfqq_wait_request(cfqq))
 		del_timer(&cfqd->idle_slice_timer);
 
-	cfq_clear_cfqq_must_dispatch(cfqq);
 	cfq_clear_cfqq_wait_request(cfqq);
 
 	/*
@@ -924,7 +927,6 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	    (sample_valid(cic->ttime_samples) && cic->ttime_mean > 2))
 		return;
 
-	cfq_mark_cfqq_must_dispatch(cfqq);
 	cfq_mark_cfqq_wait_request(cfqq);
 
 	/*
@@ -1010,7 +1012,7 @@ static struct cfq_queue *cfq_select_queue(struct cfq_data *cfqd)
 	/*
 	 * The active queue has run out of time, expire it and select new.
 	 */
-	if (cfq_slice_used(cfqq))
+	if (cfq_slice_used(cfqq) && !cfq_cfqq_must_dispatch(cfqq))
 		goto expire;
 
 	/*
@@ -1053,66 +1055,6 @@ keep_queue:
 	return cfqq;
 }
 
-/*
- * Dispatch some requests from cfqq, moving them to the request queue
- * dispatch list.
- */
-static int
-__cfq_dispatch_requests(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			int max_dispatch)
-{
-	int dispatched = 0;
-
-	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
-
-	do {
-		struct request *rq;
-
-		/*
-		 * follow expired path, else get first next available
-		 */
-		rq = cfq_check_fifo(cfqq);
-		if (rq == NULL)
-			rq = cfqq->next_rq;
-
-		/*
-		 * finally, insert request into driver dispatch list
-		 */
-		cfq_dispatch_insert(cfqd->queue, rq);
-
-		dispatched++;
-
-		if (!cfqd->active_cic) {
-			atomic_inc(&RQ_CIC(rq)->ioc->refcount);
-			cfqd->active_cic = RQ_CIC(rq);
-		}
-
-		if (RB_EMPTY_ROOT(&cfqq->sort_list))
-			break;
-
-		/*
-		 * If there is a non-empty RT cfqq waiting for current
-		 * cfqq's timeslice to complete, pre-empt this cfqq
-		 */
-		if (!cfq_class_rt(cfqq) && cfqd->busy_rt_queues)
-			break;
-
-	} while (dispatched < max_dispatch);
-
-	/*
-	 * expire an async queue immediately if it has used up its slice. idle
-	 * queue always expire after 1 dispatch round.
-	 */
-	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
-	    dispatched >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
-	    cfq_class_idle(cfqq))) {
-		cfqq->slice_end = jiffies + 1;
-		cfq_slice_expired(cfqd, 0);
-	}
-
-	return dispatched;
-}
-
 static int __cfq_forced_dispatch_cfqq(struct cfq_queue *cfqq)
 {
 	int dispatched = 0;
@@ -1146,11 +1088,45 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	return dispatched;
 }
 
+/*
+ * Dispatch a request from cfqq, moving them to the request queue
+ * dispatch list.
+ */
+static void cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
+{
+	struct request *rq;
+
+	BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
+
+	/*
+	 * follow expired path, else get first next available
+	 */
+	rq = cfq_check_fifo(cfqq);
+	if (!rq)
+		rq = cfqq->next_rq;
+
+	/*
+	 * insert request into driver dispatch list
+	 */
+	cfq_dispatch_insert(cfqd->queue, rq);
+
+	if (!cfqd->active_cic) {
+		struct cfq_io_context *cic = RQ_CIC(rq);
+
+		atomic_inc(&cic->ioc->refcount);
+		cfqd->active_cic = cic;
+	}
+}
+
+/*
+ * Find the cfqq that we need to service and move a request from that to the
+ * dispatch list
+ */
 static int cfq_dispatch_requests(struct request_queue *q, int force)
 {
 	struct cfq_data *cfqd = q->elevator->elevator_data;
 	struct cfq_queue *cfqq;
-	int dispatched;
+	unsigned int max_dispatch;
 
 	if (!cfqd->busy_queues)
 		return 0;
@@ -1158,29 +1134,63 @@ static int cfq_dispatch_requests(struct request_queue *q, int force)
 	if (unlikely(force))
 		return cfq_forced_dispatch(cfqd);
 
-	dispatched = 0;
-	while ((cfqq = cfq_select_queue(cfqd)) != NULL) {
-		int max_dispatch;
+	cfqq = cfq_select_queue(cfqd);
+	if (!cfqq)
+		return 0;
+
+	/*
+	 * If this is an async queue and we have sync IO in flight, let it wait
+	 */
+	if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
+		return 0;
+
+	max_dispatch = cfqd->cfq_quantum;
+	if (cfq_class_idle(cfqq))
+		max_dispatch = 1;
 
-		max_dispatch = cfqd->cfq_quantum;
+	/*
+	 * Does this cfqq already have too much IO in flight?
+	 */
+	if (cfqq->dispatched >= max_dispatch) {
+		/*
+		 * idle queue must always only have a single IO in flight
+		 */
 		if (cfq_class_idle(cfqq))
-			max_dispatch = 1;
+			return 0;
 
-		if (cfqq->dispatched >= max_dispatch && cfqd->busy_queues > 1)
-			break;
+		/*
+		 * We have other queues, don't allow more IO from this one
+		 */
+		if (cfqd->busy_queues > 1)
+			return 0;
 
-		if (cfqd->sync_flight && !cfq_cfqq_sync(cfqq))
-			break;
+		/*
+		 * we are the only queue, allow up to 4 times of 'quantum'
+		 */
+		if (cfqq->dispatched >= 4 * max_dispatch)
+			return 0;
+	}
 
-		cfq_clear_cfqq_must_dispatch(cfqq);
-		cfq_clear_cfqq_wait_request(cfqq);
-		del_timer(&cfqd->idle_slice_timer);
+	/*
+	 * Dispatch a request from this cfqq
+	 */
+	cfq_dispatch_request(cfqd, cfqq);
+	cfqq->slice_dispatch++;
+	cfq_clear_cfqq_must_dispatch(cfqq);
 
-		dispatched += __cfq_dispatch_requests(cfqd, cfqq, max_dispatch);
+	/*
+	 * expire an async queue immediately if it has used up its slice. idle
+	 * queue always expire after 1 dispatch round.
+	 */
+	if (cfqd->busy_queues > 1 && ((!cfq_cfqq_sync(cfqq) &&
+	    cfqq->slice_dispatch >= cfq_prio_to_maxrq(cfqd, cfqq)) ||
+	    cfq_class_idle(cfqq))) {
+		cfqq->slice_end = jiffies + 1;
+		cfq_slice_expired(cfqd, 0);
 	}
 
-	cfq_log(cfqd, "dispatched=%d", dispatched);
-	return dispatched;
+	cfq_log(cfqd, "dispatched a request");
+	return 1;
 }
 
 /*
1185 1195
1186/* 1196/*
@@ -1506,7 +1516,6 @@ retry:
 		cfqq->cfqd = cfqd;
 
 		cfq_mark_cfqq_prio_changed(cfqq);
-		cfq_mark_cfqq_queue_new(cfqq);
 
 		cfq_init_prio_data(cfqq, ioc);
 
@@ -1893,15 +1902,13 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 
 	if (cfqq == cfqd->active_queue) {
 		/*
-		 * if we are waiting for a request for this queue, let it rip
-		 * immediately and flag that we must not expire this queue
-		 * just now
+		 * Remember that we saw a request from this process, but
+		 * don't start queuing just yet. Otherwise we risk seeing lots
+		 * of tiny requests, because we disrupt the normal plugging
+		 * and merging.
 		 */
-		if (cfq_cfqq_wait_request(cfqq)) {
+		if (cfq_cfqq_wait_request(cfqq))
 			cfq_mark_cfqq_must_dispatch(cfqq);
-			del_timer(&cfqd->idle_slice_timer);
-			blk_start_queueing(cfqd->queue);
-		}
 	} else if (cfq_should_preempt(cfqd, cfqq, rq)) {
 		/*
 		 * not the active queue - expire current slice if it is
@@ -1910,7 +1917,6 @@ cfq_rq_enqueued(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 		 * this new queue is RT and the current one is BE
 		 */
 		cfq_preempt_queue(cfqd, cfqq);
-		cfq_mark_cfqq_must_dispatch(cfqq);
 		blk_start_queueing(cfqd->queue);
 	}
 }
@@ -2172,6 +2178,12 @@ static void cfq_idle_slice_timer(unsigned long data)
 		timed_out = 0;
 
 		/*
+		 * We saw a request before the queue expired, let it through
+		 */
+		if (cfq_cfqq_must_dispatch(cfqq))
+			goto out_kick;
+
+		/*
 		 * expired
 		 */
 		if (cfq_slice_used(cfqq))
@@ -2187,10 +2199,8 @@ static void cfq_idle_slice_timer(unsigned long data)
 		/*
 		 * not expired and it has a request pending, let it dispatch
 		 */
-		if (!RB_EMPTY_ROOT(&cfqq->sort_list)) {
-			cfq_mark_cfqq_must_dispatch(cfqq);
+		if (!RB_EMPTY_ROOT(&cfqq->sort_list))
 			goto out_kick;
-		}
 	}
 expire:
 	cfq_slice_expired(cfqd, timed_out);
diff --git a/block/elevator.c b/block/elevator.c
index ca6788a0195a..fb81bcc14a8c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -573,7 +573,7 @@ void elv_requeue_request(struct request_queue *q, struct request *rq)
 	elv_insert(q, rq, ELEVATOR_INSERT_REQUEUE);
 }
 
-static void elv_drain_elevator(struct request_queue *q)
+void elv_drain_elevator(struct request_queue *q)
 {
 	static int printed;
 	while (q->elevator->ops->elevator_dispatch_fn(q, 1))
@@ -587,6 +587,31 @@ static void elv_drain_elevator(struct request_queue *q)
 	}
 }
 
+/*
+ * Call with queue lock held, interrupts disabled
+ */
+void elv_quisce_start(struct request_queue *q)
+{
+	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
+
+	/*
+	 * make sure we don't have any requests in flight
+	 */
+	elv_drain_elevator(q);
+	while (q->rq.elvpriv) {
+		blk_start_queueing(q);
+		spin_unlock_irq(q->queue_lock);
+		msleep(10);
+		spin_lock_irq(q->queue_lock);
+		elv_drain_elevator(q);
+	}
+}
+
+void elv_quisce_end(struct request_queue *q)
+{
+	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+}
+
 void elv_insert(struct request_queue *q, struct request *rq, int where)
 {
 	struct list_head *pos;
@@ -1101,18 +1126,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 * Turn on BYPASS and drain all requests w/ elevator private data
 	 */
 	spin_lock_irq(q->queue_lock);
-
-	queue_flag_set(QUEUE_FLAG_ELVSWITCH, q);
-
-	elv_drain_elevator(q);
-
-	while (q->rq.elvpriv) {
-		blk_start_queueing(q);
-		spin_unlock_irq(q->queue_lock);
-		msleep(10);
-		spin_lock_irq(q->queue_lock);
-		elv_drain_elevator(q);
-	}
+	elv_quisce_start(q);
 
 	/*
 	 * Remember old elevator.
@@ -1136,7 +1150,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	 */
 	elevator_exit(old_elevator);
 	spin_lock_irq(q->queue_lock);
-	queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q);
+	elv_quisce_end(q);
 	spin_unlock_irq(q->queue_lock);
 
 	blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name);
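
The factored-out pair is meant to be used as in the two callers above,
queue_iostats_store() and elevator_switch(). A sketch of the calling
convention, noting that elv_quisce_start() may drop and retake the queue
lock internally while it drains:

	spin_lock_irq(q->queue_lock);
	elv_quisce_start(q);	/* sets QUEUE_FLAG_ELVSWITCH, drains elvpriv requests */

	/* ... change queue state with no elevator-private requests in flight ... */

	elv_quisce_end(q);	/* clears QUEUE_FLAG_ELVSWITCH */
	spin_unlock_irq(q->queue_lock);
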
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index e7b8aa0cb47c..ddea8e485cc9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -410,6 +410,23 @@ config ATA_OVER_ETH
 	  This driver provides Support for ATA over Ethernet block
 	  devices like the Coraid EtherDrive (R) Storage Blade.
 
+config MG_DISK
+	tristate "mGine mflash, gflash support"
+	depends on ARM && ATA && GPIOLIB
+	help
+	  mGine mFlash(gFlash) block device driver
+
+config MG_DISK_RES
+	int "Size of reserved area before MBR"
+	depends on MG_DISK
+	default 0
+	help
+	  Define the size of the reserved area that is usually used for boot.
+	  The unit is KB. All block device operations will take this value as
+	  their start offset.
+	  Example:
+	  1024 => 1 MB
+
 config SUNVDC
 	tristate "Sun Virtual Disk Client support"
 	depends on SUN_LDOMS
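
A hypothetical .config fragment for a board that boots from mflash and
reserves 1 MB for the boot loader (illustrative values, not defaults):

CONFIG_MG_DISK=y
CONFIG_MG_DISK_RES=1024
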
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 3145141cef72..7755a5e2a85e 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_BLK_CPQ_CISS_DA) += cciss.o
 obj-$(CONFIG_BLK_DEV_DAC960)	+= DAC960.o
 obj-$(CONFIG_XILINX_SYSACE)	+= xsysace.o
 obj-$(CONFIG_CDROM_PKTCDVD)	+= pktcdvd.o
+obj-$(CONFIG_MG_DISK)		+= mg_disk.o
 obj-$(CONFIG_SUNVDC)		+= sunvdc.o
 
 obj-$(CONFIG_BLK_DEV_UMEM)	+= umem.o
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index a6c55432819b..0ef6f08aa6ea 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -51,6 +51,7 @@
 #include <scsi/scsi_ioctl.h>
 #include <linux/cdrom.h>
 #include <linux/scatterlist.h>
+#include <linux/kthread.h>
 
 #define CCISS_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
 #define DRIVER_NAME "HP CISS Driver (v 3.6.20)"
@@ -186,6 +187,8 @@ static int sendcmd_withirq(__u8 cmd, int ctlr, void *buff, size_t size,
 			  __u8 page_code, int cmd_type);
 
 static void fail_all_cmds(unsigned long ctlr);
+static int scan_thread(void *data);
+static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
 
 #ifdef CONFIG_PROC_FS
 static void cciss_procinit(int i);
@@ -735,6 +738,12 @@ static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 	return 0;
 }
 
+static void check_ioctl_unit_attention(ctlr_info_t *host, CommandList_struct *c)
+{
+	if (c->err_info->CommandStatus == CMD_TARGET_STATUS &&
+			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)
+		(void)check_for_unit_attention(host, c);
+}
 /*
  * ioctl
  */
@@ -1029,6 +1038,8 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 				    iocommand.buf_size,
 				    PCI_DMA_BIDIRECTIONAL);
 
+		check_ioctl_unit_attention(host, c);
+
 		/* Copy the error information out */
 		iocommand.error_info = *(c->err_info);
 		if (copy_to_user
@@ -1180,6 +1191,7 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
 					(dma_addr_t) temp64.val, buff_size[i],
 					PCI_DMA_BIDIRECTIONAL);
 			}
+			check_ioctl_unit_attention(host, c);
 			/* Copy the error information out */
 			ioc->error_info = *(c->err_info);
 			if (copy_to_user(argp, ioc, sizeof(*ioc))) {
@@ -1287,6 +1299,7 @@ static void cciss_softirq_done(struct request *rq)
 {
 	CommandList_struct *cmd = rq->completion_data;
 	ctlr_info_t *h = hba[cmd->ctlr];
+	unsigned int nr_bytes;
 	unsigned long flags;
 	u64bit temp64;
 	int i, ddir;
@@ -1308,7 +1321,14 @@ static void cciss_softirq_done(struct request *rq)
 	printk("Done with %p\n", rq);
 #endif /* CCISS_DEBUG */
 
-	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, blk_rq_bytes(rq)))
+	/*
+	 * Store the full size and set the residual count for pc requests
+	 */
+	nr_bytes = blk_rq_bytes(rq);
+	if (blk_pc_request(rq))
+		rq->data_len = cmd->err_info->ResidualCnt;
+
+	if (blk_end_request(rq, (rq->errors == 0) ? 0 : -EIO, nr_bytes))
 		BUG();
 
 	spin_lock_irqsave(&h->lock, flags);
@@ -2585,12 +2605,14 @@ static inline unsigned int make_status_bytes(unsigned int scsi_status_byte,
 	    ((driver_byte & 0xff) << 24);
 }
 
-static inline int evaluate_target_status(CommandList_struct *cmd)
+static inline int evaluate_target_status(ctlr_info_t *h,
+			CommandList_struct *cmd, int *retry_cmd)
 {
 	unsigned char sense_key;
 	unsigned char status_byte, msg_byte, host_byte, driver_byte;
 	int error_value;
 
+	*retry_cmd = 0;
 	/* If we get in here, it means we got "target status", that is, scsi status */
 	status_byte = cmd->err_info->ScsiStatus;
 	driver_byte = DRIVER_OK;
@@ -2618,6 +2640,11 @@ static inline int evaluate_target_status(CommandList_struct *cmd)
 	if (((sense_key == 0x0) || (sense_key == 0x1)) && !blk_pc_request(cmd->rq))
 		error_value = 0;
 
+	if (check_for_unit_attention(h, cmd)) {
+		*retry_cmd = !blk_pc_request(cmd->rq);
+		return 0;
+	}
+
 	if (!blk_pc_request(cmd->rq)) { /* Not SG_IO or similar? */
 		if (error_value != 0)
 			printk(KERN_WARNING "cciss: cmd %p has CHECK CONDITION"
@@ -2657,7 +2684,7 @@ static inline void complete_command(ctlr_info_t *h, CommandList_struct *cmd,
 
 	switch (cmd->err_info->CommandStatus) {
 	case CMD_TARGET_STATUS:
-		rq->errors = evaluate_target_status(cmd);
+		rq->errors = evaluate_target_status(h, cmd, &retry_cmd);
 		break;
 	case CMD_DATA_UNDERRUN:
 		if (blk_fs_request(cmd->rq)) {
@@ -3008,6 +3035,63 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static int scan_thread(void *data)
+{
+	ctlr_info_t *h = data;
+	int rc;
+	DECLARE_COMPLETION_ONSTACK(wait);
+	h->rescan_wait = &wait;
+
+	for (;;) {
+		rc = wait_for_completion_interruptible(&wait);
+		if (kthread_should_stop())
+			break;
+		if (!rc)
+			rebuild_lun_table(h, 0);
+	}
+	return 0;
+}
+
+static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
+{
+	if (c->err_info->SenseInfo[2] != UNIT_ATTENTION)
+		return 0;
+
+	switch (c->err_info->SenseInfo[12]) {
+	case STATE_CHANGED:
+		printk(KERN_WARNING "cciss%d: a state change "
+			"detected, command retried\n", h->ctlr);
+		return 1;
+	break;
+	case LUN_FAILED:
+		printk(KERN_WARNING "cciss%d: LUN failure "
+			"detected, action required\n", h->ctlr);
+		return 1;
+	break;
+	case REPORT_LUNS_CHANGED:
+		printk(KERN_WARNING "cciss%d: report LUN data "
+			"changed\n", h->ctlr);
+		if (h->rescan_wait)
+			complete(h->rescan_wait);
+		return 1;
+	break;
+	case POWER_OR_RESET:
+		printk(KERN_WARNING "cciss%d: a power on "
+			"or device reset detected\n", h->ctlr);
+		return 1;
+	break;
+	case UNIT_ATTENTION_CLEARED:
+		printk(KERN_WARNING "cciss%d: unit attention "
+			"cleared by another initiator\n", h->ctlr);
+		return 1;
+	break;
+	default:
+		printk(KERN_WARNING "cciss%d: unknown "
+			"unit attention detected\n", h->ctlr);
+		return 1;
+	}
+}
+
 /*
  * We cannot read the structure directly, for portability we must use
  * the io functions.
@@ -3181,12 +3265,21 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 	 */
 	cciss_interrupt_mode(c, pdev, board_id);
 
-	/*
-	 * Memory base addr is first addr , the second points to the config
-	 * table
-	 */
+	/* find the memory BAR */
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		if (pci_resource_flags(pdev, i) & IORESOURCE_MEM)
+			break;
+	}
+	if (i == DEVICE_COUNT_RESOURCE) {
+		printk(KERN_WARNING "cciss: No memory BAR found\n");
+		err = -ENODEV;
+		goto err_out_free_res;
+	}
+
+	c->paddr = pci_resource_start(pdev, i); /* addressing mode bits
+						 * already removed
+						 */
 
-	c->paddr = pci_resource_start(pdev, 0); /* addressing mode bits already removed */
 #ifdef CCISS_DEBUG
 	printk("address 0 = %lx\n", c->paddr);
 #endif /* CCISS_DEBUG */
@@ -3753,6 +3846,11 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
 	hba[i]->busy_initializing = 0;
 
 	rebuild_lun_table(hba[i], 1);
+	hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i],
+				"cciss_scan%02d", i);
+	if (IS_ERR(hba[i]->cciss_scan_thread))
+		return PTR_ERR(hba[i]->cciss_scan_thread);
+
 	return 1;
 
 clean4:
@@ -3828,6 +3926,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
 		printk(KERN_ERR "cciss: Unable to remove device \n");
 		return;
 	}
+
 	tmp_ptr = pci_get_drvdata(pdev);
 	i = tmp_ptr->ctlr;
 	if (hba[i] == NULL) {
@@ -3836,6 +3935,8 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
 		return;
 	}
 
+	kthread_stop(hba[i]->cciss_scan_thread);
+
 	remove_proc_entry(hba[i]->devname, proc_cciss);
 	unregister_blkdev(hba[i]->major, hba[i]->devname);
 
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 15e2b84734e3..703e08038fb9 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -121,6 +121,8 @@ struct ctlr_info
 	struct sendcmd_reject_list scsi_rejects;
 #endif
 	unsigned char alive;
+	struct completion *rescan_wait;
+	struct task_struct *cciss_scan_thread;
 };
 
 /* Defining the diffent access_menthods */
diff --git a/drivers/block/cciss_cmd.h b/drivers/block/cciss_cmd.h
index 24e22dea1a99..40b1b92dae7f 100644
--- a/drivers/block/cciss_cmd.h
+++ b/drivers/block/cciss_cmd.h
@@ -25,6 +25,29 @@
 #define CMD_TIMEOUT             0x000B
 #define CMD_UNABORTABLE		0x000C
 
+/* Unit Attentions ASC's as defined for the MSA2012sa */
+#define POWER_OR_RESET			0x29
+#define STATE_CHANGED			0x2a
+#define UNIT_ATTENTION_CLEARED		0x2f
+#define LUN_FAILED			0x3e
+#define REPORT_LUNS_CHANGED		0x3f
+
+/* Unit Attentions ASCQ's as defined for the MSA2012sa */
+
+	/* These ASCQ's defined for ASC = POWER_OR_RESET */
+#define POWER_ON_RESET			0x00
+#define POWER_ON_REBOOT			0x01
+#define SCSI_BUS_RESET			0x02
+#define MSA_TARGET_RESET		0x03
+#define CONTROLLER_FAILOVER		0x04
+#define TRANSCEIVER_SE			0x05
+#define TRANSCEIVER_LVD			0x06
+
+	/* These ASCQ's defined for ASC = STATE_CHANGED */
+#define RESERVATION_PREEMPTED		0x03
+#define ASYM_ACCESS_CHANGED		0x06
+#define LUN_CAPACITY_CHANGED		0x09
+
 //transfer direction
 #define XFER_NONE               0x00
 #define XFER_WRITE              0x01
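
For orientation (not part of the patch): in fixed-format SCSI sense data,
byte 2 carries the sense key and bytes 12/13 the ASC/ASCQ, which is why
check_for_unit_attention() in cciss.c tests SenseInfo[2] against
UNIT_ATTENTION and SenseInfo[12] against the ASC values above. A
hypothetical helper spelling out the sense-key check:

static inline int my_sense_is_unit_attention(const unsigned char *sense)
{
	return (sense[2] & 0x0f) == 0x06;	/* UNIT ATTENTION sense key */
}
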
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 40b17d3b55a1..ddae80825899 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1431,6 +1431,7 @@ static int lo_open(struct block_device *bdev, fmode_t mode)
 static int lo_release(struct gendisk *disk, fmode_t mode)
 {
 	struct loop_device *lo = disk->private_data;
+	int err;
 
 	mutex_lock(&lo->lo_ctl_mutex);
 
@@ -1442,7 +1443,9 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 		 * In autoclear mode, stop the loop thread
 		 * and remove configuration after last close.
 		 */
-		loop_clr_fd(lo, NULL);
+		err = loop_clr_fd(lo, NULL);
+		if (!err)
+			goto out_unlocked;
 	} else {
 		/*
 		 * Otherwise keep thread (if running) and config,
@@ -1453,7 +1456,7 @@ static int lo_release(struct gendisk *disk, fmode_t mode)
 
 out:
 	mutex_unlock(&lo->lo_ctl_mutex);
-
+out_unlocked:
 	return 0;
 }
 
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
new file mode 100644
index 000000000000..fb39d9aa3cdc
--- /dev/null
+++ b/drivers/block/mg_disk.c
@@ -0,0 +1,1005 @@
1/*
2 * drivers/block/mg_disk.c
3 *
4 * Support for the mGine m[g]flash IO mode.
5 * Based on legacy hd.c
6 *
7 * (c) 2008 mGine Co.,LTD
8 * (c) 2008 unsik Kim <donari75@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/fs.h>
18#include <linux/blkdev.h>
19#include <linux/hdreg.h>
20#include <linux/libata.h>
21#include <linux/interrupt.h>
22#include <linux/delay.h>
23#include <linux/platform_device.h>
24#include <linux/gpio.h>
25#include <linux/mg_disk.h>
26
27#define MG_RES_SEC (CONFIG_MG_DISK_RES << 1)
28
29static void mg_request(struct request_queue *);
30
31static void mg_dump_status(const char *msg, unsigned int stat,
32 struct mg_host *host)
33{
34 char *name = MG_DISK_NAME;
35 struct request *req;
36
37 if (host->breq) {
38 req = elv_next_request(host->breq);
39 if (req)
40 name = req->rq_disk->disk_name;
41 }
42
43 printk(KERN_ERR "%s: %s: status=0x%02x { ", name, msg, stat & 0xff);
44 if (stat & MG_REG_STATUS_BIT_BUSY)
45 printk("Busy ");
46 if (stat & MG_REG_STATUS_BIT_READY)
47 printk("DriveReady ");
48 if (stat & MG_REG_STATUS_BIT_WRITE_FAULT)
49 printk("WriteFault ");
50 if (stat & MG_REG_STATUS_BIT_SEEK_DONE)
51 printk("SeekComplete ");
52 if (stat & MG_REG_STATUS_BIT_DATA_REQ)
53 printk("DataRequest ");
54 if (stat & MG_REG_STATUS_BIT_CORRECTED_ERROR)
55 printk("CorrectedError ");
56 if (stat & MG_REG_STATUS_BIT_ERROR)
57 printk("Error ");
58 printk("}\n");
59 if ((stat & MG_REG_STATUS_BIT_ERROR) == 0) {
60 host->error = 0;
61 } else {
62 host->error = inb((unsigned long)host->dev_base + MG_REG_ERROR);
63 printk(KERN_ERR "%s: %s: error=0x%02x { ", name, msg,
64 host->error & 0xff);
65 if (host->error & MG_REG_ERR_BBK)
66 printk("BadSector ");
67 if (host->error & MG_REG_ERR_UNC)
68 printk("UncorrectableError ");
69 if (host->error & MG_REG_ERR_IDNF)
70 printk("SectorIdNotFound ");
71 if (host->error & MG_REG_ERR_ABRT)
72 printk("DriveStatusError ");
73 if (host->error & MG_REG_ERR_AMNF)
74 printk("AddrMarkNotFound ");
75 printk("}");
76 if (host->error &
77 (MG_REG_ERR_BBK | MG_REG_ERR_UNC |
78 MG_REG_ERR_IDNF | MG_REG_ERR_AMNF)) {
79 if (host->breq) {
80 req = elv_next_request(host->breq);
81 if (req)
82 printk(", sector=%ld", req->sector);
83 }
84
85 }
86 printk("\n");
87 }
88}
89
90static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
91{
92 u8 status;
93 unsigned long expire, cur_jiffies;
94 struct mg_drv_data *prv_data = host->dev->platform_data;
95
96 host->error = MG_ERR_NONE;
97 expire = jiffies + msecs_to_jiffies(msec);
98
99 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
100
101 do {
102 cur_jiffies = jiffies;
103 if (status & MG_REG_STATUS_BIT_BUSY) {
104 if (expect == MG_REG_STATUS_BIT_BUSY)
105 break;
106 } else {
107 /* Check the error condition! */
108 if (status & MG_REG_STATUS_BIT_ERROR) {
109 mg_dump_status("mg_wait", status, host);
110 break;
111 }
112
113 if (expect == MG_STAT_READY)
114 if (MG_READY_OK(status))
115 break;
116
117 if (expect == MG_REG_STATUS_BIT_DATA_REQ)
118 if (status & MG_REG_STATUS_BIT_DATA_REQ)
119 break;
120 }
121 if (!msec) {
122 mg_dump_status("not ready", status, host);
123 return MG_ERR_INV_STAT;
124 }
125 if (prv_data->use_polling)
126 msleep(1);
127
128 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
129 } while (time_before(cur_jiffies, expire));
130
131 if (time_after_eq(cur_jiffies, expire) && msec)
132 host->error = MG_ERR_TIMEOUT;
133
134 return host->error;
135}
136
137static unsigned int mg_wait_rstout(u32 rstout, u32 msec)
138{
139 unsigned long expire;
140
141 expire = jiffies + msecs_to_jiffies(msec);
142 while (time_before(jiffies, expire)) {
143 if (gpio_get_value(rstout) == 1)
144 return MG_ERR_NONE;
145 msleep(10);
146 }
147
148 return MG_ERR_RSTOUT;
149}
150
151static void mg_unexpected_intr(struct mg_host *host)
152{
153 u32 status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
154
155 mg_dump_status("mg_unexpected_intr", status, host);
156}
157
158static irqreturn_t mg_irq(int irq, void *dev_id)
159{
160 struct mg_host *host = dev_id;
161 void (*handler)(struct mg_host *) = host->mg_do_intr;
162
163 host->mg_do_intr = 0;
164 del_timer(&host->timer);
165 if (!handler)
166 handler = mg_unexpected_intr;
167 handler(host);
168 return IRQ_HANDLED;
169}
170
171static int mg_get_disk_id(struct mg_host *host)
172{
173 u32 i;
174 s32 err;
175 const u16 *id = host->id;
176 struct mg_drv_data *prv_data = host->dev->platform_data;
177 char fwrev[ATA_ID_FW_REV_LEN + 1];
178 char model[ATA_ID_PROD_LEN + 1];
179 char serial[ATA_ID_SERNO_LEN + 1];
180
181 if (!prv_data->use_polling)
182 outb(MG_REG_CTRL_INTR_DISABLE,
183 (unsigned long)host->dev_base +
184 MG_REG_DRV_CTRL);
185
186 outb(MG_CMD_ID, (unsigned long)host->dev_base + MG_REG_COMMAND);
187 err = mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_RD_DRQ);
188 if (err)
189 return err;
190
191 for (i = 0; i < (MG_SECTOR_SIZE >> 1); i++)
192 host->id[i] = le16_to_cpu(inw((unsigned long)host->dev_base +
193 MG_BUFF_OFFSET + i * 2));
194
195 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
196 err = mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD);
197 if (err)
198 return err;
199
200 if ((id[ATA_ID_FIELD_VALID] & 1) == 0)
201 return MG_ERR_TRANSLATION;
202
203 host->n_sectors = ata_id_u32(id, ATA_ID_LBA_CAPACITY);
204 host->cyls = id[ATA_ID_CYLS];
205 host->heads = id[ATA_ID_HEADS];
206 host->sectors = id[ATA_ID_SECTORS];
207
208 if (MG_RES_SEC && host->heads && host->sectors) {
209 /* modify cyls, n_sectors */
210 host->cyls = (host->n_sectors - MG_RES_SEC) /
211 host->heads / host->sectors;
212 host->nres_sectors = host->n_sectors - host->cyls *
213 host->heads * host->sectors;
214 host->n_sectors -= host->nres_sectors;
215 }
216
217 ata_id_c_string(id, fwrev, ATA_ID_FW_REV, sizeof(fwrev));
218 ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
219 ata_id_c_string(id, serial, ATA_ID_SERNO, sizeof(serial));
220 printk(KERN_INFO "mg_disk: model: %s\n", model);
221 printk(KERN_INFO "mg_disk: firm: %.8s\n", fwrev);
222 printk(KERN_INFO "mg_disk: serial: %s\n", serial);
223 printk(KERN_INFO "mg_disk: %d + reserved %d sectors\n",
224 host->n_sectors, host->nres_sectors);
225
226 if (!prv_data->use_polling)
227 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
228 MG_REG_DRV_CTRL);
229
230 return err;
231}
232
233
234static int mg_disk_init(struct mg_host *host)
235{
236 struct mg_drv_data *prv_data = host->dev->platform_data;
237 s32 err;
238 u8 init_status;
239
240 /* hdd rst low */
241 gpio_set_value(host->rst, 0);
242 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
243 if (err)
244 return err;
245
246 /* hdd rst high */
247 gpio_set_value(host->rst, 1);
248 err = mg_wait(host, MG_STAT_READY, MG_TMAX_HDRST_TO_RDY);
249 if (err)
250 return err;
251
252 /* soft reset on */
253 outb(MG_REG_CTRL_RESET |
254 (prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
255 MG_REG_CTRL_INTR_ENABLE),
256 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
257 err = mg_wait(host, MG_REG_STATUS_BIT_BUSY, MG_TMAX_RST_TO_BUSY);
258 if (err)
259 return err;
260
261 /* soft reset off */
262 outb(prv_data->use_polling ? MG_REG_CTRL_INTR_DISABLE :
263 MG_REG_CTRL_INTR_ENABLE,
264 (unsigned long)host->dev_base + MG_REG_DRV_CTRL);
265 err = mg_wait(host, MG_STAT_READY, MG_TMAX_SWRST_TO_RDY);
266 if (err)
267 return err;
268
269 init_status = inb((unsigned long)host->dev_base + MG_REG_STATUS) & 0xf;
270
271 if (init_status == 0xf)
272 return MG_ERR_INIT_STAT;
273
274 return err;
275}
276
277static void mg_bad_rw_intr(struct mg_host *host)
278{
279 struct request *req = elv_next_request(host->breq);
280 if (req != NULL)
281 if (++req->errors >= MG_MAX_ERRORS ||
282 host->error == MG_ERR_TIMEOUT)
283 end_request(req, 0);
284}
285
286static unsigned int mg_out(struct mg_host *host,
287 unsigned int sect_num,
288 unsigned int sect_cnt,
289 unsigned int cmd,
290 void (*intr_addr)(struct mg_host *))
291{
292 struct mg_drv_data *prv_data = host->dev->platform_data;
293
294 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
295 return host->error;
296
297 if (!prv_data->use_polling) {
298 host->mg_do_intr = intr_addr;
299 mod_timer(&host->timer, jiffies + 3 * HZ);
300 }
301 if (MG_RES_SEC)
302 sect_num += MG_RES_SEC;
303 outb((u8)sect_cnt, (unsigned long)host->dev_base + MG_REG_SECT_CNT);
304 outb((u8)sect_num, (unsigned long)host->dev_base + MG_REG_SECT_NUM);
305 outb((u8)(sect_num >> 8), (unsigned long)host->dev_base +
306 MG_REG_CYL_LOW);
307 outb((u8)(sect_num >> 16), (unsigned long)host->dev_base +
308 MG_REG_CYL_HIGH);
309 outb((u8)((sect_num >> 24) | MG_REG_HEAD_LBA_MODE),
310 (unsigned long)host->dev_base + MG_REG_DRV_HEAD);
311 outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
312 return MG_ERR_NONE;
313}
314
315static void mg_read(struct request *req)
316{
317 u32 remains, j;
318 struct mg_host *host = req->rq_disk->private_data;
319
320 remains = req->nr_sectors;
321
322 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_RD, 0) !=
323 MG_ERR_NONE)
324 mg_bad_rw_intr(host);
325
326 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
327 remains, req->sector, req->buffer);
328
329 while (remains) {
330 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
331 MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
332 mg_bad_rw_intr(host);
333 return;
334 }
335 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
336 *(u16 *)req->buffer =
337 inw((unsigned long)host->dev_base +
338 MG_BUFF_OFFSET + (j << 1));
339 req->buffer += 2;
340 }
341
342 req->sector++;
343 req->errors = 0;
344 remains = --req->nr_sectors;
345 --req->current_nr_sectors;
346
347 if (req->current_nr_sectors <= 0) {
348 MG_DBG("remain : %d sects\n", remains);
349 end_request(req, 1);
350 if (remains > 0)
351 req = elv_next_request(host->breq);
352 }
353
354 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
355 MG_REG_COMMAND);
356 }
357}
358
359static void mg_write(struct request *req)
360{
361 u32 remains, j;
362 struct mg_host *host = req->rq_disk->private_data;
363
364 remains = req->nr_sectors;
365
366 if (mg_out(host, req->sector, req->nr_sectors, MG_CMD_WR, 0) !=
367 MG_ERR_NONE) {
368 mg_bad_rw_intr(host);
369 return;
370 }
371
372
373 MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
374 remains, req->sector, req->buffer);
375 while (remains) {
376 if (mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ,
377 MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
378 mg_bad_rw_intr(host);
379 return;
380 }
381 for (j = 0; j < MG_SECTOR_SIZE >> 1; j++) {
382 outw(*(u16 *)req->buffer,
383 (unsigned long)host->dev_base +
384 MG_BUFF_OFFSET + (j << 1));
385 req->buffer += 2;
386 }
387 req->sector++;
388 remains = --req->nr_sectors;
389 --req->current_nr_sectors;
390
391 if (req->current_nr_sectors <= 0) {
392 MG_DBG("remain : %d sects\n", remains);
393 end_request(req, 1);
394 if (remains > 0)
395 req = elv_next_request(host->breq);
396 }
397
398 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
399 MG_REG_COMMAND);
400 }
401}
402
403static void mg_read_intr(struct mg_host *host)
404{
405 u32 i;
406 struct request *req;
407
408 /* check status */
409 do {
410 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
411 if (i & MG_REG_STATUS_BIT_BUSY)
412 break;
413 if (!MG_READY_OK(i))
414 break;
415 if (i & MG_REG_STATUS_BIT_DATA_REQ)
416 goto ok_to_read;
417 } while (0);
418 mg_dump_status("mg_read_intr", i, host);
419 mg_bad_rw_intr(host);
420 mg_request(host->breq);
421 return;
422
423ok_to_read:
424 /* get current segment of request */
425 req = elv_next_request(host->breq);
426
427 /* read 1 sector */
428 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
429 *(u16 *)req->buffer =
430 inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
431 (i << 1));
432 req->buffer += 2;
433 }
434
435 /* manipulate request */
436 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
437 req->sector, req->nr_sectors - 1, req->buffer);
438
439 req->sector++;
440 req->errors = 0;
441 i = --req->nr_sectors;
442 --req->current_nr_sectors;
443
444 /* let know if current segment done */
445 if (req->current_nr_sectors <= 0)
446 end_request(req, 1);
447
448 /* set handler if read remains */
449 if (i > 0) {
450 host->mg_do_intr = mg_read_intr;
451 mod_timer(&host->timer, jiffies + 3 * HZ);
452 }
453
454 /* send read confirm */
455 outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
456
457 /* goto next request */
458 if (!i)
459 mg_request(host->breq);
460}
461
462static void mg_write_intr(struct mg_host *host)
463{
464 u32 i, j;
465 u16 *buff;
466 struct request *req;
467
468 /* get current segment of request */
469 req = elv_next_request(host->breq);
470
471 /* check status */
472 do {
473 i = inb((unsigned long)host->dev_base + MG_REG_STATUS);
474 if (i & MG_REG_STATUS_BIT_BUSY)
475 break;
476 if (!MG_READY_OK(i))
477 break;
478 if ((req->nr_sectors <= 1) || (i & MG_REG_STATUS_BIT_DATA_REQ))
479 goto ok_to_write;
480 } while (0);
481 mg_dump_status("mg_write_intr", i, host);
482 mg_bad_rw_intr(host);
483 mg_request(host->breq);
484 return;
485
486ok_to_write:
487 /* manipulate request */
488 req->sector++;
489 i = --req->nr_sectors;
490 --req->current_nr_sectors;
491 req->buffer += MG_SECTOR_SIZE;
492
493 /* let know if current segment or all done */
494 if (!i || (req->bio && req->current_nr_sectors <= 0))
495 end_request(req, 1);
496
497 /* write 1 sector and set handler if remains */
498 if (i > 0) {
499 buff = (u16 *)req->buffer;
500 for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
501 outw(*buff, (unsigned long)host->dev_base +
502 MG_BUFF_OFFSET + (j << 1));
503 buff++;
504 }
505 MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
506 req->sector, req->nr_sectors, req->buffer);
507 host->mg_do_intr = mg_write_intr;
508 mod_timer(&host->timer, jiffies + 3 * HZ);
509 }
510
511 /* send write confirm */
512 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base + MG_REG_COMMAND);
513
514 if (!i)
515 mg_request(host->breq);
516}
517
518void mg_times_out(unsigned long data)
519{
520 struct mg_host *host = (struct mg_host *)data;
521 char *name;
522 struct request *req;
523
524 req = elv_next_request(host->breq);
525 if (!req)
526 return;
527
528 host->mg_do_intr = NULL;
529
530 name = req->rq_disk->disk_name;
531 printk(KERN_DEBUG "%s: timeout\n", name);
532
533 host->error = MG_ERR_TIMEOUT;
534 mg_bad_rw_intr(host);
535
536 mg_request(host->breq);
537}
538
539static void mg_request_poll(struct request_queue *q)
540{
541 struct request *req;
542 struct mg_host *host;
543
544 while ((req = elv_next_request(q)) != NULL) {
545 host = req->rq_disk->private_data;
546 if (blk_fs_request(req)) {
547 switch (rq_data_dir(req)) {
548 case READ:
549 mg_read(req);
550 break;
551 case WRITE:
552 mg_write(req);
553 break;
554 default:
555 printk(KERN_WARNING "%s:%d unknown command\n",
556 __func__, __LINE__);
557 end_request(req, 0);
558 break;
559 }
560 }
561 }
562}
563
564static unsigned int mg_issue_req(struct request *req,
565 struct mg_host *host,
566 unsigned int sect_num,
567 unsigned int sect_cnt)
568{
569 u16 *buff;
570 u32 i;
571
572 switch (rq_data_dir(req)) {
573 case READ:
574 if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
575 != MG_ERR_NONE) {
576 mg_bad_rw_intr(host);
577 return host->error;
578 }
579 break;
580 case WRITE:
581 /* TODO : handler */
582 outb(MG_REG_CTRL_INTR_DISABLE,
583 (unsigned long)host->dev_base +
584 MG_REG_DRV_CTRL);
585 if (mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr)
586 != MG_ERR_NONE) {
587 mg_bad_rw_intr(host);
588 return host->error;
589 }
590 del_timer(&host->timer);
591 mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
592 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
593 MG_REG_DRV_CTRL);
594 if (host->error) {
595 mg_bad_rw_intr(host);
596 return host->error;
597 }
598 buff = (u16 *)req->buffer;
599 for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
600 outw(*buff, (unsigned long)host->dev_base +
601 MG_BUFF_OFFSET + (i << 1));
602 buff++;
603 }
604 mod_timer(&host->timer, jiffies + 3 * HZ);
605 outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
606 MG_REG_COMMAND);
607 break;
608 default:
609 printk(KERN_WARNING "%s:%d unknown command\n",
610 __func__, __LINE__);
611 end_request(req, 0);
612 break;
613 }
614 return MG_ERR_NONE;
615}
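
The WRITE arm above encodes the IO-mode handshake in full: interrupts are masked while the command is programmed, the driver polls for DRQ, the 512-byte sector buffer is filled with 16-bit writes, and only MG_CMD_WR_CONF lets the device commit the data. A condensed, hypothetical rendering of that order (base standing in for (unsigned long)host->dev_base):

	/* hypothetical condensation of the WRITE arm of mg_issue_req() */
	outb(MG_REG_CTRL_INTR_DISABLE, base + MG_REG_DRV_CTRL);  /* mask IRQ */
	mg_out(host, sect_num, sect_cnt, MG_CMD_WR, &mg_write_intr);
	mg_wait(host, MG_REG_STATUS_BIT_DATA_REQ, MG_TMAX_WAIT_WR_DRQ);
	outb(MG_REG_CTRL_INTR_ENABLE, base + MG_REG_DRV_CTRL);   /* unmask */
	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)                /* 256 words */
		outw(buff[i], base + MG_BUFF_OFFSET + (i << 1));
	outb(MG_CMD_WR_CONF, base + MG_REG_COMMAND);             /* commit */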
616
617/* This function is also called from IRQ context */
618static void mg_request(struct request_queue *q)
619{
620 struct request *req;
621 struct mg_host *host;
622 u32 sect_num, sect_cnt;
623
624 while (1) {
625 req = elv_next_request(q);
626 if (!req)
627 return;
628
629 host = req->rq_disk->private_data;
630
631		/* skip if a transfer is already in flight (handler pending) */
632 if (host->mg_do_intr)
633 return;
634
635 del_timer(&host->timer);
636
637 sect_num = req->sector;
638		/* handle whole segments at once */
639 sect_cnt = req->nr_sectors;
640
641 /* sanity check */
642 if (sect_num >= get_capacity(req->rq_disk) ||
643 ((sect_num + sect_cnt) >
644 get_capacity(req->rq_disk))) {
645 printk(KERN_WARNING
646			       "%s: bad access: sector=%u, count=%u\n",
647 req->rq_disk->disk_name,
648 sect_num, sect_cnt);
649 end_request(req, 0);
650 continue;
651 }
652
653 if (!blk_fs_request(req))
654 return;
655
656 if (!mg_issue_req(req, host, sect_num, sect_cnt))
657 return;
658 }
659}
660
661static int mg_getgeo(struct block_device *bdev, struct hd_geometry *geo)
662{
663 struct mg_host *host = bdev->bd_disk->private_data;
664
665 geo->cylinders = (unsigned short)host->cyls;
666 geo->heads = (unsigned char)host->heads;
667 geo->sectors = (unsigned char)host->sectors;
668 return 0;
669}
670
671static struct block_device_operations mg_disk_ops = {
672 .getgeo = mg_getgeo
673};
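
mg_getgeo() backs the HDIO_GETGEO ioctl, so the translated C/H/S geometry is visible from userspace. A minimal sketch, assuming the first disk shows up as /dev/mgda (MG_DISK_NAME "a"):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/hdreg.h>

	int main(void)
	{
		struct hd_geometry geo;
		int fd = open("/dev/mgda", O_RDONLY);

		if (fd < 0 || ioctl(fd, HDIO_GETGEO, &geo) < 0)
			return 1;
		printf("C/H/S = %d/%d/%d\n",
		       geo.cylinders, geo.heads, geo.sectors);
		return 0;
	}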
674
675static int mg_suspend(struct platform_device *plat_dev, pm_message_t state)
676{
677 struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
678 struct mg_host *host = prv_data->host;
679
680 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
681 return -EIO;
682
683 if (!prv_data->use_polling)
684 outb(MG_REG_CTRL_INTR_DISABLE,
685 (unsigned long)host->dev_base +
686 MG_REG_DRV_CTRL);
687
688 outb(MG_CMD_SLEEP, (unsigned long)host->dev_base + MG_REG_COMMAND);
689	/* wait until mflash enters deep sleep */
690 msleep(1);
691
692 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD)) {
693 if (!prv_data->use_polling)
694 outb(MG_REG_CTRL_INTR_ENABLE,
695 (unsigned long)host->dev_base +
696 MG_REG_DRV_CTRL);
697 return -EIO;
698 }
699
700 return 0;
701}
702
703static int mg_resume(struct platform_device *plat_dev)
704{
705 struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
706 struct mg_host *host = prv_data->host;
707
708 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
709 return -EIO;
710
711 outb(MG_CMD_WAKEUP, (unsigned long)host->dev_base + MG_REG_COMMAND);
712 /* wait until mflash wakeup */
713 msleep(1);
714
715 if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
716 return -EIO;
717
718 if (!prv_data->use_polling)
719 outb(MG_REG_CTRL_INTR_ENABLE, (unsigned long)host->dev_base +
720 MG_REG_DRV_CTRL);
721
722 return 0;
723}
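
mg_suspend() and mg_resume() both follow a wait / command / settle / wait shape around MG_CMD_SLEEP and MG_CMD_WAKEUP. A hypothetical helper makes the shared pattern explicit; the real callbacks differ only in the interrupt masking around it:

	/* hypothetical condensation of the shared power-command handshake */
	static int mg_power_cmd(struct mg_host *host, u8 cmd)
	{
		if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
			return -EIO;
		outb(cmd, (unsigned long)host->dev_base + MG_REG_COMMAND);
		msleep(1);	/* give the device time to change power state */
		if (mg_wait(host, MG_STAT_READY, MG_TMAX_CONF_TO_CMD))
			return -EIO;
		return 0;
	}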
724
725static int mg_probe(struct platform_device *plat_dev)
726{
727 struct mg_host *host;
728 struct resource *rsc;
729 struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
730 int err = 0;
731
732 if (!prv_data) {
733		printk(KERN_ERR "%s:%d fail (no platform_data)\n",
734 __func__, __LINE__);
735 err = -EINVAL;
736 goto probe_err;
737 }
738
739 /* alloc mg_host */
740 host = kzalloc(sizeof(struct mg_host), GFP_KERNEL);
741 if (!host) {
742 printk(KERN_ERR "%s:%d fail (no memory for mg_host)\n",
743 __func__, __LINE__);
744 err = -ENOMEM;
745 goto probe_err;
746 }
747 host->major = MG_DISK_MAJ;
748
749	/* link host and platform data to each other */
750 prv_data->host = host;
751 host->dev = &plat_dev->dev;
752
753 /* io remap */
754 rsc = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
755 if (!rsc) {
756 printk(KERN_ERR "%s:%d platform_get_resource fail\n",
757 __func__, __LINE__);
758 err = -EINVAL;
759 goto probe_err_2;
760 }
761	host->dev_base = ioremap(rsc->start, resource_size(rsc));
762 if (!host->dev_base) {
763 printk(KERN_ERR "%s:%d ioremap fail\n",
764 __func__, __LINE__);
765 err = -EIO;
766 goto probe_err_2;
767 }
768	MG_DBG("dev_base = %p\n", host->dev_base);
769
770 /* get reset pin */
771 rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
772 MG_RST_PIN);
773 if (!rsc) {
774 printk(KERN_ERR "%s:%d get reset pin fail\n",
775 __func__, __LINE__);
776 err = -EIO;
777 goto probe_err_3;
778 }
779 host->rst = rsc->start;
780
781 /* init rst pin */
782 err = gpio_request(host->rst, MG_RST_PIN);
783 if (err)
784 goto probe_err_3;
785 gpio_direction_output(host->rst, 1);
786
787 /* reset out pin */
788	if (!(prv_data->dev_attr & MG_DEV_MASK)) {
789		err = -EINVAL;
790		goto probe_err_3a;
791	}
790
791 if (prv_data->dev_attr != MG_BOOT_DEV) {
792 rsc = platform_get_resource_byname(plat_dev, IORESOURCE_IO,
793 MG_RSTOUT_PIN);
794 if (!rsc) {
795 printk(KERN_ERR "%s:%d get reset-out pin fail\n",
796 __func__, __LINE__);
797 err = -EIO;
798 goto probe_err_3a;
799 }
800 host->rstout = rsc->start;
801 err = gpio_request(host->rstout, MG_RSTOUT_PIN);
802 if (err)
803 goto probe_err_3a;
804 gpio_direction_input(host->rstout);
805 }
806
807 /* disk reset */
808 if (prv_data->dev_attr == MG_STORAGE_DEV) {
809		/* if the power-on-reset sequence has not finished, wait for it */
810 err = mg_wait_rstout(host->rstout, MG_TMAX_RSTOUT);
811 if (err)
812 goto probe_err_3b;
813 err = mg_disk_init(host);
814 if (err) {
815 printk(KERN_ERR "%s:%d fail (err code : %d)\n",
816 __func__, __LINE__, err);
817 err = -EIO;
818 goto probe_err_3b;
819 }
820 }
821
822 /* get irq resource */
823 if (!prv_data->use_polling) {
824 host->irq = platform_get_irq(plat_dev, 0);
825 if (host->irq == -ENXIO) {
826 err = host->irq;
827 goto probe_err_3b;
828 }
829 err = request_irq(host->irq, mg_irq,
830 IRQF_DISABLED | IRQF_TRIGGER_RISING,
831 MG_DEV_NAME, host);
832 if (err) {
833 printk(KERN_ERR "%s:%d fail (request_irq err=%d)\n",
834 __func__, __LINE__, err);
835 goto probe_err_3b;
836 }
837
838 }
839
840 /* get disk id */
841 err = mg_get_disk_id(host);
842 if (err) {
843 printk(KERN_ERR "%s:%d fail (err code : %d)\n",
844 __func__, __LINE__, err);
845 err = -EIO;
846 goto probe_err_4;
847 }
848
849 err = register_blkdev(host->major, MG_DISK_NAME);
850 if (err < 0) {
851 printk(KERN_ERR "%s:%d register_blkdev fail (err code : %d)\n",
852 __func__, __LINE__, err);
853 goto probe_err_4;
854 }
855 if (!host->major)
856 host->major = err;
857
858 spin_lock_init(&host->lock);
859
860 if (prv_data->use_polling)
861 host->breq = blk_init_queue(mg_request_poll, &host->lock);
862 else
863 host->breq = blk_init_queue(mg_request, &host->lock);
864
865 if (!host->breq) {
866 err = -ENOMEM;
867 printk(KERN_ERR "%s:%d (blk_init_queue) fail\n",
868 __func__, __LINE__);
869 goto probe_err_5;
870 }
871
872	/* mflash is a random-access device, so the noop elevator fits best */
873 elevator_exit(host->breq->elevator);
874 err = elevator_init(host->breq, "noop");
875 if (err) {
876 printk(KERN_ERR "%s:%d (elevator_init) fail\n",
877 __func__, __LINE__);
878 goto probe_err_6;
879 }
880 blk_queue_max_sectors(host->breq, MG_MAX_SECTS);
881 blk_queue_hardsect_size(host->breq, MG_SECTOR_SIZE);
882
883 init_timer(&host->timer);
884 host->timer.function = mg_times_out;
885 host->timer.data = (unsigned long)host;
886
887 host->gd = alloc_disk(MG_DISK_MAX_PART);
888 if (!host->gd) {
889 printk(KERN_ERR "%s:%d (alloc_disk) fail\n",
890 __func__, __LINE__);
891 err = -ENOMEM;
892 goto probe_err_7;
893 }
894 host->gd->major = host->major;
895 host->gd->first_minor = 0;
896 host->gd->fops = &mg_disk_ops;
897 host->gd->queue = host->breq;
898 host->gd->private_data = host;
899 sprintf(host->gd->disk_name, MG_DISK_NAME"a");
900
901 set_capacity(host->gd, host->n_sectors);
902
903 add_disk(host->gd);
904
905	return 0;
906
907probe_err_7:
908 del_timer_sync(&host->timer);
909probe_err_6:
910 blk_cleanup_queue(host->breq);
911probe_err_5:
912	unregister_blkdev(host->major, MG_DISK_NAME);
913probe_err_4:
914 if (!prv_data->use_polling)
915 free_irq(host->irq, host);
916probe_err_3b:
917	if (prv_data->dev_attr != MG_BOOT_DEV)
918		gpio_free(host->rstout);
918probe_err_3a:
919 gpio_free(host->rst);
920probe_err_3:
921 iounmap(host->dev_base);
922probe_err_2:
923 kfree(host);
924probe_err:
925 return err;
926}
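
mg_probe() unwinds with the kernel's usual label ladder: every failure jumps to the label that releases exactly what was acquired so far, in reverse order of acquisition. The shape, reduced to two hypothetical resources:

	err = acquire_a();		/* e.g. kzalloc() */
	if (err)
		goto out;
	err = acquire_b();		/* e.g. ioremap() */
	if (err)
		goto undo_a;
	return 0;
undo_a:
	release_a();
out:
	return err;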
927
928static int mg_remove(struct platform_device *plat_dev)
929{
930 struct mg_drv_data *prv_data = plat_dev->dev.platform_data;
931 struct mg_host *host = prv_data->host;
932 int err = 0;
933
934 /* delete timer */
935 del_timer_sync(&host->timer);
936
937 /* remove disk */
938 if (host->gd) {
939 del_gendisk(host->gd);
940 put_disk(host->gd);
941 }
942 /* remove queue */
943 if (host->breq)
944 blk_cleanup_queue(host->breq);
945
946 /* unregister blk device */
947 unregister_blkdev(host->major, MG_DISK_NAME);
948
949 /* free irq */
950 if (!prv_data->use_polling)
951 free_irq(host->irq, host);
952
953 /* free reset-out pin */
954 if (prv_data->dev_attr != MG_BOOT_DEV)
955 gpio_free(host->rstout);
956
957 /* free rst pin */
958 if (host->rst)
959 gpio_free(host->rst);
960
961 /* unmap io */
962 if (host->dev_base)
963 iounmap(host->dev_base);
964
965 /* free mg_host */
966 kfree(host);
967
968 return err;
969}
970
971static struct platform_driver mg_disk_driver = {
972 .probe = mg_probe,
973 .remove = mg_remove,
974 .suspend = mg_suspend,
975 .resume = mg_resume,
976 .driver = {
977 .name = MG_DEV_NAME,
978 .owner = THIS_MODULE,
979 }
980};
981
982/****************************************************************************
983 *
984 * Module stuff
985 *
986 ****************************************************************************/
987
988static int __init mg_init(void)
989{
990 printk(KERN_INFO "mGine mflash driver, (c) 2008 mGine Co.\n");
991 return platform_driver_register(&mg_disk_driver);
992}
993
994static void __exit mg_exit(void)
995{
996 printk(KERN_INFO "mflash driver : bye bye\n");
997 platform_driver_unregister(&mg_disk_driver);
998}
999
1000module_init(mg_init);
1001module_exit(mg_exit);
1002
1003MODULE_LICENSE("GPL");
1004MODULE_AUTHOR("unsik Kim <donari75@gmail.com>");
1005MODULE_DESCRIPTION("mGine m[g]flash device driver");
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index e03660964e02..ba54c834a590 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -117,7 +117,6 @@ enum rq_flag_bits {
117 __REQ_RW_META, /* metadata io request */ 117 __REQ_RW_META, /* metadata io request */
118 __REQ_COPY_USER, /* contains copies of user pages */ 118 __REQ_COPY_USER, /* contains copies of user pages */
119 __REQ_INTEGRITY, /* integrity metadata has been remapped */ 119 __REQ_INTEGRITY, /* integrity metadata has been remapped */
120 __REQ_UNPLUG, /* unplug queue on submission */
121 __REQ_NOIDLE, /* Don't anticipate more IO after this one */ 120 __REQ_NOIDLE, /* Don't anticipate more IO after this one */
122 __REQ_NR_BITS, /* stops here */ 121 __REQ_NR_BITS, /* stops here */
123}; 122};
@@ -145,7 +144,6 @@ enum rq_flag_bits {
145#define REQ_RW_META (1 << __REQ_RW_META) 144#define REQ_RW_META (1 << __REQ_RW_META)
146#define REQ_COPY_USER (1 << __REQ_COPY_USER) 145#define REQ_COPY_USER (1 << __REQ_COPY_USER)
147#define REQ_INTEGRITY (1 << __REQ_INTEGRITY) 146#define REQ_INTEGRITY (1 << __REQ_INTEGRITY)
148#define REQ_UNPLUG (1 << __REQ_UNPLUG)
149#define REQ_NOIDLE (1 << __REQ_NOIDLE) 147#define REQ_NOIDLE (1 << __REQ_NOIDLE)
150 148
151#define BLK_MAX_CDB 16 149#define BLK_MAX_CDB 16
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 7a204256b155..c59b769f62b0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -116,6 +116,7 @@ extern void elv_abort_queue(struct request_queue *);
116extern void elv_completed_request(struct request_queue *, struct request *); 116extern void elv_completed_request(struct request_queue *, struct request *);
117extern int elv_set_request(struct request_queue *, struct request *, gfp_t); 117extern int elv_set_request(struct request_queue *, struct request *, gfp_t);
118extern void elv_put_request(struct request_queue *, struct request *); 118extern void elv_put_request(struct request_queue *, struct request *);
119extern void elv_drain_elevator(struct request_queue *);
119 120
120/* 121/*
121 * io scheduler registration 122 * io scheduler registration
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
new file mode 100644
index 000000000000..1f76b1ebf627
--- /dev/null
+++ b/include/linux/mg_disk.h
@@ -0,0 +1,206 @@
1/*
2 * include/linux/mg_disk.h
3 *
4 * Support for the mGine m[g]flash IO mode.
5 * Based on legacy hd.c
6 *
7 * (c) 2008 mGine Co.,LTD
8 * (c) 2008 unsik Kim <donari75@gmail.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef __MG_DISK_H__
16#define __MG_DISK_H__
17
18#include <linux/blkdev.h>
19#include <linux/ata.h>
20
21/* name for block device */
22#define MG_DISK_NAME "mgd"
23/* name for platform device */
24#define MG_DEV_NAME "mg_disk"
25
26#define MG_DISK_MAJ 0
27#define MG_DISK_MAX_PART 16
28#define MG_SECTOR_SIZE 512
29#define MG_MAX_SECTS 256
30
31/* Register offsets */
32#define MG_BUFF_OFFSET 0x8000
33#define MG_STORAGE_BUFFER_SIZE 0x200
34#define MG_REG_OFFSET 0xC000
35#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
36#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
37#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
38#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
39#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
40#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
41#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
42#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
43#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
44#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
45#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
46
47/* "Drive Select/Head Register" bit values */
48#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
49#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
50#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
51#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
52
53
54/* "Device Control Register" bit values */
55#define MG_REG_CTRL_INTR_ENABLE 0x0
56#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
57#define MG_REG_CTRL_RESET (0x1<<2)
58#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
59#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
60#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
61#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
62#define MG_REG_CTRL_DPD_DISABLE 0x0
63#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
64
65/* Status register bit */
66/* error bit in status register */
67#define MG_REG_STATUS_BIT_ERROR 0x01
68/* corrected error in status register */
69#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
70/* data request bit in status register */
71#define MG_REG_STATUS_BIT_DATA_REQ 0x08
72/* DSC - Drive Seek Complete */
73#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
74/* DWF - Drive Write Fault */
75#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
76#define MG_REG_STATUS_BIT_READY 0x40
77#define MG_REG_STATUS_BIT_BUSY 0x80
78
79/* handy status */
80#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
81#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
82 (MG_REG_STATUS_BIT_BUSY | \
83 MG_REG_STATUS_BIT_WRITE_FAULT | \
84 MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
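
/*
 * Worked example: a status byte of 0x50 (READY | SEEK_DONE) satisfies
 * MG_READY_OK(); 0xD0 (BUSY also set) and 0x51 (ERROR also set) fail it.
 */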
85
86/* Error register */
87#define MG_REG_ERR_AMNF 0x01
88#define MG_REG_ERR_ABRT 0x04
89#define MG_REG_ERR_IDNF 0x10
90#define MG_REG_ERR_UNC 0x40
91#define MG_REG_ERR_BBK 0x80
92
93/* error code for others */
94#define MG_ERR_NONE 0
95#define MG_ERR_TIMEOUT 0x100
96#define MG_ERR_INIT_STAT 0x101
97#define MG_ERR_TRANSLATION 0x102
98#define MG_ERR_CTRL_RST 0x103
99#define MG_ERR_INV_STAT 0x104
100#define MG_ERR_RSTOUT 0x105
101
102#define MG_MAX_ERRORS 6 /* Max read/write errors */
103
104/* command */
105#define MG_CMD_RD 0x20
106#define MG_CMD_WR 0x30
107#define MG_CMD_SLEEP 0x99
108#define MG_CMD_WAKEUP 0xC3
109#define MG_CMD_ID 0xEC
110#define MG_CMD_WR_CONF 0x3C
111#define MG_CMD_RD_CONF 0x40
112
113/* operation mode */
114#define MG_OP_CASCADE (1 << 0)
115#define MG_OP_CASCADE_SYNC_RD (1 << 1)
116#define MG_OP_CASCADE_SYNC_WR (1 << 2)
117#define MG_OP_INTERLEAVE (1 << 3)
118
119/* synchronous */
120#define MG_BURST_LAT_4 (3 << 4)
121#define MG_BURST_LAT_5 (4 << 4)
122#define MG_BURST_LAT_6 (5 << 4)
123#define MG_BURST_LAT_7 (6 << 4)
124#define MG_BURST_LAT_8 (7 << 4)
125#define MG_BURST_LEN_4 (1 << 1)
126#define MG_BURST_LEN_8 (2 << 1)
127#define MG_BURST_LEN_16 (3 << 1)
128#define MG_BURST_LEN_32 (4 << 1)
129#define MG_BURST_LEN_CONT (0 << 1)
130
131/* timeout value (unit: ms) */
132#define MG_TMAX_CONF_TO_CMD 1
133#define MG_TMAX_WAIT_RD_DRQ 10
134#define MG_TMAX_WAIT_WR_DRQ 500
135#define MG_TMAX_RST_TO_BUSY 10
136#define MG_TMAX_HDRST_TO_RDY 500
137#define MG_TMAX_SWRST_TO_RDY 500
138#define MG_TMAX_RSTOUT 3000
139
140/* device attribution */
141/* use mflash as boot device */
142#define MG_BOOT_DEV (1 << 0)
143/* use mflash as storage device */
144#define MG_STORAGE_DEV (1 << 1)
145/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
146#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
147
148#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
149
150/* names of GPIO resource */
151#define MG_RST_PIN "mg_rst"
152/* except MG_BOOT_DEV, reset-out pin should be assigned */
153#define MG_RSTOUT_PIN "mg_rstout"
154
155/* private driver data */
156struct mg_drv_data {
157	/* set to 1 to run the driver in polling mode */
158 u32 use_polling;
159
160 /* device attribution */
161 u32 dev_attr;
162
163 /* internally used */
164 struct mg_host *host;
165};
166
167/* main structure for mflash driver */
168struct mg_host {
169 struct device *dev;
170
171 struct request_queue *breq;
172 spinlock_t lock;
173 struct gendisk *gd;
174
175 struct timer_list timer;
176 void (*mg_do_intr) (struct mg_host *);
177
178 u16 id[ATA_ID_WORDS];
179
180 u16 cyls;
181 u16 heads;
182 u16 sectors;
183 u32 n_sectors;
184 u32 nres_sectors;
185
186 void __iomem *dev_base;
187 unsigned int irq;
188 unsigned int rst;
189 unsigned int rstout;
190
191 u32 major;
192 u32 error;
193};
194
195/*
196 * Debugging macro and defines
197 */
198#undef DO_MG_DEBUG
199#ifdef DO_MG_DEBUG
200# define MG_DBG(fmt, args...) \
201 printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
202#else /* !DO_MG_DEBUG */
203# define MG_DBG(fmt, args...) do { } while (0)
204#endif /* DO_MG_DEBUG */
205
206#endif