author	Linus Torvalds <torvalds@linux-foundation.org>	2009-07-31 15:10:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-07-31 15:10:26 -0400
commit	a5bc92cdf2ab27a15732976004b3755c40740f57 (patch)
tree	ab7ee562f31ed9fddac78c1e17a2ba9eee6cb028
parent	6eb80e00bff341dd09a7ec8b9dba6da8410448bf (diff)
parent	cbb4f2646d77b536ed2b1500ef6641083228ed8f (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block
* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  io context: fix ref counting
  block: make the end_io functions be non-GPL exports
  block: fix improper kobject release in blk_integrity_unregister
  block: always assign default lock to queues
  mg_disk: Add missing ready status check on mg_write()
  mg_disk: fix issue with data integrity on error in mg_write()
  mg_disk: fix reading invalid status when use polling driver
  mg_disk: remove prohibited sleep operation
-rw-r--r--	block/blk-core.c	19
-rw-r--r--	block/blk-integrity.c	1
-rw-r--r--	block/blk-settings.c	7
-rw-r--r--	drivers/block/mg_disk.c	101
-rw-r--r--	include/linux/iocontext.h	2
5 files changed, 71 insertions(+), 59 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 4b45435c6eaf..e3299a77a0d8 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -575,13 +575,6 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 		return NULL;
 	}
 
-	/*
-	 * if caller didn't supply a lock, they get per-queue locking with
-	 * our embedded lock
-	 */
-	if (!lock)
-		lock = &q->__queue_lock;
-
 	q->request_fn		= rfn;
 	q->prep_rq_fn		= NULL;
 	q->unplug_fn		= generic_unplug_device;
@@ -2143,7 +2136,7 @@ bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(blk_end_request);
+EXPORT_SYMBOL(blk_end_request);
 
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
@@ -2164,7 +2157,7 @@ void blk_end_request_all(struct request *rq, int error)
 	pending = blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 	BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(blk_end_request_all);
+EXPORT_SYMBOL(blk_end_request_all);
 
 /**
  * blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2182,7 +2175,7 @@ bool blk_end_request_cur(struct request *rq, int error)
 {
 	return blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(blk_end_request_cur);
+EXPORT_SYMBOL(blk_end_request_cur);
 
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
@@ -2201,7 +2194,7 @@ bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
 {
 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request);
+EXPORT_SYMBOL(__blk_end_request);
 
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
@@ -2222,7 +2215,7 @@ void __blk_end_request_all(struct request *rq, int error)
 	pending = __blk_end_bidi_request(rq, error, blk_rq_bytes(rq), bidi_bytes);
 	BUG_ON(pending);
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_all);
+EXPORT_SYMBOL(__blk_end_request_all);
 
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
@@ -2241,7 +2234,7 @@ bool __blk_end_request_cur(struct request *rq, int error)
 {
 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
-EXPORT_SYMBOL_GPL(__blk_end_request_cur);
+EXPORT_SYMBOL(__blk_end_request_cur);
 
 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		     struct bio *bio)
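The only change across these seven hunks is the dropped _GPL suffix, which widens who may link against the request-completion helpers. A minimal sketch of the distinction, using a hypothetical symbol name that is not part of this patch:

#include <linux/module.h>

/* Hypothetical helper, for illustration only. */
int demo_end_io_helper(void)
{
	return 0;
}
/* Visible to every loadable module, regardless of its license tag: */
EXPORT_SYMBOL(demo_end_io_helper);
/* The stricter variant would limit linkage to GPL-compatible modules:
 * EXPORT_SYMBOL_GPL(demo_end_io_helper); */

MODULE_LICENSE("GPL");

A symbol carries exactly one export macro; the patch swaps the stricter one for the permissive one on each end_io helper so non-GPL block drivers can complete requests through the public API.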
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 73e28d355688..15c630813b1c 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -379,6 +379,7 @@ void blk_integrity_unregister(struct gendisk *disk)
 
 	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
 	kobject_del(&bi->kobj);
+	kobject_put(&bi->kobj);
 	kmem_cache_free(integrity_cachep, bi);
 	disk->integrity = NULL;
 }
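The added kobject_put() matters because kobject_del() only unlinks the object from sysfs; it does not drop the reference taken when the kobject was registered. A minimal sketch of the teardown ordering, with a hypothetical helper name:

#include <linux/kobject.h>

/* Hypothetical teardown helper, for illustration only. */
static void demo_kobj_teardown(struct kobject *kobj)
{
	kobject_uevent(kobj, KOBJ_REMOVE);	/* notify userspace */
	kobject_del(kobj);			/* unlink from sysfs */
	kobject_put(kobj);			/* drop the initial reference */
}

Without the final put, the reference from registration is never released, so here the integrity structure was being freed while its kobject was still live.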
diff --git a/block/blk-settings.c b/block/blk-settings.c
index bd582a7f5310..8a3ea3bba10d 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -165,6 +165,13 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	blk_set_default_limits(&q->limits);
 
 	/*
+	 * If the caller didn't supply a lock, fall back to our embedded
+	 * per-queue locks
+	 */
+	if (!q->queue_lock)
+		q->queue_lock = &q->__queue_lock;
+
+	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
 	blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);
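This hunk pairs with the removal in blk_init_queue_node() above: the NULL-lock fallback moves to blk_queue_make_request(), so every queue ends up with a valid queue_lock, including queues set up by drivers that never pass a lock. A sketch of the idiom, with a hypothetical function name around the field names visible in this patch:

#include <linux/blkdev.h>

/* Sketch only: prefer a caller-supplied lock, otherwise fall back to
 * the spinlock embedded in the queue itself. */
static void demo_assign_queue_lock(struct request_queue *q, spinlock_t *lock)
{
	q->queue_lock = lock ? lock : &q->__queue_lock;
}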
diff --git a/drivers/block/mg_disk.c b/drivers/block/mg_disk.c
index f703f5478246..6d7fbaa92248 100644
--- a/drivers/block/mg_disk.c
+++ b/drivers/block/mg_disk.c
@@ -36,7 +36,6 @@
 
 /* Register offsets */
 #define MG_BUFF_OFFSET			0x8000
-#define MG_STORAGE_BUFFER_SIZE		0x200
 #define MG_REG_OFFSET			0xC000
 #define MG_REG_FEATURE			(MG_REG_OFFSET + 2)	/* write case */
 #define MG_REG_ERROR			(MG_REG_OFFSET + 2)	/* read case */
@@ -219,6 +218,16 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
 	host->error = MG_ERR_NONE;
 	expire = jiffies + msecs_to_jiffies(msec);
 
+	/* These 2 times dummy status read prevents reading invalid
+	 * status. A very little time (3 times of mflash operating clk)
+	 * is required for busy bit is set. Use dummy read instead of
+	 * busy wait, because mflash's PLL is machine dependent.
+	 */
+	if (prv_data->use_polling) {
+		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
+		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
+	}
+
 	status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
 
 	do {
@@ -245,8 +254,6 @@ static unsigned int mg_wait(struct mg_host *host, u32 expect, u32 msec)
 			mg_dump_status("not ready", status, host);
 			return MG_ERR_INV_STAT;
 		}
-		if (prv_data->use_polling)
-			msleep(1);
 
 		status = inb((unsigned long)host->dev_base + MG_REG_STATUS);
 	} while (time_before(cur_jiffies, expire));
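These two mg_wait() hunks work together. The removed msleep(1) is the "prohibited sleep operation": the wait can run in atomic context, where sleeping is not allowed. The added pair of dummy status reads instead gives the device the few clock cycles it needs to raise its busy bit. A sketch of the resulting sleep-free polling pattern, with a hypothetical helper name:

#include <linux/ata.h>
#include <linux/io.h>
#include <linux/jiffies.h>

/* Sketch only: bound the wait with jiffies and keep re-reading the
 * status port rather than calling msleep(). */
static u8 demo_poll_status(unsigned long port, unsigned int timeout_ms)
{
	unsigned long expire = jiffies + msecs_to_jiffies(timeout_ms);
	u8 status = inb(port);

	while ((status & ATA_BUSY) && time_before(jiffies, expire))
		status = inb(port);	/* busy-poll, never sleep */

	return status;
}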
@@ -469,9 +476,18 @@ static unsigned int mg_out(struct mg_host *host,
 	return MG_ERR_NONE;
 }
 
+static void mg_read_one(struct mg_host *host, struct request *req)
+{
+	u16 *buff = (u16 *)req->buffer;
+	u32 i;
+
+	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
+			      (i << 1));
+}
+
 static void mg_read(struct request *req)
 {
-	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
 
 	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
@@ -482,49 +498,65 @@ static void mg_read(struct request *req)
 	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
 
 	do {
-		u16 *buff = (u16 *)req->buffer;
-
 		if (mg_wait(host, ATA_DRQ,
 			    MG_TMAX_WAIT_RD_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-			*buff++ = inw((unsigned long)host->dev_base +
-				      MG_BUFF_OFFSET + (j << 1));
+
+		mg_read_one(host, req);
 
 		outb(MG_CMD_RD_CONF, (unsigned long)host->dev_base +
 		     MG_REG_COMMAND);
 	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
+static void mg_write_one(struct mg_host *host, struct request *req)
+{
+	u16 *buff = (u16 *)req->buffer;
+	u32 i;
+
+	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
+		outw(*buff++, (unsigned long)host->dev_base + MG_BUFF_OFFSET +
+		     (i << 1));
+}
+
 static void mg_write(struct request *req)
 {
-	u32 j;
 	struct mg_host *host = req->rq_disk->private_data;
+	unsigned int rem = blk_rq_sectors(req);
 
-	if (mg_out(host, blk_rq_pos(req), blk_rq_sectors(req),
+	if (mg_out(host, blk_rq_pos(req), rem,
 		   MG_CMD_WR, NULL) != MG_ERR_NONE) {
 		mg_bad_rw_intr(host);
 		return;
 	}
 
 	MG_DBG("requested %d sects (from %ld), buffer=0x%p\n",
-	       blk_rq_sectors(req), blk_rq_pos(req), req->buffer);
+	       rem, blk_rq_pos(req), req->buffer);
+
+	if (mg_wait(host, ATA_DRQ,
+		    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+		mg_bad_rw_intr(host);
+		return;
+	}
 
 	do {
-		u16 *buff = (u16 *)req->buffer;
+		mg_write_one(host, req);
 
-		if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
+		     MG_REG_COMMAND);
+
+		rem--;
+		if (rem > 1 && mg_wait(host, ATA_DRQ,
+				       MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
+			mg_bad_rw_intr(host);
+			return;
+		} else if (mg_wait(host, MG_STAT_READY,
+				   MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE) {
 			mg_bad_rw_intr(host);
 			return;
 		}
-		for (j = 0; j < MG_SECTOR_SIZE >> 1; j++)
-			outw(*buff++, (unsigned long)host->dev_base +
-			     MG_BUFF_OFFSET + (j << 1));
-
-		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
-		     MG_REG_COMMAND);
 	} while (mg_end_request(host, 0, MG_SECTOR_SIZE));
 }
 
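The rewritten mg_write() changes the handshake, not just the factoring: the old loop completed the request right after the final MG_CMD_WR_CONF without ever confirming the device had finished, which is the missing ready check and the data-integrity issue named in the merge summary. The new code waits for DRQ once before the first sector, then after each sector waits for the next DRQ if more data remains, or for MG_STAT_READY on the tail. A simplified sketch of that ordering, assuming the driver's own mg_wait()/mg_write_one() helpers and collapsing the error paths (this is not the patch itself):

/* Sketch only: per-sector write handshake. */
static int demo_write_sectors(struct mg_host *host, struct request *req)
{
	unsigned int rem = blk_rq_sectors(req);

	if (mg_wait(host, ATA_DRQ, MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE)
		return -EIO;			/* device never asked for data */

	while (rem--) {
		mg_write_one(host, req);	/* one sector into the buffer */
		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
		     MG_REG_COMMAND);
		/* more sectors: wait for the next DRQ;
		 * last sector: wait for READY so the data hits the media */
		if (mg_wait(host, rem ? ATA_DRQ : MG_STAT_READY,
			    MG_TMAX_WAIT_WR_DRQ) != MG_ERR_NONE)
			return -EIO;
	}
	return 0;
}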
@@ -532,7 +564,6 @@ static void mg_read_intr(struct mg_host *host)
 {
 	struct request *req = host->req;
 	u32 i;
-	u16 *buff;
 
 	/* check status */
 	do {
@@ -550,13 +581,7 @@ static void mg_read_intr(struct mg_host *host)
 	return;
 
 ok_to_read:
-	/* get current segment of request */
-	buff = (u16 *)req->buffer;
-
-	/* read 1 sector */
-	for (i = 0; i < MG_SECTOR_SIZE >> 1; i++)
-		*buff++ = inw((unsigned long)host->dev_base + MG_BUFF_OFFSET +
-			      (i << 1));
+	mg_read_one(host, req);
 
 	MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
 	       blk_rq_pos(req), blk_rq_sectors(req) - 1, req->buffer);
@@ -575,8 +600,7 @@ ok_to_read:
 static void mg_write_intr(struct mg_host *host)
 {
 	struct request *req = host->req;
-	u32 i, j;
-	u16 *buff;
+	u32 i;
 	bool rem;
 
 	/* check status */
@@ -597,12 +621,7 @@ static void mg_write_intr(struct mg_host *host)
 ok_to_write:
 	if ((rem = mg_end_request(host, 0, MG_SECTOR_SIZE))) {
 		/* write 1 sector and set handler if remains */
-		buff = (u16 *)req->buffer;
-		for (j = 0; j < MG_STORAGE_BUFFER_SIZE >> 1; j++) {
-			outw(*buff, (unsigned long)host->dev_base +
-			     MG_BUFF_OFFSET + (j << 1));
-			buff++;
-		}
+		mg_write_one(host, req);
 		MG_DBG("sector %ld, remaining=%ld, buffer=0x%p\n",
 		       blk_rq_pos(req), blk_rq_sectors(req), req->buffer);
 		host->mg_do_intr = mg_write_intr;
@@ -667,9 +686,6 @@ static unsigned int mg_issue_req(struct request *req,
 			       unsigned int sect_num,
 			       unsigned int sect_cnt)
 {
-	u16 *buff;
-	u32 i;
-
 	switch (rq_data_dir(req)) {
 	case READ:
 		if (mg_out(host, sect_num, sect_cnt, MG_CMD_RD, &mg_read_intr)
@@ -693,12 +709,7 @@ static unsigned int mg_issue_req(struct request *req,
 			mg_bad_rw_intr(host);
 			return host->error;
 		}
-		buff = (u16 *)req->buffer;
-		for (i = 0; i < MG_SECTOR_SIZE >> 1; i++) {
-			outw(*buff, (unsigned long)host->dev_base +
-			     MG_BUFF_OFFSET + (i << 1));
-			buff++;
-		}
+		mg_write_one(host, req);
 		mod_timer(&host->timer, jiffies + 3 * HZ);
 		outb(MG_CMD_WR_CONF, (unsigned long)host->dev_base +
 		     MG_REG_COMMAND);
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index dd05434fa45f..4da4a75c3f1e 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -92,7 +92,7 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 	 * a race).
 	 */
 	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
-		atomic_long_inc(&ioc->refcount);
+		atomic_inc(&ioc->nr_tasks);
 		return ioc;
 	}
 
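This one-liner is the "io context: fix ref counting" change. atomic_long_inc_not_zero() already takes the reference when it succeeds, so the old body bumped refcount twice per link and the io_context could never drop to zero and be freed; nr_tasks, the count of tasks sharing the context, is what was meant to grow. The fixed helper reconstructed from this hunk, with the surrounding lines as context:

/* Sketch of the fixed helper (fields from struct io_context). */
static inline struct io_context *demo_ioc_task_link(struct io_context *ioc)
{
	/* inc_not_zero already took our reference on success ... */
	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
		/* ... so only the task-sharing counter grows here */
		atomic_inc(&ioc->nr_tasks);
		return ioc;
	}
	return NULL;
}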