summaryrefslogtreecommitdiffstats
path: root/drivers/cdrom/gdrom.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2018-10-15 11:01:40 -0400
committerJens Axboe <axboe@kernel.dk>2018-10-16 11:50:39 -0400
commitad5fc6bb72214615f300af1f4ed57f71bc3be510 (patch)
tree25c54c32cc33e2fd4efcfa467894cf96c2ce6da3 /drivers/cdrom/gdrom.c
parenta9f38e1dec107af70d81338332494bf0a1e76597 (diff)
gdrom: convert to blk-mq
Ditch the deferred list, lock, and workqueue handling. Just mark the set as being blocking, so we are invoked from a workqueue already. Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'drivers/cdrom/gdrom.c')
-rw-r--r--drivers/cdrom/gdrom.c174
1 file changed, 79 insertions, 95 deletions
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index ae3a7537cf0f..757e85b81879 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -31,12 +31,11 @@
31#include <linux/cdrom.h> 31#include <linux/cdrom.h>
32#include <linux/genhd.h> 32#include <linux/genhd.h>
33#include <linux/bio.h> 33#include <linux/bio.h>
34#include <linux/blkdev.h> 34#include <linux/blk-mq.h>
35#include <linux/interrupt.h> 35#include <linux/interrupt.h>
36#include <linux/device.h> 36#include <linux/device.h>
37#include <linux/mutex.h> 37#include <linux/mutex.h>
38#include <linux/wait.h> 38#include <linux/wait.h>
39#include <linux/workqueue.h>
40#include <linux/platform_device.h> 39#include <linux/platform_device.h>
41#include <scsi/scsi.h> 40#include <scsi/scsi.h>
42#include <asm/io.h> 41#include <asm/io.h>
@@ -102,11 +101,6 @@ static int gdrom_major;
102static DECLARE_WAIT_QUEUE_HEAD(command_queue); 101static DECLARE_WAIT_QUEUE_HEAD(command_queue);
103static DECLARE_WAIT_QUEUE_HEAD(request_queue); 102static DECLARE_WAIT_QUEUE_HEAD(request_queue);
104 103
105static DEFINE_SPINLOCK(gdrom_lock);
106static void gdrom_readdisk_dma(struct work_struct *work);
107static DECLARE_WORK(work, gdrom_readdisk_dma);
108static LIST_HEAD(gdrom_deferred);
109
110struct gdromtoc { 104struct gdromtoc {
111 unsigned int entry[99]; 105 unsigned int entry[99];
112 unsigned int first, last; 106 unsigned int first, last;
@@ -122,6 +116,7 @@ static struct gdrom_unit {
122 char disk_type; 116 char disk_type;
123 struct gdromtoc *toc; 117 struct gdromtoc *toc;
124 struct request_queue *gdrom_rq; 118 struct request_queue *gdrom_rq;
119 struct blk_mq_tag_set tag_set;
125} gd; 120} gd;
126 121
127struct gdrom_id { 122struct gdrom_id {
@@ -584,103 +579,83 @@ static int gdrom_set_interrupt_handlers(void)
584 * 9 -> sectors >> 8 579 * 9 -> sectors >> 8
585 * 10 -> sectors 580 * 10 -> sectors
586 */ 581 */
587static void gdrom_readdisk_dma(struct work_struct *work) 582static blk_status_t gdrom_readdisk_dma(struct request *req)
588{ 583{
589 int block, block_cnt; 584 int block, block_cnt;
590 blk_status_t err; 585 blk_status_t err;
591 struct packet_command *read_command; 586 struct packet_command *read_command;
592 struct list_head *elem, *next;
593 struct request *req;
594 unsigned long timeout; 587 unsigned long timeout;
595 588
596 if (list_empty(&gdrom_deferred))
597 return;
598 read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL); 589 read_command = kzalloc(sizeof(struct packet_command), GFP_KERNEL);
599 if (!read_command) 590 if (!read_command)
600 return; /* get more memory later? */ 591 return BLK_STS_RESOURCE;
592
601 read_command->cmd[0] = 0x30; 593 read_command->cmd[0] = 0x30;
602 read_command->cmd[1] = 0x20; 594 read_command->cmd[1] = 0x20;
603 spin_lock(&gdrom_lock); 595 block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET;
604 list_for_each_safe(elem, next, &gdrom_deferred) { 596 block_cnt = blk_rq_sectors(req)/GD_TO_BLK;
605 req = list_entry(elem, struct request, queuelist); 597 __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG);
606 spin_unlock(&gdrom_lock); 598 __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG);
607 block = blk_rq_pos(req)/GD_TO_BLK + GD_SESSION_OFFSET; 599 __raw_writel(1, GDROM_DMA_DIRECTION_REG);
608 block_cnt = blk_rq_sectors(req)/GD_TO_BLK; 600 __raw_writel(1, GDROM_DMA_ENABLE_REG);
609 __raw_writel(virt_to_phys(bio_data(req->bio)), GDROM_DMA_STARTADDR_REG); 601 read_command->cmd[2] = (block >> 16) & 0xFF;
610 __raw_writel(block_cnt * GDROM_HARD_SECTOR, GDROM_DMA_LENGTH_REG); 602 read_command->cmd[3] = (block >> 8) & 0xFF;
611 __raw_writel(1, GDROM_DMA_DIRECTION_REG); 603 read_command->cmd[4] = block & 0xFF;
612 __raw_writel(1, GDROM_DMA_ENABLE_REG); 604 read_command->cmd[8] = (block_cnt >> 16) & 0xFF;
613 read_command->cmd[2] = (block >> 16) & 0xFF; 605 read_command->cmd[9] = (block_cnt >> 8) & 0xFF;
614 read_command->cmd[3] = (block >> 8) & 0xFF; 606 read_command->cmd[10] = block_cnt & 0xFF;
615 read_command->cmd[4] = block & 0xFF; 607 /* set for DMA */
616 read_command->cmd[8] = (block_cnt >> 16) & 0xFF; 608 __raw_writeb(1, GDROM_ERROR_REG);
617 read_command->cmd[9] = (block_cnt >> 8) & 0xFF; 609 /* other registers */
618 read_command->cmd[10] = block_cnt & 0xFF; 610 __raw_writeb(0, GDROM_SECNUM_REG);
619 /* set for DMA */ 611 __raw_writeb(0, GDROM_BCL_REG);
620 __raw_writeb(1, GDROM_ERROR_REG); 612 __raw_writeb(0, GDROM_BCH_REG);
621 /* other registers */ 613 __raw_writeb(0, GDROM_DSEL_REG);
622 __raw_writeb(0, GDROM_SECNUM_REG); 614 __raw_writeb(0, GDROM_INTSEC_REG);
623 __raw_writeb(0, GDROM_BCL_REG); 615 /* Wait for registers to reset after any previous activity */
624 __raw_writeb(0, GDROM_BCH_REG); 616 timeout = jiffies + HZ / 2;
625 __raw_writeb(0, GDROM_DSEL_REG); 617 while (gdrom_is_busy() && time_before(jiffies, timeout))
626 __raw_writeb(0, GDROM_INTSEC_REG); 618 cpu_relax();
627 /* Wait for registers to reset after any previous activity */ 619 __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG);
628 timeout = jiffies + HZ / 2; 620 timeout = jiffies + HZ / 2;
629 while (gdrom_is_busy() && time_before(jiffies, timeout)) 621 /* Wait for packet command to finish */
630 cpu_relax(); 622 while (gdrom_is_busy() && time_before(jiffies, timeout))
631 __raw_writeb(GDROM_COM_PACKET, GDROM_STATUSCOMMAND_REG); 623 cpu_relax();
632 timeout = jiffies + HZ / 2; 624 gd.pending = 1;
633 /* Wait for packet command to finish */ 625 gd.transfer = 1;
634 while (gdrom_is_busy() && time_before(jiffies, timeout)) 626 outsw(GDROM_DATA_REG, &read_command->cmd, 6);
635 cpu_relax(); 627 timeout = jiffies + HZ / 2;
636 gd.pending = 1; 628 /* Wait for any pending DMA to finish */
637 gd.transfer = 1; 629 while (__raw_readb(GDROM_DMA_STATUS_REG) &&
638 outsw(GDROM_DATA_REG, &read_command->cmd, 6); 630 time_before(jiffies, timeout))
639 timeout = jiffies + HZ / 2; 631 cpu_relax();
640 /* Wait for any pending DMA to finish */ 632 /* start transfer */
641 while (__raw_readb(GDROM_DMA_STATUS_REG) && 633 __raw_writeb(1, GDROM_DMA_STATUS_REG);
642 time_before(jiffies, timeout)) 634 wait_event_interruptible_timeout(request_queue,
643 cpu_relax(); 635 gd.transfer == 0, GDROM_DEFAULT_TIMEOUT);
644 /* start transfer */ 636 err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK;
645 __raw_writeb(1, GDROM_DMA_STATUS_REG); 637 gd.transfer = 0;
646 wait_event_interruptible_timeout(request_queue, 638 gd.pending = 0;
647 gd.transfer == 0, GDROM_DEFAULT_TIMEOUT); 639
648 err = gd.transfer ? BLK_STS_IOERR : BLK_STS_OK; 640 blk_mq_end_request(req, err);
649 gd.transfer = 0;
650 gd.pending = 0;
651 /* now seek to take the request spinlock
652 * before handling ending the request */
653 spin_lock(&gdrom_lock);
654 list_del_init(&req->queuelist);
655 __blk_end_request_all(req, err);
656 }
657 spin_unlock(&gdrom_lock);
658 kfree(read_command); 641 kfree(read_command);
642 return BLK_STS_OK;
659} 643}
660 644
661static void gdrom_request(struct request_queue *rq) 645static blk_status_t gdrom_queue_rq(struct blk_mq_hw_ctx *hctx,
662{ 646 const struct blk_mq_queue_data *bd)
663 struct request *req; 647{
664 648 blk_mq_start_request(bd->rq);
665 while ((req = blk_fetch_request(rq)) != NULL) { 649
666 switch (req_op(req)) { 650 switch (req_op(bd->rq)) {
667 case REQ_OP_READ: 651 case REQ_OP_READ:
668 /* 652 return gdrom_readdisk_dma(bd->rq);
669 * Add to list of deferred work and then schedule 653 case REQ_OP_WRITE:
670 * workqueue. 654 pr_notice("Read only device - write request ignored\n");
671 */ 655 return BLK_STS_IOERR;
672 list_add_tail(&req->queuelist, &gdrom_deferred); 656 default:
673 schedule_work(&work); 657 printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
674 break; 658 return BLK_STS_IOERR;
675 case REQ_OP_WRITE:
676 pr_notice("Read only device - write request ignored\n");
677 __blk_end_request_all(req, BLK_STS_IOERR);
678 break;
679 default:
680 printk(KERN_DEBUG "gdrom: Non-fs request ignored\n");
681 __blk_end_request_all(req, BLK_STS_IOERR);
682 break;
683 }
684 } 659 }
685} 660}
686 661
@@ -768,6 +743,10 @@ static int probe_gdrom_setupqueue(void)
768 return gdrom_init_dma_mode(); 743 return gdrom_init_dma_mode();
769} 744}
770 745
746static const struct blk_mq_ops gdrom_mq_ops = {
747 .queue_rq = gdrom_queue_rq,
748};
749
771/* 750/*
772 * register this as a block device and as compliant with the 751 * register this as a block device and as compliant with the
773 * universal CD Rom driver interface 752 * universal CD Rom driver interface
@@ -811,11 +790,15 @@ static int probe_gdrom(struct platform_device *devptr)
811 err = gdrom_set_interrupt_handlers(); 790 err = gdrom_set_interrupt_handlers();
812 if (err) 791 if (err)
813 goto probe_fail_cmdirq_register; 792 goto probe_fail_cmdirq_register;
814 gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock); 793
815 if (!gd.gdrom_rq) { 794 gd.gdrom_rq = blk_mq_init_sq_queue(&gd.tag_set, &gdrom_mq_ops, 1,
816 err = -ENOMEM; 795 BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_BLOCKING);
796 if (IS_ERR(gd.gdrom_rq)) {
797 rc = PTR_ERR(gd.gdrom_rq);
798 gd.gdrom_rq = NULL;
817 goto probe_fail_requestq; 799 goto probe_fail_requestq;
818 } 800 }
801
819 blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH); 802 blk_queue_bounce_limit(gd.gdrom_rq, BLK_BOUNCE_HIGH);
820 803
821 err = probe_gdrom_setupqueue(); 804 err = probe_gdrom_setupqueue();
@@ -832,6 +815,7 @@ static int probe_gdrom(struct platform_device *devptr)
832 815
833probe_fail_toc: 816probe_fail_toc:
834 blk_cleanup_queue(gd.gdrom_rq); 817 blk_cleanup_queue(gd.gdrom_rq);
818 blk_mq_free_tag_set(&gd.tag_set);
835probe_fail_requestq: 819probe_fail_requestq:
836 free_irq(HW_EVENT_GDROM_DMA, &gd); 820 free_irq(HW_EVENT_GDROM_DMA, &gd);
837 free_irq(HW_EVENT_GDROM_CMD, &gd); 821 free_irq(HW_EVENT_GDROM_CMD, &gd);
@@ -849,8 +833,8 @@ probe_fail_no_mem:
849 833
850static int remove_gdrom(struct platform_device *devptr) 834static int remove_gdrom(struct platform_device *devptr)
851{ 835{
852 flush_work(&work);
853 blk_cleanup_queue(gd.gdrom_rq); 836 blk_cleanup_queue(gd.gdrom_rq);
837 blk_mq_free_tag_set(&gd.tag_set);
854 free_irq(HW_EVENT_GDROM_CMD, &gd); 838 free_irq(HW_EVENT_GDROM_CMD, &gd);
855 free_irq(HW_EVENT_GDROM_DMA, &gd); 839 free_irq(HW_EVENT_GDROM_DMA, &gd);
856 del_gendisk(gd.disk); 840 del_gendisk(gd.disk);