-rw-r--r--  block/cfq-iosched.c       | 34
-rw-r--r--  block/ll_rw_blk.c         | 41
-rw-r--r--  drivers/block/pktcdvd.c   |  9
-rw-r--r--  drivers/md/bitmap.c       |  3
-rw-r--r--  drivers/md/dm-table.c     |  3
-rw-r--r--  drivers/md/linear.c       |  3
-rw-r--r--  drivers/md/md.c           |  4
-rw-r--r--  drivers/md/multipath.c    |  3
-rw-r--r--  drivers/md/raid0.c        |  3
-rw-r--r--  drivers/md/raid1.c        |  3
-rw-r--r--  drivers/md/raid10.c       |  3
-rw-r--r--  drivers/md/raid5.c        |  3
-rw-r--r--  drivers/mmc/card/queue.c  |  2
-rw-r--r--  fs/ioprio.c               |  4
-rw-r--r--  include/linux/blkdev.h    |  1

15 files changed, 63 insertions, 56 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index e47a9309eb48..0b4a47905575 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -789,6 +789,20 @@ static inline void cfq_slice_expired(struct cfq_data *cfqd, int timed_out)
 		__cfq_slice_expired(cfqd, cfqq, timed_out);
 }
 
+static int start_idle_class_timer(struct cfq_data *cfqd)
+{
+	unsigned long end = cfqd->last_end_request + CFQ_IDLE_GRACE;
+	unsigned long now = jiffies;
+
+	if (time_before(now, end) &&
+	    time_after_eq(now, cfqd->last_end_request)) {
+		mod_timer(&cfqd->idle_class_timer, end);
+		return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Get next queue for service. Unless we have a queue preemption,
  * we'll simply select the first cfqq in the service tree.
@@ -805,19 +819,14 @@ static struct cfq_queue *cfq_get_next_queue(struct cfq_data *cfqd)
 	cfqq = rb_entry(n, struct cfq_queue, rb_node);
 
 	if (cfq_class_idle(cfqq)) {
-		unsigned long end;
-
 		/*
 		 * if we have idle queues and no rt or be queues had
 		 * pending requests, either allow immediate service if
 		 * the grace period has passed or arm the idle grace
 		 * timer
 		 */
-		end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-		if (time_before(jiffies, end)) {
-			mod_timer(&cfqd->idle_class_timer, end);
+		if (start_idle_class_timer(cfqd))
 			cfqq = NULL;
-		}
 	}
 
 	return cfqq;
@@ -2036,17 +2045,14 @@ out_cont:
 static void cfq_idle_class_timer(unsigned long data)
 {
 	struct cfq_data *cfqd = (struct cfq_data *) data;
-	unsigned long flags, end;
+	unsigned long flags;
 
 	spin_lock_irqsave(cfqd->queue->queue_lock, flags);
 
 	/*
 	 * race with a non-idle queue, reset timer
 	 */
-	end = cfqd->last_end_request + CFQ_IDLE_GRACE;
-	if (!time_after_eq(jiffies, end))
-		mod_timer(&cfqd->idle_class_timer, end);
-	else
+	if (!start_idle_class_timer(cfqd))
 		cfq_schedule_dispatch(cfqd);
 
 	spin_unlock_irqrestore(cfqd->queue->queue_lock, flags);
@@ -2068,9 +2074,10 @@ static void cfq_put_async_queues(struct cfq_data *cfqd)
 			cfq_put_queue(cfqd->async_cfqq[0][i]);
 		if (cfqd->async_cfqq[1][i])
 			cfq_put_queue(cfqd->async_cfqq[1][i]);
-		if (cfqd->async_idle_cfqq)
-			cfq_put_queue(cfqd->async_idle_cfqq);
 	}
+
+	if (cfqd->async_idle_cfqq)
+		cfq_put_queue(cfqd->async_idle_cfqq);
 }
 
 static void cfq_exit_queue(elevator_t *e)
@@ -2125,6 +2132,7 @@ static void *cfq_init_queue(struct request_queue *q)
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
+	cfqd->last_end_request = jiffies;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];
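
The new start_idle_class_timer() helper arms the idle-class timer only while jiffies still lies inside the grace window [last_end_request, last_end_request + CFQ_IDLE_GRACE); the additional time_after_eq() check against last_end_request itself, together with initializing last_end_request to jiffies in cfq_init_queue(), keeps a stale or wrapped timestamp from arming a timer far in the future. Below is a minimal userspace sketch of that window test, with the jiffies comparison macros re-implemented locally and CFQ_IDLE_GRACE given an illustrative value (the real constant is defined in cfq-iosched.c):

/*
 * Standalone sketch, not kernel code: the window test used by
 * start_idle_class_timer(), with simplified local versions of the
 * jiffies comparison macros so the example compiles in userspace.
 */
#include <stdio.h>

#define time_after_eq(a, b)	((long)((a) - (b)) >= 0)
#define time_before(a, b)	((long)((a) - (b)) < 0)
#define CFQ_IDLE_GRACE		100UL	/* illustrative value only */

static int should_arm_timer(unsigned long now, unsigned long last_end_request)
{
	unsigned long end = last_end_request + CFQ_IDLE_GRACE;

	/* arm only if "now" lies inside [last_end_request, end) */
	return time_before(now, end) && time_after_eq(now, last_end_request);
}

int main(void)
{
	printf("%d\n", should_arm_timer(1050, 1000));	/* 1: inside the grace window */
	printf("%d\n", should_arm_timer(1200, 1000));	/* 0: grace period already passed */
	printf("%d\n", should_arm_timer(900, 1000));	/* 0: stale/wrapped timestamp, no bogus timer */
	return 0;
}
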
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 75c98d58f4dd..3b927be03850 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1143,22 +1143,9 @@ EXPORT_SYMBOL(blk_queue_start_tag);
 void blk_queue_invalidate_tags(struct request_queue *q)
 {
 	struct list_head *tmp, *n;
-	struct request *rq;
-
-	list_for_each_safe(tmp, n, &q->tag_busy_list) {
-		rq = list_entry_rq(tmp);
 
-		if (rq->tag == -1) {
-			printk(KERN_ERR
-			       "%s: bad tag found on list\n", __FUNCTION__);
-			list_del_init(&rq->queuelist);
-			rq->cmd_flags &= ~REQ_QUEUED;
-		} else
-			blk_queue_end_tag(q, rq);
-
-		rq->cmd_flags &= ~REQ_STARTED;
-		__elv_add_request(q, rq, ELEVATOR_INSERT_BACK, 0);
-	}
+	list_for_each_safe(tmp, n, &q->tag_busy_list)
+		blk_requeue_request(q, list_entry_rq(tmp));
 }
 
 EXPORT_SYMBOL(blk_queue_invalidate_tags);
@@ -1634,15 +1621,7 @@ static void blk_backing_dev_unplug(struct backing_dev_info *bdi,
 {
 	struct request_queue *q = bdi->unplug_io_data;
 
-	/*
-	 * devices don't necessarily have an ->unplug_fn defined
-	 */
-	if (q->unplug_fn) {
-		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
-				q->rq.count[READ] + q->rq.count[WRITE]);
-
-		q->unplug_fn(q);
-	}
+	blk_unplug(q);
 }
 
 static void blk_unplug_work(struct work_struct *work)
@@ -1666,6 +1645,20 @@ static void blk_unplug_timeout(unsigned long data)
 	kblockd_schedule_work(&q->unplug_work);
 }
 
+void blk_unplug(struct request_queue *q)
+{
+	/*
+	 * devices don't necessarily have an ->unplug_fn defined
+	 */
+	if (q->unplug_fn) {
+		blk_add_trace_pdu_int(q, BLK_TA_UNPLUG_IO, NULL,
+				q->rq.count[READ] + q->rq.count[WRITE]);
+
+		q->unplug_fn(q);
+	}
+}
+EXPORT_SYMBOL(blk_unplug);
+
 /**
  * blk_start_queue - restart a previously stopped queue
  * @q: The &struct request_queue in question
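
blk_backing_dev_unplug() previously open-coded the "check ->unplug_fn, emit the blktrace event, then call it" sequence; that sequence now lives in the exported blk_unplug() helper (declared in blkdev.h below), so stacking drivers can call it without testing ->unplug_fn themselves. A hedged sketch of the resulting call-site pattern, mirroring the md/dm conversions that follow; struct example_conf and its fields are invented for the illustration and are not part of any driver:

/*
 * Illustrative call-site pattern only: with blk_unplug() exported, a
 * stacking driver just calls it per member queue, and the helper itself
 * skips queues that have no ->unplug_fn.
 */
#include <linux/blkdev.h>

struct example_conf {
	struct block_device *bdev[8];	/* hypothetical member devices */
	int nr_disks;
};

static void example_unplug_slaves(struct example_conf *conf)
{
	int i;

	for (i = 0; i < conf->nr_disks; i++) {
		struct request_queue *r_queue = bdev_get_queue(conf->bdev[i]);

		blk_unplug(r_queue);
	}
}
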
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index a8130a4ad6d4..a5ee21319d37 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -358,10 +358,19 @@ static ssize_t class_pktcdvd_store_add(struct class *c, const char *buf,
 					size_t count)
 {
 	unsigned int major, minor;
+
 	if (sscanf(buf, "%u:%u", &major, &minor) == 2) {
+		/* pkt_setup_dev() expects caller to hold reference to self */
+		if (!try_module_get(THIS_MODULE))
+			return -ENODEV;
+
 		pkt_setup_dev(MKDEV(major, minor), NULL);
+
+		module_put(THIS_MODULE);
+
 		return count;
 	}
+
 	return -EINVAL;
 }
 
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 7c426d07a555..1b1ef3130e6e 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -1207,8 +1207,7 @@ int bitmap_startwrite(struct bitmap *bitmap, sector_t offset, unsigned long sect
 			prepare_to_wait(&bitmap->overflow_wait, &__wait,
 					TASK_UNINTERRUPTIBLE);
 			spin_unlock_irq(&bitmap->lock);
-			bitmap->mddev->queue
-				->unplug_fn(bitmap->mddev->queue);
+			blk_unplug(bitmap->mddev->queue);
 			schedule();
 			finish_wait(&bitmap->overflow_wait, &__wait);
 			continue;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 5a7eb650181e..e298d8d11f24 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -1000,8 +1000,7 @@ void dm_table_unplug_all(struct dm_table *t)
 		struct dm_dev *dd = list_entry(d, struct dm_dev, list);
 		struct request_queue *q = bdev_get_queue(dd->bdev);
 
-		if (q->unplug_fn)
-			q->unplug_fn(q);
+		blk_unplug(q);
 	}
 }
 
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 56a11f6c127b..3dac1cfb8189 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -87,8 +87,7 @@ static void linear_unplug(struct request_queue *q)
 
 	for (i=0; i < mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(conf->disks[i].rdev->bdev);
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 808cd9549456..cef9ebd5a046 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5445,7 +5445,7 @@ void md_do_sync(mddev_t *mddev)
 		 * about not overloading the IO subsystem. (things like an
 		 * e2fsck being done on the RAID array should execute fast)
 		 */
-		mddev->queue->unplug_fn(mddev->queue);
+		blk_unplug(mddev->queue);
 		cond_resched();
 
 		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
@@ -5464,7 +5464,7 @@ void md_do_sync(mddev_t *mddev)
 	 * this also signals 'finished resyncing' to md_stop
 	 */
  out:
-	mddev->queue->unplug_fn(mddev->queue);
+	blk_unplug(mddev->queue);
 
 	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index b35731cceac6..eb631ebed686 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -125,8 +125,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c111105fc2dc..f8e591708d1f 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -35,8 +35,7 @@ static void raid0_unplug(struct request_queue *q)
 	for (i=0; i<mddev->raid_disks; i++) {
 		struct request_queue *r_queue = bdev_get_queue(devlist[i]->bdev);
 
-		if (r_queue->unplug_fn)
-			r_queue->unplug_fn(r_queue);
+		blk_unplug(r_queue);
 	}
 }
 
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 85478d6a9c1a..4a69c416e045 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -549,8 +549,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index fc6607acb6e4..5cdcc9386200 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -593,8 +593,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 82af3465a900..1cfc984cc7b7 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3186,8 +3186,7 @@ static void unplug_slaves(mddev_t *mddev)
 			atomic_inc(&rdev->nr_pending);
 			rcu_read_unlock();
 
-			if (r_queue->unplug_fn)
-				r_queue->unplug_fn(r_queue);
+			blk_unplug(r_queue);
 
 			rdev_dec_pending(rdev, mddev);
 			rcu_read_lock();
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9203a0b221b3..1b9c9b6da5b7 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -310,7 +310,7 @@ static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
 		}
 
 		if (src_size == 0) {
-			src_buf = sg_virt(dst);
+			src_buf = sg_virt(src);
 			src_size = src->length;
 		}
 
diff --git a/fs/ioprio.c b/fs/ioprio.c
index d6ff77e8e7ec..e4e01bc7f338 100644
--- a/fs/ioprio.c
+++ b/fs/ioprio.c
@@ -78,6 +78,10 @@ asmlinkage long sys_ioprio_set(int which, int who, int ioprio)
 			if (!capable(CAP_SYS_ADMIN))
 				return -EPERM;
 			break;
+		case IOPRIO_CLASS_NONE:
+			if (data)
+				return -EINVAL;
+			break;
 		default:
 			return -EINVAL;
 	}
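
With the IOPRIO_CLASS_NONE case added, sys_ioprio_set() accepts a request to reset a task's io priority to "none" as long as the priority data is 0 (non-zero data still returns -EINVAL); CFQ then derives an effective priority from the task's CPU nice value. A small userspace sketch of such a call, invoking the raw syscall since glibc of that era has no ioprio_set() wrapper; the IOPRIO_* constants are copied into the example for self-containment:

/* Userspace sketch: clear the calling task's io priority class. */
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#define IOPRIO_CLASS_SHIFT		13
#define IOPRIO_PRIO_VALUE(class, data)	(((class) << IOPRIO_CLASS_SHIFT) | (data))

enum { IOPRIO_CLASS_NONE, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE };
enum { IOPRIO_WHO_PROCESS = 1, IOPRIO_WHO_PGRP, IOPRIO_WHO_USER };

int main(void)
{
	/* who == 0 with IOPRIO_WHO_PROCESS means the current task */
	int ret = syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
			  IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0));
	if (ret < 0)
		perror("ioprio_set");
	return ret < 0;
}
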
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8396db24d019..d18ee67b40f8 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -697,6 +697,7 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
 extern int blk_verify_command(unsigned char *, int);
+extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {