39 files changed, 498 insertions, 489 deletions
@@ -1,7 +1,7 @@
1 | VERSION = 3 | 1 | VERSION = 3 |
2 | PATCHLEVEL = 1 | 2 | PATCHLEVEL = 1 |
3 | SUBLEVEL = 0 | 3 | SUBLEVEL = 0 |
4 | EXTRAVERSION = -rc9 | 4 | EXTRAVERSION = -rc10 |
5 | NAME = "Divemaster Edition" | 5 | NAME = "Divemaster Edition" |
6 | 6 | ||
7 | # *DOCUMENTATION* | 7 | # *DOCUMENTATION* |
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 48e50f8c1c7e..e3011338ab40 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@ struct nfhd_device {
59 | struct gendisk *disk; | 59 | struct gendisk *disk; |
60 | }; | 60 | }; |
61 | 61 | ||
62 | static int nfhd_make_request(struct request_queue *queue, struct bio *bio) | 62 | static void nfhd_make_request(struct request_queue *queue, struct bio *bio) |
63 | { | 63 | { |
64 | struct nfhd_device *dev = queue->queuedata; | 64 | struct nfhd_device *dev = queue->queuedata; |
65 | struct bio_vec *bvec; | 65 | struct bio_vec *bvec; |
@@ -76,7 +76,6 @@ static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
76 | sec += len; | 76 | sec += len; |
77 | } | 77 | } |
78 | bio_endio(bio, 0); | 78 | bio_endio(bio, 0); |
79 | return 0; | ||
80 | } | 79 | } |
81 | 80 | ||
82 | static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 81 | static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 265f0f09395a..ba4271919062 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -104,7 +104,7 @@ axon_ram_irq_handler(int irq, void *dev)
104 | * axon_ram_make_request - make_request() method for block device | 104 | * axon_ram_make_request - make_request() method for block device |
105 | * @queue, @bio: see blk_queue_make_request() | 105 | * @queue, @bio: see blk_queue_make_request() |
106 | */ | 106 | */ |
107 | static int | 107 | static void |
108 | axon_ram_make_request(struct request_queue *queue, struct bio *bio) | 108 | axon_ram_make_request(struct request_queue *queue, struct bio *bio) |
109 | { | 109 | { |
110 | struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; | 110 | struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; |
@@ -113,7 +113,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
113 | struct bio_vec *vec; | 113 | struct bio_vec *vec; |
114 | unsigned int transfered; | 114 | unsigned int transfered; |
115 | unsigned short idx; | 115 | unsigned short idx; |
116 | int rc = 0; | ||
117 | 116 | ||
118 | phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); | 117 | phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); |
119 | phys_end = bank->io_addr + bank->size; | 118 | phys_end = bank->io_addr + bank->size; |
@@ -121,8 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
121 | bio_for_each_segment(vec, bio, idx) { | 120 | bio_for_each_segment(vec, bio, idx) { |
122 | if (unlikely(phys_mem + vec->bv_len > phys_end)) { | 121 | if (unlikely(phys_mem + vec->bv_len > phys_end)) { |
123 | bio_io_error(bio); | 122 | bio_io_error(bio); |
124 | rc = -ERANGE; | 123 | return; |
125 | break; | ||
126 | } | 124 | } |
127 | 125 | ||
128 | user_mem = page_address(vec->bv_page) + vec->bv_offset; | 126 | user_mem = page_address(vec->bv_page) + vec->bv_offset; |
@@ -135,8 +133,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
135 | transfered += vec->bv_len; | 133 | transfered += vec->bv_len; |
136 | } | 134 | } |
137 | bio_endio(bio, 0); | 135 | bio_endio(bio, 0); |
138 | |||
139 | return rc; | ||
140 | } | 136 | } |
141 | 137 | ||
142 | /** | 138 | /** |
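The m68k and powerpc conversions above follow the same pattern: with this series, make_request handlers no longer return a status, so success and failure are reported only through bio_endio()/bio_io_error(). A minimal sketch of the new convention (illustrative only, not part of the patch; the device type and transfer helper are hypothetical):

    static void example_make_request(struct request_queue *q, struct bio *bio)
    {
            struct example_dev *dev = q->queuedata;         /* hypothetical driver state */

            if (!example_dev_transfer(dev, bio)) {          /* hypothetical helper */
                    bio_io_error(bio);                      /* was: return -EIO / -ERANGE */
                    return;
            }
            bio_endio(bio, 0);                              /* was: return 0 */
    }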
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b596e54ddd71..d61ec5636ce0 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -768,25 +768,14 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
768 | return disk_total; | 768 | return disk_total; |
769 | } | 769 | } |
770 | 770 | ||
771 | static int blkio_check_dev_num(dev_t dev) | ||
772 | { | ||
773 | int part = 0; | ||
774 | struct gendisk *disk; | ||
775 | |||
776 | disk = get_gendisk(dev, &part); | ||
777 | if (!disk || part) | ||
778 | return -ENODEV; | ||
779 | |||
780 | return 0; | ||
781 | } | ||
782 | |||
783 | static int blkio_policy_parse_and_set(char *buf, | 771 | static int blkio_policy_parse_and_set(char *buf, |
784 | struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid) | 772 | struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid) |
785 | { | 773 | { |
774 | struct gendisk *disk = NULL; | ||
786 | char *s[4], *p, *major_s = NULL, *minor_s = NULL; | 775 | char *s[4], *p, *major_s = NULL, *minor_s = NULL; |
787 | int ret; | ||
788 | unsigned long major, minor; | 776 | unsigned long major, minor; |
789 | int i = 0; | 777 | int i = 0, ret = -EINVAL; |
778 | int part; | ||
790 | dev_t dev; | 779 | dev_t dev; |
791 | u64 temp; | 780 | u64 temp; |
792 | 781 | ||
@@ -804,37 +793,36 @@ static int blkio_policy_parse_and_set(char *buf,
804 | } | 793 | } |
805 | 794 | ||
806 | if (i != 2) | 795 | if (i != 2) |
807 | return -EINVAL; | 796 | goto out; |
808 | 797 | ||
809 | p = strsep(&s[0], ":"); | 798 | p = strsep(&s[0], ":"); |
810 | if (p != NULL) | 799 | if (p != NULL) |
811 | major_s = p; | 800 | major_s = p; |
812 | else | 801 | else |
813 | return -EINVAL; | 802 | goto out; |
814 | 803 | ||
815 | minor_s = s[0]; | 804 | minor_s = s[0]; |
816 | if (!minor_s) | 805 | if (!minor_s) |
817 | return -EINVAL; | 806 | goto out; |
818 | 807 | ||
819 | ret = strict_strtoul(major_s, 10, &major); | 808 | if (strict_strtoul(major_s, 10, &major)) |
820 | if (ret) | 809 | goto out; |
821 | return -EINVAL; | ||
822 | 810 | ||
823 | ret = strict_strtoul(minor_s, 10, &minor); | 811 | if (strict_strtoul(minor_s, 10, &minor)) |
824 | if (ret) | 812 | goto out; |
825 | return -EINVAL; | ||
826 | 813 | ||
827 | dev = MKDEV(major, minor); | 814 | dev = MKDEV(major, minor); |
828 | 815 | ||
829 | ret = strict_strtoull(s[1], 10, &temp); | 816 | if (strict_strtoull(s[1], 10, &temp)) |
830 | if (ret) | 817 | goto out; |
831 | return -EINVAL; | ||
832 | 818 | ||
833 | /* For rule removal, do not check for device presence. */ | 819 | /* For rule removal, do not check for device presence. */ |
834 | if (temp) { | 820 | if (temp) { |
835 | ret = blkio_check_dev_num(dev); | 821 | disk = get_gendisk(dev, &part); |
836 | if (ret) | 822 | if (!disk || part) { |
837 | return ret; | 823 | ret = -ENODEV; |
824 | goto out; | ||
825 | } | ||
838 | } | 826 | } |
839 | 827 | ||
840 | newpn->dev = dev; | 828 | newpn->dev = dev; |
@@ -843,7 +831,7 @@ static int blkio_policy_parse_and_set(char *buf,
843 | case BLKIO_POLICY_PROP: | 831 | case BLKIO_POLICY_PROP: |
844 | if ((temp < BLKIO_WEIGHT_MIN && temp > 0) || | 832 | if ((temp < BLKIO_WEIGHT_MIN && temp > 0) || |
845 | temp > BLKIO_WEIGHT_MAX) | 833 | temp > BLKIO_WEIGHT_MAX) |
846 | return -EINVAL; | 834 | goto out; |
847 | 835 | ||
848 | newpn->plid = plid; | 836 | newpn->plid = plid; |
849 | newpn->fileid = fileid; | 837 | newpn->fileid = fileid; |
@@ -860,7 +848,7 @@ static int blkio_policy_parse_and_set(char *buf,
860 | case BLKIO_THROTL_read_iops_device: | 848 | case BLKIO_THROTL_read_iops_device: |
861 | case BLKIO_THROTL_write_iops_device: | 849 | case BLKIO_THROTL_write_iops_device: |
862 | if (temp > THROTL_IOPS_MAX) | 850 | if (temp > THROTL_IOPS_MAX) |
863 | return -EINVAL; | 851 | goto out; |
864 | 852 | ||
865 | newpn->plid = plid; | 853 | newpn->plid = plid; |
866 | newpn->fileid = fileid; | 854 | newpn->fileid = fileid; |
@@ -871,8 +859,10 @@ static int blkio_policy_parse_and_set(char *buf,
871 | default: | 859 | default: |
872 | BUG(); | 860 | BUG(); |
873 | } | 861 | } |
874 | 862 | ret = 0; | |
875 | return 0; | 863 | out: |
864 | put_disk(disk); | ||
865 | return ret; | ||
876 | } | 866 | } |
877 | 867 | ||
878 | unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, | 868 | unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg, |
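For context, the buffer parsed by blkio_policy_parse_and_set() above is what userspace writes into the blkio cgroup control files: a "major:minor value" pair per rule. A hedged example of the format (device numbers are illustrative):

    8:16 1048576    limit the 8:16 device to 1048576 bytes/sec (or IOPS for the iops files)
    8:16 0          a value of 0 clears the rule, which is why device presence is only
                    checked when the value is non-zero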
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index a71d2904ffb9..6f3ace7e792f 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -188,7 +188,7 @@ struct blkio_policy_node {
188 | union { | 188 | union { |
189 | unsigned int weight; | 189 | unsigned int weight; |
190 | /* | 190 | /* |
191 | * Rate read/write in terms of byptes per second | 191 | * Rate read/write in terms of bytes per second |
192 | * Whether this rate represents read or write is determined | 192 | * Whether this rate represents read or write is determined |
193 | * by file type "fileid". | 193 | * by file type "fileid". |
194 | */ | 194 | */ |
diff --git a/block/blk-core.c b/block/blk-core.c
index 795154e54a75..f65871116404 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,6 +28,7 @@
28 | #include <linux/task_io_accounting_ops.h> | 28 | #include <linux/task_io_accounting_ops.h> |
29 | #include <linux/fault-inject.h> | 29 | #include <linux/fault-inject.h> |
30 | #include <linux/list_sort.h> | 30 | #include <linux/list_sort.h> |
31 | #include <linux/delay.h> | ||
31 | 32 | ||
32 | #define CREATE_TRACE_POINTS | 33 | #define CREATE_TRACE_POINTS |
33 | #include <trace/events/block.h> | 34 | #include <trace/events/block.h> |
@@ -38,8 +39,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
38 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); | 39 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap); |
39 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); | 40 | EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete); |
40 | 41 | ||
41 | static int __make_request(struct request_queue *q, struct bio *bio); | ||
42 | |||
43 | /* | 42 | /* |
44 | * For the allocated request tables | 43 | * For the allocated request tables |
45 | */ | 44 | */ |
@@ -347,30 +346,75 @@ void blk_put_queue(struct request_queue *q)
347 | } | 346 | } |
348 | EXPORT_SYMBOL(blk_put_queue); | 347 | EXPORT_SYMBOL(blk_put_queue); |
349 | 348 | ||
350 | /* | 349 | /** |
351 | * Note: If a driver supplied the queue lock, it is disconnected | 350 | * blk_drain_queue - drain requests from request_queue |
352 | * by this function. The actual state of the lock doesn't matter | 351 | * @q: queue to drain |
353 | * here as the request_queue isn't accessible after this point | 352 | * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV |
354 | * (QUEUE_FLAG_DEAD is set) and no other requests will be queued. | 353 | * |
354 | * Drain requests from @q. If @drain_all is set, all requests are drained. | ||
355 | * If not, only ELVPRIV requests are drained. The caller is responsible | ||
356 | * for ensuring that no new requests which need to be drained are queued. | ||
357 | */ | ||
358 | void blk_drain_queue(struct request_queue *q, bool drain_all) | ||
359 | { | ||
360 | while (true) { | ||
361 | int nr_rqs; | ||
362 | |||
363 | spin_lock_irq(q->queue_lock); | ||
364 | |||
365 | elv_drain_elevator(q); | ||
366 | if (drain_all) | ||
367 | blk_throtl_drain(q); | ||
368 | |||
369 | __blk_run_queue(q); | ||
370 | |||
371 | if (drain_all) | ||
372 | nr_rqs = q->rq.count[0] + q->rq.count[1]; | ||
373 | else | ||
374 | nr_rqs = q->rq.elvpriv; | ||
375 | |||
376 | spin_unlock_irq(q->queue_lock); | ||
377 | |||
378 | if (!nr_rqs) | ||
379 | break; | ||
380 | msleep(10); | ||
381 | } | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * blk_cleanup_queue - shutdown a request queue | ||
386 | * @q: request queue to shutdown | ||
387 | * | ||
388 | * Mark @q DEAD, drain all pending requests, destroy and put it. All | ||
389 | * future requests will be failed immediately with -ENODEV. | ||
355 | */ | 390 | */ |
356 | void blk_cleanup_queue(struct request_queue *q) | 391 | void blk_cleanup_queue(struct request_queue *q) |
357 | { | 392 | { |
358 | /* | 393 | spinlock_t *lock = q->queue_lock; |
359 | * We know we have process context here, so we can be a little | ||
360 | * cautious and ensure that pending block actions on this device | ||
361 | * are done before moving on. Going into this function, we should | ||
362 | * not have processes doing IO to this device. | ||
363 | */ | ||
364 | blk_sync_queue(q); | ||
365 | 394 | ||
366 | del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); | 395 | /* mark @q DEAD, no new request or merges will be allowed afterwards */ |
367 | mutex_lock(&q->sysfs_lock); | 396 | mutex_lock(&q->sysfs_lock); |
368 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); | 397 | queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q); |
369 | mutex_unlock(&q->sysfs_lock); | 398 | |
399 | spin_lock_irq(lock); | ||
400 | queue_flag_set(QUEUE_FLAG_NOMERGES, q); | ||
401 | queue_flag_set(QUEUE_FLAG_NOXMERGES, q); | ||
402 | queue_flag_set(QUEUE_FLAG_DEAD, q); | ||
370 | 403 | ||
371 | if (q->queue_lock != &q->__queue_lock) | 404 | if (q->queue_lock != &q->__queue_lock) |
372 | q->queue_lock = &q->__queue_lock; | 405 | q->queue_lock = &q->__queue_lock; |
373 | 406 | ||
407 | spin_unlock_irq(lock); | ||
408 | mutex_unlock(&q->sysfs_lock); | ||
409 | |||
410 | /* drain all requests queued before DEAD marking */ | ||
411 | blk_drain_queue(q, true); | ||
412 | |||
413 | /* @q won't process any more request, flush async actions */ | ||
414 | del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer); | ||
415 | blk_sync_queue(q); | ||
416 | |||
417 | /* @q is and will stay empty, shutdown and put */ | ||
374 | blk_put_queue(q); | 418 | blk_put_queue(q); |
375 | } | 419 | } |
376 | EXPORT_SYMBOL(blk_cleanup_queue); | 420 | EXPORT_SYMBOL(blk_cleanup_queue); |
@@ -541,7 +585,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
541 | /* | 585 | /* |
542 | * This also sets hw/phys segments, boundary and size | 586 | * This also sets hw/phys segments, boundary and size |
543 | */ | 587 | */ |
544 | blk_queue_make_request(q, __make_request); | 588 | blk_queue_make_request(q, blk_queue_bio); |
545 | 589 | ||
546 | q->sg_reserved_size = INT_MAX; | 590 | q->sg_reserved_size = INT_MAX; |
547 | 591 | ||
@@ -576,7 +620,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
576 | } | 620 | } |
577 | 621 | ||
578 | static struct request * | 622 | static struct request * |
579 | blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask) | 623 | blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask) |
580 | { | 624 | { |
581 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); | 625 | struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask); |
582 | 626 | ||
@@ -587,12 +631,10 @@ blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
587 | 631 | ||
588 | rq->cmd_flags = flags | REQ_ALLOCED; | 632 | rq->cmd_flags = flags | REQ_ALLOCED; |
589 | 633 | ||
590 | if (priv) { | 634 | if ((flags & REQ_ELVPRIV) && |
591 | if (unlikely(elv_set_request(q, rq, gfp_mask))) { | 635 | unlikely(elv_set_request(q, rq, gfp_mask))) { |
592 | mempool_free(rq, q->rq.rq_pool); | 636 | mempool_free(rq, q->rq.rq_pool); |
593 | return NULL; | 637 | return NULL; |
594 | } | ||
595 | rq->cmd_flags |= REQ_ELVPRIV; | ||
596 | } | 638 | } |
597 | 639 | ||
598 | return rq; | 640 | return rq; |
@@ -651,12 +693,13 @@ static void __freed_request(struct request_queue *q, int sync)
651 | * A request has just been released. Account for it, update the full and | 693 | * A request has just been released. Account for it, update the full and |
652 | * congestion status, wake up any waiters. Called under q->queue_lock. | 694 | * congestion status, wake up any waiters. Called under q->queue_lock. |
653 | */ | 695 | */ |
654 | static void freed_request(struct request_queue *q, int sync, int priv) | 696 | static void freed_request(struct request_queue *q, unsigned int flags) |
655 | { | 697 | { |
656 | struct request_list *rl = &q->rq; | 698 | struct request_list *rl = &q->rq; |
699 | int sync = rw_is_sync(flags); | ||
657 | 700 | ||
658 | rl->count[sync]--; | 701 | rl->count[sync]--; |
659 | if (priv) | 702 | if (flags & REQ_ELVPRIV) |
660 | rl->elvpriv--; | 703 | rl->elvpriv--; |
661 | 704 | ||
662 | __freed_request(q, sync); | 705 | __freed_request(q, sync); |
@@ -684,10 +727,19 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
684 | return true; | 727 | return true; |
685 | } | 728 | } |
686 | 729 | ||
687 | /* | 730 | /** |
688 | * Get a free request, queue_lock must be held. | 731 | * get_request - get a free request |
689 | * Returns NULL on failure, with queue_lock held. | 732 | * @q: request_queue to allocate request from |
690 | * Returns !NULL on success, with queue_lock *not held*. | 733 | * @rw_flags: RW and SYNC flags |
734 | * @bio: bio to allocate request for (can be %NULL) | ||
735 | * @gfp_mask: allocation mask | ||
736 | * | ||
737 | * Get a free request from @q. This function may fail under memory | ||
738 | * pressure or if @q is dead. | ||
739 | * | ||
740 | * Must be callled with @q->queue_lock held and, | ||
741 | * Returns %NULL on failure, with @q->queue_lock held. | ||
742 | * Returns !%NULL on success, with @q->queue_lock *not held*. | ||
691 | */ | 743 | */ |
692 | static struct request *get_request(struct request_queue *q, int rw_flags, | 744 | static struct request *get_request(struct request_queue *q, int rw_flags, |
693 | struct bio *bio, gfp_t gfp_mask) | 745 | struct bio *bio, gfp_t gfp_mask) |
@@ -696,7 +748,10 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
696 | struct request_list *rl = &q->rq; | 748 | struct request_list *rl = &q->rq; |
697 | struct io_context *ioc = NULL; | 749 | struct io_context *ioc = NULL; |
698 | const bool is_sync = rw_is_sync(rw_flags) != 0; | 750 | const bool is_sync = rw_is_sync(rw_flags) != 0; |
699 | int may_queue, priv = 0; | 751 | int may_queue; |
752 | |||
753 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
754 | return NULL; | ||
700 | 755 | ||
701 | may_queue = elv_may_queue(q, rw_flags); | 756 | may_queue = elv_may_queue(q, rw_flags); |
702 | if (may_queue == ELV_MQUEUE_NO) | 757 | if (may_queue == ELV_MQUEUE_NO) |
@@ -740,17 +795,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
740 | rl->count[is_sync]++; | 795 | rl->count[is_sync]++; |
741 | rl->starved[is_sync] = 0; | 796 | rl->starved[is_sync] = 0; |
742 | 797 | ||
743 | if (blk_rq_should_init_elevator(bio)) { | 798 | if (blk_rq_should_init_elevator(bio) && |
744 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 799 | !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) { |
745 | if (priv) | 800 | rw_flags |= REQ_ELVPRIV; |
746 | rl->elvpriv++; | 801 | rl->elvpriv++; |
747 | } | 802 | } |
748 | 803 | ||
749 | if (blk_queue_io_stat(q)) | 804 | if (blk_queue_io_stat(q)) |
750 | rw_flags |= REQ_IO_STAT; | 805 | rw_flags |= REQ_IO_STAT; |
751 | spin_unlock_irq(q->queue_lock); | 806 | spin_unlock_irq(q->queue_lock); |
752 | 807 | ||
753 | rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); | 808 | rq = blk_alloc_request(q, rw_flags, gfp_mask); |
754 | if (unlikely(!rq)) { | 809 | if (unlikely(!rq)) { |
755 | /* | 810 | /* |
756 | * Allocation failed presumably due to memory. Undo anything | 811 | * Allocation failed presumably due to memory. Undo anything |
@@ -760,7 +815,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
760 | * wait queue, but this is pretty rare. | 815 | * wait queue, but this is pretty rare. |
761 | */ | 816 | */ |
762 | spin_lock_irq(q->queue_lock); | 817 | spin_lock_irq(q->queue_lock); |
763 | freed_request(q, is_sync, priv); | 818 | freed_request(q, rw_flags); |
764 | 819 | ||
765 | /* | 820 | /* |
766 | * in the very unlikely event that allocation failed and no | 821 | * in the very unlikely event that allocation failed and no |
@@ -790,11 +845,18 @@ out:
790 | return rq; | 845 | return rq; |
791 | } | 846 | } |
792 | 847 | ||
793 | /* | 848 | /** |
794 | * No available requests for this queue, wait for some requests to become | 849 | * get_request_wait - get a free request with retry |
795 | * available. | 850 | * @q: request_queue to allocate request from |
851 | * @rw_flags: RW and SYNC flags | ||
852 | * @bio: bio to allocate request for (can be %NULL) | ||
853 | * | ||
854 | * Get a free request from @q. This function keeps retrying under memory | ||
855 | * pressure and fails iff @q is dead. | ||
796 | * | 856 | * |
797 | * Called with q->queue_lock held, and returns with it unlocked. | 857 | * Must be callled with @q->queue_lock held and, |
858 | * Returns %NULL on failure, with @q->queue_lock held. | ||
859 | * Returns !%NULL on success, with @q->queue_lock *not held*. | ||
798 | */ | 860 | */ |
799 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, | 861 | static struct request *get_request_wait(struct request_queue *q, int rw_flags, |
800 | struct bio *bio) | 862 | struct bio *bio) |
@@ -808,6 +870,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
808 | struct io_context *ioc; | 870 | struct io_context *ioc; |
809 | struct request_list *rl = &q->rq; | 871 | struct request_list *rl = &q->rq; |
810 | 872 | ||
873 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
874 | return NULL; | ||
875 | |||
811 | prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, | 876 | prepare_to_wait_exclusive(&rl->wait[is_sync], &wait, |
812 | TASK_UNINTERRUPTIBLE); | 877 | TASK_UNINTERRUPTIBLE); |
813 | 878 | ||
@@ -838,19 +903,15 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
838 | { | 903 | { |
839 | struct request *rq; | 904 | struct request *rq; |
840 | 905 | ||
841 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
842 | return NULL; | ||
843 | |||
844 | BUG_ON(rw != READ && rw != WRITE); | 906 | BUG_ON(rw != READ && rw != WRITE); |
845 | 907 | ||
846 | spin_lock_irq(q->queue_lock); | 908 | spin_lock_irq(q->queue_lock); |
847 | if (gfp_mask & __GFP_WAIT) { | 909 | if (gfp_mask & __GFP_WAIT) |
848 | rq = get_request_wait(q, rw, NULL); | 910 | rq = get_request_wait(q, rw, NULL); |
849 | } else { | 911 | else |
850 | rq = get_request(q, rw, NULL, gfp_mask); | 912 | rq = get_request(q, rw, NULL, gfp_mask); |
851 | if (!rq) | 913 | if (!rq) |
852 | spin_unlock_irq(q->queue_lock); | 914 | spin_unlock_irq(q->queue_lock); |
853 | } | ||
854 | /* q->queue_lock is unlocked at this point */ | 915 | /* q->queue_lock is unlocked at this point */ |
855 | 916 | ||
856 | return rq; | 917 | return rq; |
@@ -1052,14 +1113,13 @@ void __blk_put_request(struct request_queue *q, struct request *req)
1052 | * it didn't come out of our reserved rq pools | 1113 | * it didn't come out of our reserved rq pools |
1053 | */ | 1114 | */ |
1054 | if (req->cmd_flags & REQ_ALLOCED) { | 1115 | if (req->cmd_flags & REQ_ALLOCED) { |
1055 | int is_sync = rq_is_sync(req) != 0; | 1116 | unsigned int flags = req->cmd_flags; |
1056 | int priv = req->cmd_flags & REQ_ELVPRIV; | ||
1057 | 1117 | ||
1058 | BUG_ON(!list_empty(&req->queuelist)); | 1118 | BUG_ON(!list_empty(&req->queuelist)); |
1059 | BUG_ON(!hlist_unhashed(&req->hash)); | 1119 | BUG_ON(!hlist_unhashed(&req->hash)); |
1060 | 1120 | ||
1061 | blk_free_request(q, req); | 1121 | blk_free_request(q, req); |
1062 | freed_request(q, is_sync, priv); | 1122 | freed_request(q, flags); |
1063 | } | 1123 | } |
1064 | } | 1124 | } |
1065 | EXPORT_SYMBOL_GPL(__blk_put_request); | 1125 | EXPORT_SYMBOL_GPL(__blk_put_request); |
@@ -1161,18 +1221,32 @@ static bool bio_attempt_front_merge(struct request_queue *q,
1161 | return true; | 1221 | return true; |
1162 | } | 1222 | } |
1163 | 1223 | ||
1164 | /* | 1224 | /** |
1165 | * Attempts to merge with the plugged list in the current process. Returns | 1225 | * attempt_plug_merge - try to merge with %current's plugged list |
1166 | * true if merge was successful, otherwise false. | 1226 | * @q: request_queue new bio is being queued at |
1227 | * @bio: new bio being queued | ||
1228 | * @request_count: out parameter for number of traversed plugged requests | ||
1229 | * | ||
1230 | * Determine whether @bio being queued on @q can be merged with a request | ||
1231 | * on %current's plugged list. Returns %true if merge was successful, | ||
1232 | * otherwise %false. | ||
1233 | * | ||
1234 | * This function is called without @q->queue_lock; however, elevator is | ||
1235 | * accessed iff there already are requests on the plugged list which in | ||
1236 | * turn guarantees validity of the elevator. | ||
1237 | * | ||
1238 | * Note that, on successful merge, elevator operation | ||
1239 | * elevator_bio_merged_fn() will be called without queue lock. Elevator | ||
1240 | * must be ready for this. | ||
1167 | */ | 1241 | */ |
1168 | static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q, | 1242 | static bool attempt_plug_merge(struct request_queue *q, struct bio *bio, |
1169 | struct bio *bio, unsigned int *request_count) | 1243 | unsigned int *request_count) |
1170 | { | 1244 | { |
1171 | struct blk_plug *plug; | 1245 | struct blk_plug *plug; |
1172 | struct request *rq; | 1246 | struct request *rq; |
1173 | bool ret = false; | 1247 | bool ret = false; |
1174 | 1248 | ||
1175 | plug = tsk->plug; | 1249 | plug = current->plug; |
1176 | if (!plug) | 1250 | if (!plug) |
1177 | goto out; | 1251 | goto out; |
1178 | *request_count = 0; | 1252 | *request_count = 0; |
@@ -1202,7 +1276,6 @@ out:
1202 | 1276 | ||
1203 | void init_request_from_bio(struct request *req, struct bio *bio) | 1277 | void init_request_from_bio(struct request *req, struct bio *bio) |
1204 | { | 1278 | { |
1205 | req->cpu = bio->bi_comp_cpu; | ||
1206 | req->cmd_type = REQ_TYPE_FS; | 1279 | req->cmd_type = REQ_TYPE_FS; |
1207 | 1280 | ||
1208 | req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; | 1281 | req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK; |
@@ -1215,7 +1288,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1215 | blk_rq_bio_prep(req->q, req, bio); | 1288 | blk_rq_bio_prep(req->q, req, bio); |
1216 | } | 1289 | } |
1217 | 1290 | ||
1218 | static int __make_request(struct request_queue *q, struct bio *bio) | 1291 | void blk_queue_bio(struct request_queue *q, struct bio *bio) |
1219 | { | 1292 | { |
1220 | const bool sync = !!(bio->bi_rw & REQ_SYNC); | 1293 | const bool sync = !!(bio->bi_rw & REQ_SYNC); |
1221 | struct blk_plug *plug; | 1294 | struct blk_plug *plug; |
@@ -1240,8 +1313,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
1240 | * Check if we can merge with the plugged list before grabbing | 1313 | * Check if we can merge with the plugged list before grabbing |
1241 | * any locks. | 1314 | * any locks. |
1242 | */ | 1315 | */ |
1243 | if (attempt_plug_merge(current, q, bio, &request_count)) | 1316 | if (attempt_plug_merge(q, bio, &request_count)) |
1244 | goto out; | 1317 | return; |
1245 | 1318 | ||
1246 | spin_lock_irq(q->queue_lock); | 1319 | spin_lock_irq(q->queue_lock); |
1247 | 1320 | ||
@@ -1275,6 +1348,10 @@ get_rq:
1275 | * Returns with the queue unlocked. | 1348 | * Returns with the queue unlocked. |
1276 | */ | 1349 | */ |
1277 | req = get_request_wait(q, rw_flags, bio); | 1350 | req = get_request_wait(q, rw_flags, bio); |
1351 | if (unlikely(!req)) { | ||
1352 | bio_endio(bio, -ENODEV); /* @q is dead */ | ||
1353 | goto out_unlock; | ||
1354 | } | ||
1278 | 1355 | ||
1279 | /* | 1356 | /* |
1280 | * After dropping the lock and possibly sleeping here, our request | 1357 | * After dropping the lock and possibly sleeping here, our request |
@@ -1284,8 +1361,7 @@ get_rq:
1284 | */ | 1361 | */ |
1285 | init_request_from_bio(req, bio); | 1362 | init_request_from_bio(req, bio); |
1286 | 1363 | ||
1287 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) || | 1364 | if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags)) |
1288 | bio_flagged(bio, BIO_CPU_AFFINE)) | ||
1289 | req->cpu = raw_smp_processor_id(); | 1365 | req->cpu = raw_smp_processor_id(); |
1290 | 1366 | ||
1291 | plug = current->plug; | 1367 | plug = current->plug; |
@@ -1316,9 +1392,8 @@ get_rq:
1316 | out_unlock: | 1392 | out_unlock: |
1317 | spin_unlock_irq(q->queue_lock); | 1393 | spin_unlock_irq(q->queue_lock); |
1318 | } | 1394 | } |
1319 | out: | ||
1320 | return 0; | ||
1321 | } | 1395 | } |
1396 | EXPORT_SYMBOL_GPL(blk_queue_bio); /* for device mapper only */ | ||
1322 | 1397 | ||
1323 | /* | 1398 | /* |
1324 | * If bio->bi_dev is a partition, remap the location | 1399 | * If bio->bi_dev is a partition, remap the location |
@@ -1417,165 +1492,135 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1417 | return 0; | 1492 | return 0; |
1418 | } | 1493 | } |
1419 | 1494 | ||
1420 | /** | 1495 | static noinline_for_stack bool |
1421 | * generic_make_request - hand a buffer to its device driver for I/O | 1496 | generic_make_request_checks(struct bio *bio) |
1422 | * @bio: The bio describing the location in memory and on the device. | ||
1423 | * | ||
1424 | * generic_make_request() is used to make I/O requests of block | ||
1425 | * devices. It is passed a &struct bio, which describes the I/O that needs | ||
1426 | * to be done. | ||
1427 | * | ||
1428 | * generic_make_request() does not return any status. The | ||
1429 | * success/failure status of the request, along with notification of | ||
1430 | * completion, is delivered asynchronously through the bio->bi_end_io | ||
1431 | * function described (one day) else where. | ||
1432 | * | ||
1433 | * The caller of generic_make_request must make sure that bi_io_vec | ||
1434 | * are set to describe the memory buffer, and that bi_dev and bi_sector are | ||
1435 | * set to describe the device address, and the | ||
1436 | * bi_end_io and optionally bi_private are set to describe how | ||
1437 | * completion notification should be signaled. | ||
1438 | * | ||
1439 | * generic_make_request and the drivers it calls may use bi_next if this | ||
1440 | * bio happens to be merged with someone else, and may change bi_dev and | ||
1441 | * bi_sector for remaps as it sees fit. So the values of these fields | ||
1442 | * should NOT be depended on after the call to generic_make_request. | ||
1443 | */ | ||
1444 | static inline void __generic_make_request(struct bio *bio) | ||
1445 | { | 1497 | { |
1446 | struct request_queue *q; | 1498 | struct request_queue *q; |
1447 | sector_t old_sector; | 1499 | int nr_sectors = bio_sectors(bio); |
1448 | int ret, nr_sectors = bio_sectors(bio); | ||
1449 | dev_t old_dev; | ||
1450 | int err = -EIO; | 1500 | int err = -EIO; |
1501 | char b[BDEVNAME_SIZE]; | ||
1502 | struct hd_struct *part; | ||
1451 | 1503 | ||
1452 | might_sleep(); | 1504 | might_sleep(); |
1453 | 1505 | ||
1454 | if (bio_check_eod(bio, nr_sectors)) | 1506 | if (bio_check_eod(bio, nr_sectors)) |
1455 | goto end_io; | 1507 | goto end_io; |
1456 | 1508 | ||
1457 | /* | 1509 | q = bdev_get_queue(bio->bi_bdev); |
1458 | * Resolve the mapping until finished. (drivers are | 1510 | if (unlikely(!q)) { |
1459 | * still free to implement/resolve their own stacking | 1511 | printk(KERN_ERR |
1460 | * by explicitly returning 0) | 1512 | "generic_make_request: Trying to access " |
1461 | * | 1513 | "nonexistent block-device %s (%Lu)\n", |
1462 | * NOTE: we don't repeat the blk_size check for each new device. | 1514 | bdevname(bio->bi_bdev, b), |
1463 | * Stacking drivers are expected to know what they are doing. | 1515 | (long long) bio->bi_sector); |
1464 | */ | 1516 | goto end_io; |
1465 | old_sector = -1; | 1517 | } |
1466 | old_dev = 0; | ||
1467 | do { | ||
1468 | char b[BDEVNAME_SIZE]; | ||
1469 | struct hd_struct *part; | ||
1470 | |||
1471 | q = bdev_get_queue(bio->bi_bdev); | ||
1472 | if (unlikely(!q)) { | ||
1473 | printk(KERN_ERR | ||
1474 | "generic_make_request: Trying to access " | ||
1475 | "nonexistent block-device %s (%Lu)\n", | ||
1476 | bdevname(bio->bi_bdev, b), | ||
1477 | (long long) bio->bi_sector); | ||
1478 | goto end_io; | ||
1479 | } | ||
1480 | |||
1481 | if (unlikely(!(bio->bi_rw & REQ_DISCARD) && | ||
1482 | nr_sectors > queue_max_hw_sectors(q))) { | ||
1483 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", | ||
1484 | bdevname(bio->bi_bdev, b), | ||
1485 | bio_sectors(bio), | ||
1486 | queue_max_hw_sectors(q)); | ||
1487 | goto end_io; | ||
1488 | } | ||
1489 | |||
1490 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
1491 | goto end_io; | ||
1492 | |||
1493 | part = bio->bi_bdev->bd_part; | ||
1494 | if (should_fail_request(part, bio->bi_size) || | ||
1495 | should_fail_request(&part_to_disk(part)->part0, | ||
1496 | bio->bi_size)) | ||
1497 | goto end_io; | ||
1498 | |||
1499 | /* | ||
1500 | * If this device has partitions, remap block n | ||
1501 | * of partition p to block n+start(p) of the disk. | ||
1502 | */ | ||
1503 | blk_partition_remap(bio); | ||
1504 | 1518 | ||
1505 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) | 1519 | if (unlikely(!(bio->bi_rw & REQ_DISCARD) && |
1506 | goto end_io; | 1520 | nr_sectors > queue_max_hw_sectors(q))) { |
1521 | printk(KERN_ERR "bio too big device %s (%u > %u)\n", | ||
1522 | bdevname(bio->bi_bdev, b), | ||
1523 | bio_sectors(bio), | ||
1524 | queue_max_hw_sectors(q)); | ||
1525 | goto end_io; | ||
1526 | } | ||
1507 | 1527 | ||
1508 | if (old_sector != -1) | 1528 | part = bio->bi_bdev->bd_part; |
1509 | trace_block_bio_remap(q, bio, old_dev, old_sector); | 1529 | if (should_fail_request(part, bio->bi_size) || |
1530 | should_fail_request(&part_to_disk(part)->part0, | ||
1531 | bio->bi_size)) | ||
1532 | goto end_io; | ||
1510 | 1533 | ||
1511 | old_sector = bio->bi_sector; | 1534 | /* |
1512 | old_dev = bio->bi_bdev->bd_dev; | 1535 | * If this device has partitions, remap block n |
1536 | * of partition p to block n+start(p) of the disk. | ||
1537 | */ | ||
1538 | blk_partition_remap(bio); | ||
1513 | 1539 | ||
1514 | if (bio_check_eod(bio, nr_sectors)) | 1540 | if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) |
1515 | goto end_io; | 1541 | goto end_io; |
1516 | 1542 | ||
1517 | /* | 1543 | if (bio_check_eod(bio, nr_sectors)) |
1518 | * Filter flush bio's early so that make_request based | 1544 | goto end_io; |
1519 | * drivers without flush support don't have to worry | ||
1520 | * about them. | ||
1521 | */ | ||
1522 | if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { | ||
1523 | bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); | ||
1524 | if (!nr_sectors) { | ||
1525 | err = 0; | ||
1526 | goto end_io; | ||
1527 | } | ||
1528 | } | ||
1529 | 1545 | ||
1530 | if ((bio->bi_rw & REQ_DISCARD) && | 1546 | /* |
1531 | (!blk_queue_discard(q) || | 1547 | * Filter flush bio's early so that make_request based |
1532 | ((bio->bi_rw & REQ_SECURE) && | 1548 | * drivers without flush support don't have to worry |
1533 | !blk_queue_secdiscard(q)))) { | 1549 | * about them. |
1534 | err = -EOPNOTSUPP; | 1550 | */ |
1551 | if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) { | ||
1552 | bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA); | ||
1553 | if (!nr_sectors) { | ||
1554 | err = 0; | ||
1535 | goto end_io; | 1555 | goto end_io; |
1536 | } | 1556 | } |
1557 | } | ||
1537 | 1558 | ||
1538 | if (blk_throtl_bio(q, &bio)) | 1559 | if ((bio->bi_rw & REQ_DISCARD) && |
1539 | goto end_io; | 1560 | (!blk_queue_discard(q) || |
1540 | 1561 | ((bio->bi_rw & REQ_SECURE) && | |
1541 | /* | 1562 | !blk_queue_secdiscard(q)))) { |
1542 | * If bio = NULL, bio has been throttled and will be submitted | 1563 | err = -EOPNOTSUPP; |
1543 | * later. | 1564 | goto end_io; |
1544 | */ | 1565 | } |
1545 | if (!bio) | ||
1546 | break; | ||
1547 | |||
1548 | trace_block_bio_queue(q, bio); | ||
1549 | 1566 | ||
1550 | ret = q->make_request_fn(q, bio); | 1567 | if (blk_throtl_bio(q, bio)) |
1551 | } while (ret); | 1568 | return false; /* throttled, will be resubmitted later */ |
1552 | 1569 | ||
1553 | return; | 1570 | trace_block_bio_queue(q, bio); |
1571 | return true; | ||
1554 | 1572 | ||
1555 | end_io: | 1573 | end_io: |
1556 | bio_endio(bio, err); | 1574 | bio_endio(bio, err); |
1575 | return false; | ||
1557 | } | 1576 | } |
1558 | 1577 | ||
1559 | /* | 1578 | /** |
1560 | * We only want one ->make_request_fn to be active at a time, | 1579 | * generic_make_request - hand a buffer to its device driver for I/O |
1561 | * else stack usage with stacked devices could be a problem. | 1580 | * @bio: The bio describing the location in memory and on the device. |
1562 | * So use current->bio_list to keep a list of requests | 1581 | * |
1563 | * submited by a make_request_fn function. | 1582 | * generic_make_request() is used to make I/O requests of block |
1564 | * current->bio_list is also used as a flag to say if | 1583 | * devices. It is passed a &struct bio, which describes the I/O that needs |
1565 | * generic_make_request is currently active in this task or not. | 1584 | * to be done. |
1566 | * If it is NULL, then no make_request is active. If it is non-NULL, | 1585 | * |
1567 | * then a make_request is active, and new requests should be added | 1586 | * generic_make_request() does not return any status. The |
1568 | * at the tail | 1587 | * success/failure status of the request, along with notification of |
1588 | * completion, is delivered asynchronously through the bio->bi_end_io | ||
1589 | * function described (one day) else where. | ||
1590 | * | ||
1591 | * The caller of generic_make_request must make sure that bi_io_vec | ||
1592 | * are set to describe the memory buffer, and that bi_dev and bi_sector are | ||
1593 | * set to describe the device address, and the | ||
1594 | * bi_end_io and optionally bi_private are set to describe how | ||
1595 | * completion notification should be signaled. | ||
1596 | * | ||
1597 | * generic_make_request and the drivers it calls may use bi_next if this | ||
1598 | * bio happens to be merged with someone else, and may resubmit the bio to | ||
1599 | * a lower device by calling into generic_make_request recursively, which | ||
1600 | * means the bio should NOT be touched after the call to ->make_request_fn. | ||
1569 | */ | 1601 | */ |
1570 | void generic_make_request(struct bio *bio) | 1602 | void generic_make_request(struct bio *bio) |
1571 | { | 1603 | { |
1572 | struct bio_list bio_list_on_stack; | 1604 | struct bio_list bio_list_on_stack; |
1573 | 1605 | ||
1606 | if (!generic_make_request_checks(bio)) | ||
1607 | return; | ||
1608 | |||
1609 | /* | ||
1610 | * We only want one ->make_request_fn to be active at a time, else | ||
1611 | * stack usage with stacked devices could be a problem. So use | ||
1612 | * current->bio_list to keep a list of requests submited by a | ||
1613 | * make_request_fn function. current->bio_list is also used as a | ||
1614 | * flag to say if generic_make_request is currently active in this | ||
1615 | * task or not. If it is NULL, then no make_request is active. If | ||
1616 | * it is non-NULL, then a make_request is active, and new requests | ||
1617 | * should be added at the tail | ||
1618 | */ | ||
1574 | if (current->bio_list) { | 1619 | if (current->bio_list) { |
1575 | /* make_request is active */ | ||
1576 | bio_list_add(current->bio_list, bio); | 1620 | bio_list_add(current->bio_list, bio); |
1577 | return; | 1621 | return; |
1578 | } | 1622 | } |
1623 | |||
1579 | /* following loop may be a bit non-obvious, and so deserves some | 1624 | /* following loop may be a bit non-obvious, and so deserves some |
1580 | * explanation. | 1625 | * explanation. |
1581 | * Before entering the loop, bio->bi_next is NULL (as all callers | 1626 | * Before entering the loop, bio->bi_next is NULL (as all callers |
@@ -1583,22 +1628,21 @@ void generic_make_request(struct bio *bio)
1583 | * We pretend that we have just taken it off a longer list, so | 1628 | * We pretend that we have just taken it off a longer list, so |
1584 | * we assign bio_list to a pointer to the bio_list_on_stack, | 1629 | * we assign bio_list to a pointer to the bio_list_on_stack, |
1585 | * thus initialising the bio_list of new bios to be | 1630 | * thus initialising the bio_list of new bios to be |
1586 | * added. __generic_make_request may indeed add some more bios | 1631 | * added. ->make_request() may indeed add some more bios |
1587 | * through a recursive call to generic_make_request. If it | 1632 | * through a recursive call to generic_make_request. If it |
1588 | * did, we find a non-NULL value in bio_list and re-enter the loop | 1633 | * did, we find a non-NULL value in bio_list and re-enter the loop |
1589 | * from the top. In this case we really did just take the bio | 1634 | * from the top. In this case we really did just take the bio |
1590 | * of the top of the list (no pretending) and so remove it from | 1635 | * of the top of the list (no pretending) and so remove it from |
1591 | * bio_list, and call into __generic_make_request again. | 1636 | * bio_list, and call into ->make_request() again. |
1592 | * | ||
1593 | * The loop was structured like this to make only one call to | ||
1594 | * __generic_make_request (which is important as it is large and | ||
1595 | * inlined) and to keep the structure simple. | ||
1596 | */ | 1637 | */ |
1597 | BUG_ON(bio->bi_next); | 1638 | BUG_ON(bio->bi_next); |
1598 | bio_list_init(&bio_list_on_stack); | 1639 | bio_list_init(&bio_list_on_stack); |
1599 | current->bio_list = &bio_list_on_stack; | 1640 | current->bio_list = &bio_list_on_stack; |
1600 | do { | 1641 | do { |
1601 | __generic_make_request(bio); | 1642 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
1643 | |||
1644 | q->make_request_fn(q, bio); | ||
1645 | |||
1602 | bio = bio_list_pop(current->bio_list); | 1646 | bio = bio_list_pop(current->bio_list); |
1603 | } while (bio); | 1647 | } while (bio); |
1604 | current->bio_list = NULL; /* deactivate */ | 1648 | current->bio_list = NULL; /* deactivate */ |
@@ -2630,6 +2674,20 @@ EXPORT_SYMBOL(kblockd_schedule_delayed_work);
2630 | 2674 | ||
2631 | #define PLUG_MAGIC 0x91827364 | 2675 | #define PLUG_MAGIC 0x91827364 |
2632 | 2676 | ||
2677 | /** | ||
2678 | * blk_start_plug - initialize blk_plug and track it inside the task_struct | ||
2679 | * @plug: The &struct blk_plug that needs to be initialized | ||
2680 | * | ||
2681 | * Description: | ||
2682 | * Tracking blk_plug inside the task_struct will help with auto-flushing the | ||
2683 | * pending I/O should the task end up blocking between blk_start_plug() and | ||
2684 | * blk_finish_plug(). This is important from a performance perspective, but | ||
2685 | * also ensures that we don't deadlock. For instance, if the task is blocking | ||
2686 | * for a memory allocation, memory reclaim could end up wanting to free a | ||
2687 | * page belonging to that request that is currently residing in our private | ||
2688 | * plug. By flushing the pending I/O when the process goes to sleep, we avoid | ||
2689 | * this kind of deadlock. | ||
2690 | */ | ||
2633 | void blk_start_plug(struct blk_plug *plug) | 2691 | void blk_start_plug(struct blk_plug *plug) |
2634 | { | 2692 | { |
2635 | struct task_struct *tsk = current; | 2693 | struct task_struct *tsk = current; |
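The kernel-doc added for blk_start_plug() above explains why the plug is tracked in the task_struct; a short usage sketch of the plugging API it documents (illustrative, not part of this patch; the bio array is hypothetical):

    struct blk_plug plug;
    int i;

    blk_start_plug(&plug);                  /* queue submissions in the per-task plug */
    for (i = 0; i < nr_bios; i++)
            submit_bio(WRITE, bios[i]);     /* hypothetical array of prepared bios */
    blk_finish_plug(&plug);                 /* flush the plugged requests to the device queue */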
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 60fda88c57f0..e7f9f657f105 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -457,11 +457,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
457 | } | 457 | } |
458 | 458 | ||
459 | /** | 459 | /** |
460 | * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed | 460 | * blk_release_queue: - release a &struct request_queue when it is no longer needed |
461 | * @kobj: the kobj belonging of the request queue to be released | 461 | * @kobj: the kobj belonging to the request queue to be released |
462 | * | 462 | * |
463 | * Description: | 463 | * Description: |
464 | * blk_cleanup_queue is the pair to blk_init_queue() or | 464 | * blk_release_queue is the pair to blk_init_queue() or |
465 | * blk_queue_make_request(). It should be called when a request queue is | 465 | * blk_queue_make_request(). It should be called when a request queue is |
466 | * being released; typically when a block device is being de-registered. | 466 | * being released; typically when a block device is being de-registered. |
467 | * Currently, its primary task it to free all the &struct request | 467 | * Currently, its primary task it to free all the &struct request |
@@ -490,6 +490,7 @@ static void blk_release_queue(struct kobject *kobj)
490 | if (q->queue_tags) | 490 | if (q->queue_tags) |
491 | __blk_queue_free_tags(q); | 491 | __blk_queue_free_tags(q); |
492 | 492 | ||
493 | blk_throtl_release(q); | ||
493 | blk_trace_shutdown(q); | 494 | blk_trace_shutdown(q); |
494 | 495 | ||
495 | bdi_destroy(&q->backing_dev_info); | 496 | bdi_destroy(&q->backing_dev_info); |
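The kernel-doc fix above reflects how queue teardown is split after this series; a hedged outline of the lifecycle (assumed usage, not taken from the patch):

    q = blk_init_queue(rfn, lock);          /* allocate and set up the request queue */
    /* ... device operates ... */
    blk_cleanup_queue(q);                   /* mark DEAD, drain pending requests, drop the driver's ref */
    /* blk_release_queue() later runs from the kobject release path once the last
     * reference is dropped, freeing tags, throttle data and the backing_dev_info */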
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a19f58c6fc3a..8edb9499b509 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -10,6 +10,7 @@
10 | #include <linux/bio.h> | 10 | #include <linux/bio.h> |
11 | #include <linux/blktrace_api.h> | 11 | #include <linux/blktrace_api.h> |
12 | #include "blk-cgroup.h" | 12 | #include "blk-cgroup.h" |
13 | #include "blk.h" | ||
13 | 14 | ||
14 | /* Max dispatch from a group in 1 round */ | 15 | /* Max dispatch from a group in 1 round */ |
15 | static int throtl_grp_quantum = 8; | 16 | static int throtl_grp_quantum = 8; |
@@ -302,16 +303,16 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
302 | return tg; | 303 | return tg; |
303 | } | 304 | } |
304 | 305 | ||
305 | /* | ||
306 | * This function returns with queue lock unlocked in case of error, like | ||
307 | * request queue is no more | ||
308 | */ | ||
309 | static struct throtl_grp * throtl_get_tg(struct throtl_data *td) | 306 | static struct throtl_grp * throtl_get_tg(struct throtl_data *td) |
310 | { | 307 | { |
311 | struct throtl_grp *tg = NULL, *__tg = NULL; | 308 | struct throtl_grp *tg = NULL, *__tg = NULL; |
312 | struct blkio_cgroup *blkcg; | 309 | struct blkio_cgroup *blkcg; |
313 | struct request_queue *q = td->queue; | 310 | struct request_queue *q = td->queue; |
314 | 311 | ||
312 | /* no throttling for dead queue */ | ||
313 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) | ||
314 | return NULL; | ||
315 | |||
315 | rcu_read_lock(); | 316 | rcu_read_lock(); |
316 | blkcg = task_blkio_cgroup(current); | 317 | blkcg = task_blkio_cgroup(current); |
317 | tg = throtl_find_tg(td, blkcg); | 318 | tg = throtl_find_tg(td, blkcg); |
@@ -323,32 +324,22 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
323 | /* | 324 | /* |
324 | * Need to allocate a group. Allocation of group also needs allocation | 325 | * Need to allocate a group. Allocation of group also needs allocation |
325 | * of per cpu stats which in-turn takes a mutex() and can block. Hence | 326 | * of per cpu stats which in-turn takes a mutex() and can block. Hence |
326 | * we need to drop rcu lock and queue_lock before we call alloc | 327 | * we need to drop rcu lock and queue_lock before we call alloc. |
327 | * | ||
328 | * Take the request queue reference to make sure queue does not | ||
329 | * go away once we return from allocation. | ||
330 | */ | 328 | */ |
331 | blk_get_queue(q); | ||
332 | rcu_read_unlock(); | 329 | rcu_read_unlock(); |
333 | spin_unlock_irq(q->queue_lock); | 330 | spin_unlock_irq(q->queue_lock); |
334 | 331 | ||
335 | tg = throtl_alloc_tg(td); | 332 | tg = throtl_alloc_tg(td); |
336 | /* | ||
337 | * We might have slept in group allocation. Make sure queue is not | ||
338 | * dead | ||
339 | */ | ||
340 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | ||
341 | blk_put_queue(q); | ||
342 | if (tg) | ||
343 | kfree(tg); | ||
344 | |||
345 | return ERR_PTR(-ENODEV); | ||
346 | } | ||
347 | blk_put_queue(q); | ||
348 | 333 | ||
349 | /* Group allocated and queue is still alive. take the lock */ | 334 | /* Group allocated and queue is still alive. take the lock */ |
350 | spin_lock_irq(q->queue_lock); | 335 | spin_lock_irq(q->queue_lock); |
351 | 336 | ||
337 | /* Make sure @q is still alive */ | ||
338 | if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | ||
339 | kfree(tg); | ||
340 | return NULL; | ||
341 | } | ||
342 | |||
352 | /* | 343 | /* |
353 | * Initialize the new group. After sleeping, read the blkcg again. | 344 | * Initialize the new group. After sleeping, read the blkcg again. |
354 | */ | 345 | */ |
@@ -1014,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td)
1014 | } | 1005 | } |
1015 | } | 1006 | } |
1016 | 1007 | ||
1017 | static void throtl_td_free(struct throtl_data *td) | ||
1018 | { | ||
1019 | kfree(td); | ||
1020 | } | ||
1021 | |||
1022 | /* | 1008 | /* |
1023 | * Blk cgroup controller notification saying that blkio_group object is being | 1009 | * Blk cgroup controller notification saying that blkio_group object is being |
1024 | * delinked as associated cgroup object is going away. That also means that | 1010 | * delinked as associated cgroup object is going away. That also means that |
@@ -1123,17 +1109,17 @@ static struct blkio_policy_type blkio_policy_throtl = {
1123 | .plid = BLKIO_POLICY_THROTL, | 1109 | .plid = BLKIO_POLICY_THROTL, |
1124 | }; | 1110 | }; |
1125 | 1111 | ||
1126 | int blk_throtl_bio(struct request_queue *q, struct bio **biop) | 1112 | bool blk_throtl_bio(struct request_queue *q, struct bio *bio) |
1127 | { | 1113 | { |
1128 | struct throtl_data *td = q->td; | 1114 | struct throtl_data *td = q->td; |
1129 | struct throtl_grp *tg; | 1115 | struct throtl_grp *tg; |
1130 | struct bio *bio = *biop; | ||
1131 | bool rw = bio_data_dir(bio), update_disptime = true; | 1116 | bool rw = bio_data_dir(bio), update_disptime = true; |
1132 | struct blkio_cgroup *blkcg; | 1117 | struct blkio_cgroup *blkcg; |
1118 | bool throttled = false; | ||
1133 | 1119 | ||
1134 | if (bio->bi_rw & REQ_THROTTLED) { | 1120 | if (bio->bi_rw & REQ_THROTTLED) { |
1135 | bio->bi_rw &= ~REQ_THROTTLED; | 1121 | bio->bi_rw &= ~REQ_THROTTLED; |
1136 | return 0; | 1122 | goto out; |
1137 | } | 1123 | } |
1138 | 1124 | ||
1139 | /* | 1125 | /* |
@@ -1152,7 +1138,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
1152 | blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, | 1138 | blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, |
1153 | rw, rw_is_sync(bio->bi_rw)); | 1139 | rw, rw_is_sync(bio->bi_rw)); |
1154 | rcu_read_unlock(); | 1140 | rcu_read_unlock(); |
1155 | return 0; | 1141 | goto out; |
1156 | } | 1142 | } |
1157 | } | 1143 | } |
1158 | rcu_read_unlock(); | 1144 | rcu_read_unlock(); |
@@ -1161,18 +1147,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
1161 | * Either group has not been allocated yet or it is not an unlimited | 1147 | * Either group has not been allocated yet or it is not an unlimited |
1162 | * IO group | 1148 | * IO group |
1163 | */ | 1149 | */ |
1164 | |||
1165 | spin_lock_irq(q->queue_lock); | 1150 | spin_lock_irq(q->queue_lock); |
1166 | tg = throtl_get_tg(td); | 1151 | tg = throtl_get_tg(td); |
1167 | 1152 | if (unlikely(!tg)) | |
1168 | if (IS_ERR(tg)) { | 1153 | goto out_unlock; |
1169 | if (PTR_ERR(tg) == -ENODEV) { | ||
1170 | /* | ||
1171 | * Queue is gone. No queue lock held here. | ||
1172 | */ | ||
1173 | return -ENODEV; | ||
1174 | } | ||
1175 | } | ||
1176 | 1154 | ||
1177 | if (tg->nr_queued[rw]) { | 1155 | if (tg->nr_queued[rw]) { |
1178 | /* | 1156 | /* |
@@ -1200,7 +1178,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
1200 | * So keep on trimming slice even if bio is not queued. | 1178 | * So keep on trimming slice even if bio is not queued. |
1201 | */ | 1179 | */ |
1202 | throtl_trim_slice(td, tg, rw); | 1180 | throtl_trim_slice(td, tg, rw); |
1203 | goto out; | 1181 | goto out_unlock; |
1204 | } | 1182 | } |
1205 | 1183 | ||
1206 | queue_bio: | 1184 | queue_bio: |
@@ -1212,16 +1190,52 @@ queue_bio:
1212 | tg->nr_queued[READ], tg->nr_queued[WRITE]); | 1190 | tg->nr_queued[READ], tg->nr_queued[WRITE]); |
1213 | 1191 | ||
1214 | throtl_add_bio_tg(q->td, tg, bio); | 1192 | throtl_add_bio_tg(q->td, tg, bio); |
1215 | *biop = NULL; | 1193 | throttled = true; |
1216 | 1194 | ||
1217 | if (update_disptime) { | 1195 | if (update_disptime) { |
1218 | tg_update_disptime(td, tg); | 1196 | tg_update_disptime(td, tg); |
1219 | throtl_schedule_next_dispatch(td); | 1197 | throtl_schedule_next_dispatch(td); |
1220 | } | 1198 | } |
1221 | 1199 | ||
1200 | out_unlock: | ||
1201 | spin_unlock_irq(q->queue_lock); | ||
1222 | out: | 1202 | out: |
1203 | return throttled; | ||
1204 | } | ||
1205 | |||
1206 | /** | ||
1207 | * blk_throtl_drain - drain throttled bios | ||
1208 | * @q: request_queue to drain throttled bios for | ||
1209 | * | ||
1210 | * Dispatch all currently throttled bios on @q through ->make_request_fn(). | ||
1211 | */ | ||
1212 | void blk_throtl_drain(struct request_queue *q) | ||
1213 | __releases(q->queue_lock) __acquires(q->queue_lock) | ||
1214 | { | ||
1215 | struct throtl_data *td = q->td; | ||
1216 | struct throtl_rb_root *st = &td->tg_service_tree; | ||
1217 | struct throtl_grp *tg; | ||
1218 | struct bio_list bl; | ||
1219 | struct bio *bio; | ||
1220 | |||
1221 | lockdep_is_held(q->queue_lock); | ||
1222 | |||
1223 | bio_list_init(&bl); | ||
1224 | |||
1225 | while ((tg = throtl_rb_first(st))) { | ||
1226 | throtl_dequeue_tg(td, tg); | ||
1227 | |||
1228 | while ((bio = bio_list_peek(&tg->bio_lists[READ]))) | ||
1229 | tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl); | ||
1230 | while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))) | ||
1231 | tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl); | ||
1232 | } | ||
1223 | spin_unlock_irq(q->queue_lock); | 1233 | spin_unlock_irq(q->queue_lock); |
1224 | return 0; | 1234 | |
1235 | while ((bio = bio_list_pop(&bl))) | ||
1236 | generic_make_request(bio); | ||
1237 | |||
1238 | spin_lock_irq(q->queue_lock); | ||
1225 | } | 1239 | } |
1226 | 1240 | ||
1227 | int blk_throtl_init(struct request_queue *q) | 1241 | int blk_throtl_init(struct request_queue *q) |
@@ -1296,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q)
1296 | * it. | 1310 | * it. |
1297 | */ | 1311 | */ |
1298 | throtl_shutdown_wq(q); | 1312 | throtl_shutdown_wq(q); |
1299 | throtl_td_free(td); | 1313 | } |
1314 | |||
1315 | void blk_throtl_release(struct request_queue *q) | ||
1316 | { | ||
1317 | kfree(q->td); | ||
1300 | } | 1318 | } |
1301 | 1319 | ||
1302 | static int __init throtl_init(void) | 1320 | static int __init throtl_init(void) |
diff --git a/block/blk.h b/block/blk.h
index 20b900a377c9..3f6551b3c92d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -15,6 +15,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
15 | struct bio *bio); | 15 | struct bio *bio); |
16 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | 16 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, |
17 | struct bio *bio); | 17 | struct bio *bio); |
18 | void blk_drain_queue(struct request_queue *q, bool drain_all); | ||
18 | void blk_dequeue_request(struct request *rq); | 19 | void blk_dequeue_request(struct request *rq); |
19 | void __blk_queue_free_tags(struct request_queue *q); | 20 | void __blk_queue_free_tags(struct request_queue *q); |
20 | bool __blk_end_bidi_request(struct request *rq, int error, | 21 | bool __blk_end_bidi_request(struct request *rq, int error, |
@@ -188,4 +189,21 @@ static inline int blk_do_io_stat(struct request *rq)
188 | (rq->cmd_flags & REQ_DISCARD)); | 189 | (rq->cmd_flags & REQ_DISCARD)); |
189 | } | 190 | } |
190 | 191 | ||
191 | #endif | 192 | #ifdef CONFIG_BLK_DEV_THROTTLING |
193 | extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio); | ||
194 | extern void blk_throtl_drain(struct request_queue *q); | ||
195 | extern int blk_throtl_init(struct request_queue *q); | ||
196 | extern void blk_throtl_exit(struct request_queue *q); | ||
197 | extern void blk_throtl_release(struct request_queue *q); | ||
198 | #else /* CONFIG_BLK_DEV_THROTTLING */ | ||
199 | static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) | ||
200 | { | ||
201 | return false; | ||
202 | } | ||
203 | static inline void blk_throtl_drain(struct request_queue *q) { } | ||
204 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | ||
205 | static inline void blk_throtl_exit(struct request_queue *q) { } | ||
206 | static inline void blk_throtl_release(struct request_queue *q) { } | ||
207 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | ||
208 | |||
209 | #endif /* BLK_INTERNAL_H */ | ||
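With the hooks moved into block/blk.h, blk_throtl_bio() now takes the bio directly and returns bool: true means blk-throttle has queued the bio and the caller must stop processing it. Below is a minimal sketch of how a submission path might consume the new return value; the helper name is illustrative and the real caller is not shown in this hunk.

	static void submit_bio_checked(struct request_queue *q, struct bio *bio)
	{
		if (blk_throtl_bio(q, bio))
			return;		/* throttled; blk-throttle resubmits it later */

		q->make_request_fn(q, bio);	/* dispatch now; make_request_fn is void */
	}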
diff --git a/block/elevator.c b/block/elevator.c index a3b64bc71d88..66343d6917d0 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
@@ -31,7 +31,6 @@ | |||
31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
33 | #include <linux/compiler.h> | 33 | #include <linux/compiler.h> |
34 | #include <linux/delay.h> | ||
35 | #include <linux/blktrace_api.h> | 34 | #include <linux/blktrace_api.h> |
36 | #include <linux/hash.h> | 35 | #include <linux/hash.h> |
37 | #include <linux/uaccess.h> | 36 | #include <linux/uaccess.h> |
@@ -182,7 +181,7 @@ static void elevator_attach(struct request_queue *q, struct elevator_queue *eq, | |||
182 | eq->elevator_data = data; | 181 | eq->elevator_data = data; |
183 | } | 182 | } |
184 | 183 | ||
185 | static char chosen_elevator[16]; | 184 | static char chosen_elevator[ELV_NAME_MAX]; |
186 | 185 | ||
187 | static int __init elevator_setup(char *str) | 186 | static int __init elevator_setup(char *str) |
188 | { | 187 | { |
@@ -606,43 +605,35 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) | |||
606 | void elv_drain_elevator(struct request_queue *q) | 605 | void elv_drain_elevator(struct request_queue *q) |
607 | { | 606 | { |
608 | static int printed; | 607 | static int printed; |
608 | |||
609 | lockdep_assert_held(q->queue_lock); | ||
610 | |||
609 | while (q->elevator->ops->elevator_dispatch_fn(q, 1)) | 611 | while (q->elevator->ops->elevator_dispatch_fn(q, 1)) |
610 | ; | 612 | ; |
611 | if (q->nr_sorted == 0) | 613 | if (q->nr_sorted && printed++ < 10) { |
612 | return; | ||
613 | if (printed++ < 10) { | ||
614 | printk(KERN_ERR "%s: forced dispatching is broken " | 614 | printk(KERN_ERR "%s: forced dispatching is broken " |
615 | "(nr_sorted=%u), please report this\n", | 615 | "(nr_sorted=%u), please report this\n", |
616 | q->elevator->elevator_type->elevator_name, q->nr_sorted); | 616 | q->elevator->elevator_type->elevator_name, q->nr_sorted); |
617 | } | 617 | } |
618 | } | 618 | } |
619 | 619 | ||
620 | /* | ||
621 | * Call with queue lock held, interrupts disabled | ||
622 | */ | ||
623 | void elv_quiesce_start(struct request_queue *q) | 620 | void elv_quiesce_start(struct request_queue *q) |
624 | { | 621 | { |
625 | if (!q->elevator) | 622 | if (!q->elevator) |
626 | return; | 623 | return; |
627 | 624 | ||
625 | spin_lock_irq(q->queue_lock); | ||
628 | queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); | 626 | queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); |
627 | spin_unlock_irq(q->queue_lock); | ||
629 | 628 | ||
630 | /* | 629 | blk_drain_queue(q, false); |
631 | * make sure we don't have any requests in flight | ||
632 | */ | ||
633 | elv_drain_elevator(q); | ||
634 | while (q->rq.elvpriv) { | ||
635 | __blk_run_queue(q); | ||
636 | spin_unlock_irq(q->queue_lock); | ||
637 | msleep(10); | ||
638 | spin_lock_irq(q->queue_lock); | ||
639 | elv_drain_elevator(q); | ||
640 | } | ||
641 | } | 630 | } |
642 | 631 | ||
643 | void elv_quiesce_end(struct request_queue *q) | 632 | void elv_quiesce_end(struct request_queue *q) |
644 | { | 633 | { |
634 | spin_lock_irq(q->queue_lock); | ||
645 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); | 635 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); |
636 | spin_unlock_irq(q->queue_lock); | ||
646 | } | 637 | } |
647 | 638 | ||
648 | void __elv_add_request(struct request_queue *q, struct request *rq, int where) | 639 | void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
@@ -972,7 +963,6 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
972 | /* | 963 | /* |
973 | * Turn on BYPASS and drain all requests w/ elevator private data | 964 | * Turn on BYPASS and drain all requests w/ elevator private data |
974 | */ | 965 | */ |
975 | spin_lock_irq(q->queue_lock); | ||
976 | elv_quiesce_start(q); | 966 | elv_quiesce_start(q); |
977 | 967 | ||
978 | /* | 968 | /* |
@@ -983,8 +973,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
983 | /* | 973 | /* |
984 | * attach and start new elevator | 974 | * attach and start new elevator |
985 | */ | 975 | */ |
976 | spin_lock_irq(q->queue_lock); | ||
986 | elevator_attach(q, e, data); | 977 | elevator_attach(q, e, data); |
987 | |||
988 | spin_unlock_irq(q->queue_lock); | 978 | spin_unlock_irq(q->queue_lock); |
989 | 979 | ||
990 | if (old_elevator->registered) { | 980 | if (old_elevator->registered) { |
@@ -999,9 +989,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
999 | * finally exit old elevator and turn off BYPASS. | 989 | * finally exit old elevator and turn off BYPASS. |
1000 | */ | 990 | */ |
1001 | elevator_exit(old_elevator); | 991 | elevator_exit(old_elevator); |
1002 | spin_lock_irq(q->queue_lock); | ||
1003 | elv_quiesce_end(q); | 992 | elv_quiesce_end(q); |
1004 | spin_unlock_irq(q->queue_lock); | ||
1005 | 993 | ||
1006 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); | 994 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); |
1007 | 995 | ||
@@ -1015,10 +1003,7 @@ fail_register: | |||
1015 | elevator_exit(e); | 1003 | elevator_exit(e); |
1016 | q->elevator = old_elevator; | 1004 | q->elevator = old_elevator; |
1017 | elv_register_queue(q); | 1005 | elv_register_queue(q); |
1018 | 1006 | elv_quiesce_end(q); | |
1019 | spin_lock_irq(q->queue_lock); | ||
1020 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); | ||
1021 | spin_unlock_irq(q->queue_lock); | ||
1022 | 1007 | ||
1023 | return err; | 1008 | return err; |
1024 | } | 1009 | } |
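elv_quiesce_start() no longer open-codes the drain loop (hence the dropped <linux/delay.h> include); it hands the work to the new blk_drain_queue() declared in block/blk.h. The sketch below is a reconstruction from the loop removed above; the real implementation in block/blk-core.c is not shown in this section.

	/* Sketch only, reconstructed from the removed elv_quiesce_start() loop. */
	void blk_drain_queue_sketch(struct request_queue *q, bool drain_all)
	{
		int nr_pending;

		do {
			spin_lock_irq(q->queue_lock);

			if (q->elevator)
				elv_drain_elevator(q);
			__blk_run_queue(q);

			/* drain_all also waits for requests without elevator-private data */
			nr_pending = drain_all ? q->rq.count[READ] + q->rq.count[WRITE]
					       : q->rq.elvpriv;

			spin_unlock_irq(q->queue_lock);

			if (nr_pending)
				msleep(10);
		} while (nr_pending);
	}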
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 4f4230b79bb6..fbdf0d802ec4 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
@@ -565,7 +565,7 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod | |||
565 | { | 565 | { |
566 | int err; | 566 | int err; |
567 | 567 | ||
568 | if (!q || blk_get_queue(q)) | 568 | if (!q) |
569 | return -ENXIO; | 569 | return -ENXIO; |
570 | 570 | ||
571 | switch (cmd) { | 571 | switch (cmd) { |
@@ -686,7 +686,6 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod | |||
686 | err = -ENOTTY; | 686 | err = -ENOTTY; |
687 | } | 687 | } |
688 | 688 | ||
689 | blk_put_queue(q); | ||
690 | return err; | 689 | return err; |
691 | } | 690 | } |
692 | EXPORT_SYMBOL(scsi_cmd_ioctl); | 691 | EXPORT_SYMBOL(scsi_cmd_ioctl); |
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 528f6318ded1..167ba0af47f5 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c | |||
@@ -159,7 +159,7 @@ aoeblk_release(struct gendisk *disk, fmode_t mode) | |||
159 | return 0; | 159 | return 0; |
160 | } | 160 | } |
161 | 161 | ||
162 | static int | 162 | static void |
163 | aoeblk_make_request(struct request_queue *q, struct bio *bio) | 163 | aoeblk_make_request(struct request_queue *q, struct bio *bio) |
164 | { | 164 | { |
165 | struct sk_buff_head queue; | 165 | struct sk_buff_head queue; |
@@ -172,25 +172,25 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
172 | if (bio == NULL) { | 172 | if (bio == NULL) { |
173 | printk(KERN_ERR "aoe: bio is NULL\n"); | 173 | printk(KERN_ERR "aoe: bio is NULL\n"); |
174 | BUG(); | 174 | BUG(); |
175 | return 0; | 175 | return; |
176 | } | 176 | } |
177 | d = bio->bi_bdev->bd_disk->private_data; | 177 | d = bio->bi_bdev->bd_disk->private_data; |
178 | if (d == NULL) { | 178 | if (d == NULL) { |
179 | printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); | 179 | printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); |
180 | BUG(); | 180 | BUG(); |
181 | bio_endio(bio, -ENXIO); | 181 | bio_endio(bio, -ENXIO); |
182 | return 0; | 182 | return; |
183 | } else if (bio->bi_io_vec == NULL) { | 183 | } else if (bio->bi_io_vec == NULL) { |
184 | printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); | 184 | printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); |
185 | BUG(); | 185 | BUG(); |
186 | bio_endio(bio, -ENXIO); | 186 | bio_endio(bio, -ENXIO); |
187 | return 0; | 187 | return; |
188 | } | 188 | } |
189 | buf = mempool_alloc(d->bufpool, GFP_NOIO); | 189 | buf = mempool_alloc(d->bufpool, GFP_NOIO); |
190 | if (buf == NULL) { | 190 | if (buf == NULL) { |
191 | printk(KERN_INFO "aoe: buf allocation failure\n"); | 191 | printk(KERN_INFO "aoe: buf allocation failure\n"); |
192 | bio_endio(bio, -ENOMEM); | 192 | bio_endio(bio, -ENOMEM); |
193 | return 0; | 193 | return; |
194 | } | 194 | } |
195 | memset(buf, 0, sizeof(*buf)); | 195 | memset(buf, 0, sizeof(*buf)); |
196 | INIT_LIST_HEAD(&buf->bufs); | 196 | INIT_LIST_HEAD(&buf->bufs); |
@@ -211,7 +211,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
211 | spin_unlock_irqrestore(&d->lock, flags); | 211 | spin_unlock_irqrestore(&d->lock, flags); |
212 | mempool_free(buf, d->bufpool); | 212 | mempool_free(buf, d->bufpool); |
213 | bio_endio(bio, -ENXIO); | 213 | bio_endio(bio, -ENXIO); |
214 | return 0; | 214 | return; |
215 | } | 215 | } |
216 | 216 | ||
217 | list_add_tail(&buf->bufs, &d->bufq); | 217 | list_add_tail(&buf->bufs, &d->bufq); |
@@ -222,8 +222,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
222 | 222 | ||
223 | spin_unlock_irqrestore(&d->lock, flags); | 223 | spin_unlock_irqrestore(&d->lock, flags); |
224 | aoenet_xmit(&queue); | 224 | aoenet_xmit(&queue); |
225 | |||
226 | return 0; | ||
227 | } | 225 | } |
228 | 226 | ||
229 | static int | 227 | static int |
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index dba1c32e1ddf..d22119d49e53 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -323,7 +323,7 @@ out: | |||
323 | return err; | 323 | return err; |
324 | } | 324 | } |
325 | 325 | ||
326 | static int brd_make_request(struct request_queue *q, struct bio *bio) | 326 | static void brd_make_request(struct request_queue *q, struct bio *bio) |
327 | { | 327 | { |
328 | struct block_device *bdev = bio->bi_bdev; | 328 | struct block_device *bdev = bio->bi_bdev; |
329 | struct brd_device *brd = bdev->bd_disk->private_data; | 329 | struct brd_device *brd = bdev->bd_disk->private_data; |
@@ -359,8 +359,6 @@ static int brd_make_request(struct request_queue *q, struct bio *bio) | |||
359 | 359 | ||
360 | out: | 360 | out: |
361 | bio_endio(bio, err); | 361 | bio_endio(bio, err); |
362 | |||
363 | return 0; | ||
364 | } | 362 | } |
365 | 363 | ||
366 | #ifdef CONFIG_BLK_DEV_XIP | 364 | #ifdef CONFIG_BLK_DEV_XIP |
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index ef2ceed3be4b..36eee3969a98 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
@@ -1507,7 +1507,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev); | |||
1507 | extern int proc_details; | 1507 | extern int proc_details; |
1508 | 1508 | ||
1509 | /* drbd_req */ | 1509 | /* drbd_req */ |
1510 | extern int drbd_make_request(struct request_queue *q, struct bio *bio); | 1510 | extern void drbd_make_request(struct request_queue *q, struct bio *bio); |
1511 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); | 1511 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); |
1512 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); | 1512 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); |
1513 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); | 1513 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3424d675b769..4a0f314086e5 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
@@ -1073,7 +1073,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) | |||
1073 | return 0; | 1073 | return 0; |
1074 | } | 1074 | } |
1075 | 1075 | ||
1076 | int drbd_make_request(struct request_queue *q, struct bio *bio) | 1076 | void drbd_make_request(struct request_queue *q, struct bio *bio) |
1077 | { | 1077 | { |
1078 | unsigned int s_enr, e_enr; | 1078 | unsigned int s_enr, e_enr; |
1079 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; | 1079 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; |
@@ -1081,7 +1081,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio) | |||
1081 | 1081 | ||
1082 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { | 1082 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { |
1083 | bio_endio(bio, -EPERM); | 1083 | bio_endio(bio, -EPERM); |
1084 | return 0; | 1084 | return; |
1085 | } | 1085 | } |
1086 | 1086 | ||
1087 | start_time = jiffies; | 1087 | start_time = jiffies; |
@@ -1100,7 +1100,8 @@ int drbd_make_request(struct request_queue *q, struct bio *bio) | |||
1100 | 1100 | ||
1101 | if (likely(s_enr == e_enr)) { | 1101 | if (likely(s_enr == e_enr)) { |
1102 | inc_ap_bio(mdev, 1); | 1102 | inc_ap_bio(mdev, 1); |
1103 | return drbd_make_request_common(mdev, bio, start_time); | 1103 | drbd_make_request_common(mdev, bio, start_time); |
1104 | return; | ||
1104 | } | 1105 | } |
1105 | 1106 | ||
1106 | /* can this bio be split generically? | 1107 | /* can this bio be split generically? |
@@ -1148,7 +1149,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio) | |||
1148 | 1149 | ||
1149 | bio_pair_release(bp); | 1150 | bio_pair_release(bp); |
1150 | } | 1151 | } |
1151 | return 0; | ||
1152 | } | 1152 | } |
1153 | 1153 | ||
1154 | /* This is called by bio_add_page(). With this function we reduce | 1154 | /* This is called by bio_add_page(). With this function we reduce |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 46cdd6945557..c77983ea86c8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -437,7 +437,7 @@ static struct bio *loop_get_bio(struct loop_device *lo) | |||
437 | return bio_list_pop(&lo->lo_bio_list); | 437 | return bio_list_pop(&lo->lo_bio_list); |
438 | } | 438 | } |
439 | 439 | ||
440 | static int loop_make_request(struct request_queue *q, struct bio *old_bio) | 440 | static void loop_make_request(struct request_queue *q, struct bio *old_bio) |
441 | { | 441 | { |
442 | struct loop_device *lo = q->queuedata; | 442 | struct loop_device *lo = q->queuedata; |
443 | int rw = bio_rw(old_bio); | 443 | int rw = bio_rw(old_bio); |
@@ -455,12 +455,11 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio) | |||
455 | loop_add_bio(lo, old_bio); | 455 | loop_add_bio(lo, old_bio); |
456 | wake_up(&lo->lo_event); | 456 | wake_up(&lo->lo_event); |
457 | spin_unlock_irq(&lo->lo_lock); | 457 | spin_unlock_irq(&lo->lo_lock); |
458 | return 0; | 458 | return; |
459 | 459 | ||
460 | out: | 460 | out: |
461 | spin_unlock_irq(&lo->lo_lock); | 461 | spin_unlock_irq(&lo->lo_lock); |
462 | bio_io_error(old_bio); | 462 | bio_io_error(old_bio); |
463 | return 0; | ||
464 | } | 463 | } |
465 | 464 | ||
466 | struct switch_request { | 465 | struct switch_request { |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index e133f094ab08..a63b0a2b7805 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
@@ -2444,7 +2444,7 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err) | |||
2444 | pkt_bio_finished(pd); | 2444 | pkt_bio_finished(pd); |
2445 | } | 2445 | } |
2446 | 2446 | ||
2447 | static int pkt_make_request(struct request_queue *q, struct bio *bio) | 2447 | static void pkt_make_request(struct request_queue *q, struct bio *bio) |
2448 | { | 2448 | { |
2449 | struct pktcdvd_device *pd; | 2449 | struct pktcdvd_device *pd; |
2450 | char b[BDEVNAME_SIZE]; | 2450 | char b[BDEVNAME_SIZE]; |
@@ -2473,7 +2473,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
2473 | cloned_bio->bi_end_io = pkt_end_io_read_cloned; | 2473 | cloned_bio->bi_end_io = pkt_end_io_read_cloned; |
2474 | pd->stats.secs_r += bio->bi_size >> 9; | 2474 | pd->stats.secs_r += bio->bi_size >> 9; |
2475 | pkt_queue_bio(pd, cloned_bio); | 2475 | pkt_queue_bio(pd, cloned_bio); |
2476 | return 0; | 2476 | return; |
2477 | } | 2477 | } |
2478 | 2478 | ||
2479 | if (!test_bit(PACKET_WRITABLE, &pd->flags)) { | 2479 | if (!test_bit(PACKET_WRITABLE, &pd->flags)) { |
@@ -2509,7 +2509,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
2509 | pkt_make_request(q, &bp->bio1); | 2509 | pkt_make_request(q, &bp->bio1); |
2510 | pkt_make_request(q, &bp->bio2); | 2510 | pkt_make_request(q, &bp->bio2); |
2511 | bio_pair_release(bp); | 2511 | bio_pair_release(bp); |
2512 | return 0; | 2512 | return; |
2513 | } | 2513 | } |
2514 | } | 2514 | } |
2515 | 2515 | ||
@@ -2533,7 +2533,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
2533 | } | 2533 | } |
2534 | spin_unlock(&pkt->lock); | 2534 | spin_unlock(&pkt->lock); |
2535 | spin_unlock(&pd->cdrw.active_list_lock); | 2535 | spin_unlock(&pd->cdrw.active_list_lock); |
2536 | return 0; | 2536 | return; |
2537 | } else { | 2537 | } else { |
2538 | blocked_bio = 1; | 2538 | blocked_bio = 1; |
2539 | } | 2539 | } |
@@ -2584,10 +2584,9 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
2584 | */ | 2584 | */ |
2585 | wake_up(&pd->wqueue); | 2585 | wake_up(&pd->wqueue); |
2586 | } | 2586 | } |
2587 | return 0; | 2587 | return; |
2588 | end_io: | 2588 | end_io: |
2589 | bio_io_error(bio); | 2589 | bio_io_error(bio); |
2590 | return 0; | ||
2591 | } | 2590 | } |
2592 | 2591 | ||
2593 | 2592 | ||
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index b3bdb8af89cf..7fad7af87eb2 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c | |||
@@ -596,7 +596,7 @@ out: | |||
596 | return next; | 596 | return next; |
597 | } | 597 | } |
598 | 598 | ||
599 | static int ps3vram_make_request(struct request_queue *q, struct bio *bio) | 599 | static void ps3vram_make_request(struct request_queue *q, struct bio *bio) |
600 | { | 600 | { |
601 | struct ps3_system_bus_device *dev = q->queuedata; | 601 | struct ps3_system_bus_device *dev = q->queuedata; |
602 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); | 602 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); |
@@ -610,13 +610,11 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio) | |||
610 | spin_unlock_irq(&priv->lock); | 610 | spin_unlock_irq(&priv->lock); |
611 | 611 | ||
612 | if (busy) | 612 | if (busy) |
613 | return 0; | 613 | return; |
614 | 614 | ||
615 | do { | 615 | do { |
616 | bio = ps3vram_do_bio(dev, bio); | 616 | bio = ps3vram_do_bio(dev, bio); |
617 | } while (bio); | 617 | } while (bio); |
618 | |||
619 | return 0; | ||
620 | } | 618 | } |
621 | 619 | ||
622 | static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) | 620 | static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 031ca720d926..aa2712060bfb 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
@@ -513,7 +513,7 @@ static void process_page(unsigned long data) | |||
513 | } | 513 | } |
514 | } | 514 | } |
515 | 515 | ||
516 | static int mm_make_request(struct request_queue *q, struct bio *bio) | 516 | static void mm_make_request(struct request_queue *q, struct bio *bio) |
517 | { | 517 | { |
518 | struct cardinfo *card = q->queuedata; | 518 | struct cardinfo *card = q->queuedata; |
519 | pr_debug("mm_make_request %llu %u\n", | 519 | pr_debug("mm_make_request %llu %u\n", |
@@ -525,7 +525,7 @@ static int mm_make_request(struct request_queue *q, struct bio *bio) | |||
525 | card->biotail = &bio->bi_next; | 525 | card->biotail = &bio->bi_next; |
526 | spin_unlock_irq(&card->lock); | 526 | spin_unlock_irq(&card->lock); |
527 | 527 | ||
528 | return 0; | 528 | return; |
529 | } | 529 | } |
530 | 530 | ||
531 | static irqreturn_t mm_interrupt(int irq, void *__card) | 531 | static irqreturn_t mm_interrupt(int irq, void *__card) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 52b39f335bb3..7b986e77b75e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -180,9 +180,6 @@ struct mapped_device { | |||
180 | /* forced geometry settings */ | 180 | /* forced geometry settings */ |
181 | struct hd_geometry geometry; | 181 | struct hd_geometry geometry; |
182 | 182 | ||
183 | /* For saving the address of __make_request for request based dm */ | ||
184 | make_request_fn *saved_make_request_fn; | ||
185 | |||
186 | /* sysfs handle */ | 183 | /* sysfs handle */ |
187 | struct kobject kobj; | 184 | struct kobject kobj; |
188 | 185 | ||
@@ -1391,7 +1388,7 @@ out: | |||
1391 | * The request function that just remaps the bio built up by | 1388 | * The request function that just remaps the bio built up by |
1392 | * dm_merge_bvec. | 1389 | * dm_merge_bvec. |
1393 | */ | 1390 | */ |
1394 | static int _dm_request(struct request_queue *q, struct bio *bio) | 1391 | static void _dm_request(struct request_queue *q, struct bio *bio) |
1395 | { | 1392 | { |
1396 | int rw = bio_data_dir(bio); | 1393 | int rw = bio_data_dir(bio); |
1397 | struct mapped_device *md = q->queuedata; | 1394 | struct mapped_device *md = q->queuedata; |
@@ -1412,19 +1409,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio) | |||
1412 | queue_io(md, bio); | 1409 | queue_io(md, bio); |
1413 | else | 1410 | else |
1414 | bio_io_error(bio); | 1411 | bio_io_error(bio); |
1415 | return 0; | 1412 | return; |
1416 | } | 1413 | } |
1417 | 1414 | ||
1418 | __split_and_process_bio(md, bio); | 1415 | __split_and_process_bio(md, bio); |
1419 | up_read(&md->io_lock); | 1416 | up_read(&md->io_lock); |
1420 | return 0; | 1417 | return; |
1421 | } | ||
1422 | |||
1423 | static int dm_make_request(struct request_queue *q, struct bio *bio) | ||
1424 | { | ||
1425 | struct mapped_device *md = q->queuedata; | ||
1426 | |||
1427 | return md->saved_make_request_fn(q, bio); /* call __make_request() */ | ||
1428 | } | 1418 | } |
1429 | 1419 | ||
1430 | static int dm_request_based(struct mapped_device *md) | 1420 | static int dm_request_based(struct mapped_device *md) |
@@ -1432,14 +1422,14 @@ static int dm_request_based(struct mapped_device *md) | |||
1432 | return blk_queue_stackable(md->queue); | 1422 | return blk_queue_stackable(md->queue); |
1433 | } | 1423 | } |
1434 | 1424 | ||
1435 | static int dm_request(struct request_queue *q, struct bio *bio) | 1425 | static void dm_request(struct request_queue *q, struct bio *bio) |
1436 | { | 1426 | { |
1437 | struct mapped_device *md = q->queuedata; | 1427 | struct mapped_device *md = q->queuedata; |
1438 | 1428 | ||
1439 | if (dm_request_based(md)) | 1429 | if (dm_request_based(md)) |
1440 | return dm_make_request(q, bio); | 1430 | blk_queue_bio(q, bio); |
1441 | 1431 | else | |
1442 | return _dm_request(q, bio); | 1432 | _dm_request(q, bio); |
1443 | } | 1433 | } |
1444 | 1434 | ||
1445 | void dm_dispatch_request(struct request *rq) | 1435 | void dm_dispatch_request(struct request *rq) |
@@ -2172,7 +2162,6 @@ static int dm_init_request_based_queue(struct mapped_device *md) | |||
2172 | return 0; | 2162 | return 0; |
2173 | 2163 | ||
2174 | md->queue = q; | 2164 | md->queue = q; |
2175 | md->saved_make_request_fn = md->queue->make_request_fn; | ||
2176 | dm_init_md_queue(md); | 2165 | dm_init_md_queue(md); |
2177 | blk_queue_softirq_done(md->queue, dm_softirq_done); | 2166 | blk_queue_softirq_done(md->queue, dm_softirq_done); |
2178 | blk_queue_prep_rq(md->queue, dm_prep_fn); | 2167 | blk_queue_prep_rq(md->queue, dm_prep_fn); |
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 23078dabb6df..5ef304d4341c 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c | |||
@@ -169,7 +169,7 @@ static void add_sector(conf_t *conf, sector_t start, int mode) | |||
169 | conf->nfaults = n+1; | 169 | conf->nfaults = n+1; |
170 | } | 170 | } |
171 | 171 | ||
172 | static int make_request(mddev_t *mddev, struct bio *bio) | 172 | static void make_request(mddev_t *mddev, struct bio *bio) |
173 | { | 173 | { |
174 | conf_t *conf = mddev->private; | 174 | conf_t *conf = mddev->private; |
175 | int failit = 0; | 175 | int failit = 0; |
@@ -181,7 +181,7 @@ static int make_request(mddev_t *mddev, struct bio *bio) | |||
181 | * just fail immediately | 181 | * just fail immediately |
182 | */ | 182 | */ |
183 | bio_endio(bio, -EIO); | 183 | bio_endio(bio, -EIO); |
184 | return 0; | 184 | return; |
185 | } | 185 | } |
186 | 186 | ||
187 | if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9), | 187 | if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9), |
@@ -211,15 +211,15 @@ static int make_request(mddev_t *mddev, struct bio *bio) | |||
211 | } | 211 | } |
212 | if (failit) { | 212 | if (failit) { |
213 | struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev); | 213 | struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev); |
214 | |||
214 | b->bi_bdev = conf->rdev->bdev; | 215 | b->bi_bdev = conf->rdev->bdev; |
215 | b->bi_private = bio; | 216 | b->bi_private = bio; |
216 | b->bi_end_io = faulty_fail; | 217 | b->bi_end_io = faulty_fail; |
217 | generic_make_request(b); | 218 | bio = b; |
218 | return 0; | 219 | } else |
219 | } else { | ||
220 | bio->bi_bdev = conf->rdev->bdev; | 220 | bio->bi_bdev = conf->rdev->bdev; |
221 | return 1; | 221 | |
222 | } | 222 | generic_make_request(bio); |
223 | } | 223 | } |
224 | 224 | ||
225 | static void status(struct seq_file *seq, mddev_t *mddev) | 225 | static void status(struct seq_file *seq, mddev_t *mddev) |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 6cd2c313e800..c6ee491d98e7 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -264,14 +264,14 @@ static int linear_stop (mddev_t *mddev) | |||
264 | return 0; | 264 | return 0; |
265 | } | 265 | } |
266 | 266 | ||
267 | static int linear_make_request (mddev_t *mddev, struct bio *bio) | 267 | static void linear_make_request (mddev_t *mddev, struct bio *bio) |
268 | { | 268 | { |
269 | dev_info_t *tmp_dev; | 269 | dev_info_t *tmp_dev; |
270 | sector_t start_sector; | 270 | sector_t start_sector; |
271 | 271 | ||
272 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 272 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
273 | md_flush_request(mddev, bio); | 273 | md_flush_request(mddev, bio); |
274 | return 0; | 274 | return; |
275 | } | 275 | } |
276 | 276 | ||
277 | rcu_read_lock(); | 277 | rcu_read_lock(); |
@@ -293,7 +293,7 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio) | |||
293 | (unsigned long long)start_sector); | 293 | (unsigned long long)start_sector); |
294 | rcu_read_unlock(); | 294 | rcu_read_unlock(); |
295 | bio_io_error(bio); | 295 | bio_io_error(bio); |
296 | return 0; | 296 | return; |
297 | } | 297 | } |
298 | if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > | 298 | if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > |
299 | tmp_dev->end_sector)) { | 299 | tmp_dev->end_sector)) { |
@@ -307,20 +307,17 @@ static int linear_make_request (mddev_t *mddev, struct bio *bio) | |||
307 | 307 | ||
308 | bp = bio_split(bio, end_sector - bio->bi_sector); | 308 | bp = bio_split(bio, end_sector - bio->bi_sector); |
309 | 309 | ||
310 | if (linear_make_request(mddev, &bp->bio1)) | 310 | linear_make_request(mddev, &bp->bio1); |
311 | generic_make_request(&bp->bio1); | 311 | linear_make_request(mddev, &bp->bio2); |
312 | if (linear_make_request(mddev, &bp->bio2)) | ||
313 | generic_make_request(&bp->bio2); | ||
314 | bio_pair_release(bp); | 312 | bio_pair_release(bp); |
315 | return 0; | 313 | return; |
316 | } | 314 | } |
317 | 315 | ||
318 | bio->bi_bdev = tmp_dev->rdev->bdev; | 316 | bio->bi_bdev = tmp_dev->rdev->bdev; |
319 | bio->bi_sector = bio->bi_sector - start_sector | 317 | bio->bi_sector = bio->bi_sector - start_sector |
320 | + tmp_dev->rdev->data_offset; | 318 | + tmp_dev->rdev->data_offset; |
321 | rcu_read_unlock(); | 319 | rcu_read_unlock(); |
322 | 320 | generic_make_request(bio); | |
323 | return 1; | ||
324 | } | 321 | } |
325 | 322 | ||
326 | static void linear_status (struct seq_file *seq, mddev_t *mddev) | 323 | static void linear_status (struct seq_file *seq, mddev_t *mddev) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 5c95ccb59500..8f52d4eb78a0 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -335,18 +335,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock); | |||
335 | * call has finished, the bio has been linked into some internal structure | 335 | * call has finished, the bio has been linked into some internal structure |
336 | * and so is visible to ->quiesce(), so we don't need the refcount any more. | 336 | * and so is visible to ->quiesce(), so we don't need the refcount any more. |
337 | */ | 337 | */ |
338 | static int md_make_request(struct request_queue *q, struct bio *bio) | 338 | static void md_make_request(struct request_queue *q, struct bio *bio) |
339 | { | 339 | { |
340 | const int rw = bio_data_dir(bio); | 340 | const int rw = bio_data_dir(bio); |
341 | mddev_t *mddev = q->queuedata; | 341 | mddev_t *mddev = q->queuedata; |
342 | int rv; | ||
343 | int cpu; | 342 | int cpu; |
344 | unsigned int sectors; | 343 | unsigned int sectors; |
345 | 344 | ||
346 | if (mddev == NULL || mddev->pers == NULL | 345 | if (mddev == NULL || mddev->pers == NULL |
347 | || !mddev->ready) { | 346 | || !mddev->ready) { |
348 | bio_io_error(bio); | 347 | bio_io_error(bio); |
349 | return 0; | 348 | return; |
350 | } | 349 | } |
351 | smp_rmb(); /* Ensure implications of 'active' are visible */ | 350 | smp_rmb(); /* Ensure implications of 'active' are visible */ |
352 | rcu_read_lock(); | 351 | rcu_read_lock(); |
@@ -371,7 +370,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
371 | * go away inside make_request | 370 | * go away inside make_request |
372 | */ | 371 | */ |
373 | sectors = bio_sectors(bio); | 372 | sectors = bio_sectors(bio); |
374 | rv = mddev->pers->make_request(mddev, bio); | 373 | mddev->pers->make_request(mddev, bio); |
375 | 374 | ||
376 | cpu = part_stat_lock(); | 375 | cpu = part_stat_lock(); |
377 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); | 376 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); |
@@ -380,8 +379,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
380 | 379 | ||
381 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) | 380 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) |
382 | wake_up(&mddev->sb_wait); | 381 | wake_up(&mddev->sb_wait); |
383 | |||
384 | return rv; | ||
385 | } | 382 | } |
386 | 383 | ||
387 | /* mddev_suspend makes sure no new requests are submitted | 384 | /* mddev_suspend makes sure no new requests are submitted |
@@ -480,8 +477,7 @@ static void md_submit_flush_data(struct work_struct *ws) | |||
480 | bio_endio(bio, 0); | 477 | bio_endio(bio, 0); |
481 | else { | 478 | else { |
482 | bio->bi_rw &= ~REQ_FLUSH; | 479 | bio->bi_rw &= ~REQ_FLUSH; |
483 | if (mddev->pers->make_request(mddev, bio)) | 480 | mddev->pers->make_request(mddev, bio); |
484 | generic_make_request(bio); | ||
485 | } | 481 | } |
486 | 482 | ||
487 | mddev->flush_bio = NULL; | 483 | mddev->flush_bio = NULL; |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 0a309dc29b45..1509a3eb9ae1 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -424,7 +424,7 @@ struct mdk_personality | |||
424 | int level; | 424 | int level; |
425 | struct list_head list; | 425 | struct list_head list; |
426 | struct module *owner; | 426 | struct module *owner; |
427 | int (*make_request)(mddev_t *mddev, struct bio *bio); | 427 | void (*make_request)(mddev_t *mddev, struct bio *bio); |
428 | int (*run)(mddev_t *mddev); | 428 | int (*run)(mddev_t *mddev); |
429 | int (*stop)(mddev_t *mddev); | 429 | int (*stop)(mddev_t *mddev); |
430 | void (*status)(struct seq_file *seq, mddev_t *mddev); | 430 | void (*status)(struct seq_file *seq, mddev_t *mddev); |
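Since ->make_request() in struct mdk_personality now returns void, a personality no longer returns 1 to ask md_make_request() to resubmit the remapped bio; it calls generic_make_request() itself, as the linear, raid0 and multipath hunks above do. A minimal sketch of the new convention; the conf layout is hypothetical.

	static void example_make_request(mddev_t *mddev, struct bio *bio)
	{
		struct example_conf *conf = mddev->private;	/* hypothetical per-array state */

		if (unlikely(bio->bi_rw & REQ_FLUSH)) {
			md_flush_request(mddev, bio);	/* flushes still go through md core */
			return;
		}

		bio->bi_bdev = conf->rdev->bdev;	/* remap onto the backing device */
		generic_make_request(bio);		/* submit directly; nothing to return */
	}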
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d5b5fb300171..618dd9e22513 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio, int error) | |||
106 | rdev_dec_pending(rdev, conf->mddev); | 106 | rdev_dec_pending(rdev, conf->mddev); |
107 | } | 107 | } |
108 | 108 | ||
109 | static int multipath_make_request(mddev_t *mddev, struct bio * bio) | 109 | static void multipath_make_request(mddev_t *mddev, struct bio * bio) |
110 | { | 110 | { |
111 | multipath_conf_t *conf = mddev->private; | 111 | multipath_conf_t *conf = mddev->private; |
112 | struct multipath_bh * mp_bh; | 112 | struct multipath_bh * mp_bh; |
@@ -114,7 +114,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio) | |||
114 | 114 | ||
115 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 115 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
116 | md_flush_request(mddev, bio); | 116 | md_flush_request(mddev, bio); |
117 | return 0; | 117 | return; |
118 | } | 118 | } |
119 | 119 | ||
120 | mp_bh = mempool_alloc(conf->pool, GFP_NOIO); | 120 | mp_bh = mempool_alloc(conf->pool, GFP_NOIO); |
@@ -126,7 +126,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio) | |||
126 | if (mp_bh->path < 0) { | 126 | if (mp_bh->path < 0) { |
127 | bio_endio(bio, -EIO); | 127 | bio_endio(bio, -EIO); |
128 | mempool_free(mp_bh, conf->pool); | 128 | mempool_free(mp_bh, conf->pool); |
129 | return 0; | 129 | return; |
130 | } | 130 | } |
131 | multipath = conf->multipaths + mp_bh->path; | 131 | multipath = conf->multipaths + mp_bh->path; |
132 | 132 | ||
@@ -137,7 +137,7 @@ static int multipath_make_request(mddev_t *mddev, struct bio * bio) | |||
137 | mp_bh->bio.bi_end_io = multipath_end_request; | 137 | mp_bh->bio.bi_end_io = multipath_end_request; |
138 | mp_bh->bio.bi_private = mp_bh; | 138 | mp_bh->bio.bi_private = mp_bh; |
139 | generic_make_request(&mp_bh->bio); | 139 | generic_make_request(&mp_bh->bio); |
140 | return 0; | 140 | return; |
141 | } | 141 | } |
142 | 142 | ||
143 | static void multipath_status (struct seq_file *seq, mddev_t *mddev) | 143 | static void multipath_status (struct seq_file *seq, mddev_t *mddev) |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index e86bf3682e1e..4066615d61af 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -466,7 +466,7 @@ static inline int is_io_in_chunk_boundary(mddev_t *mddev, | |||
466 | } | 466 | } |
467 | } | 467 | } |
468 | 468 | ||
469 | static int raid0_make_request(mddev_t *mddev, struct bio *bio) | 469 | static void raid0_make_request(mddev_t *mddev, struct bio *bio) |
470 | { | 470 | { |
471 | unsigned int chunk_sects; | 471 | unsigned int chunk_sects; |
472 | sector_t sector_offset; | 472 | sector_t sector_offset; |
@@ -475,7 +475,7 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio) | |||
475 | 475 | ||
476 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 476 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
477 | md_flush_request(mddev, bio); | 477 | md_flush_request(mddev, bio); |
478 | return 0; | 478 | return; |
479 | } | 479 | } |
480 | 480 | ||
481 | chunk_sects = mddev->chunk_sectors; | 481 | chunk_sects = mddev->chunk_sectors; |
@@ -495,13 +495,10 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio) | |||
495 | else | 495 | else |
496 | bp = bio_split(bio, chunk_sects - | 496 | bp = bio_split(bio, chunk_sects - |
497 | sector_div(sector, chunk_sects)); | 497 | sector_div(sector, chunk_sects)); |
498 | if (raid0_make_request(mddev, &bp->bio1)) | 498 | raid0_make_request(mddev, &bp->bio1); |
499 | generic_make_request(&bp->bio1); | 499 | raid0_make_request(mddev, &bp->bio2); |
500 | if (raid0_make_request(mddev, &bp->bio2)) | ||
501 | generic_make_request(&bp->bio2); | ||
502 | |||
503 | bio_pair_release(bp); | 500 | bio_pair_release(bp); |
504 | return 0; | 501 | return; |
505 | } | 502 | } |
506 | 503 | ||
507 | sector_offset = bio->bi_sector; | 504 | sector_offset = bio->bi_sector; |
@@ -511,10 +508,9 @@ static int raid0_make_request(mddev_t *mddev, struct bio *bio) | |||
511 | bio->bi_bdev = tmp_dev->bdev; | 508 | bio->bi_bdev = tmp_dev->bdev; |
512 | bio->bi_sector = sector_offset + zone->dev_start + | 509 | bio->bi_sector = sector_offset + zone->dev_start + |
513 | tmp_dev->data_offset; | 510 | tmp_dev->data_offset; |
514 | /* | 511 | |
515 | * Let the main block layer submit the IO and resolve recursion: | 512 | generic_make_request(bio); |
516 | */ | 513 | return; |
517 | return 1; | ||
518 | 514 | ||
519 | bad_map: | 515 | bad_map: |
520 | printk("md/raid0:%s: make_request bug: can't convert block across chunks" | 516 | printk("md/raid0:%s: make_request bug: can't convert block across chunks" |
@@ -523,7 +519,7 @@ bad_map: | |||
523 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); | 519 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); |
524 | 520 | ||
525 | bio_io_error(bio); | 521 | bio_io_error(bio); |
526 | return 0; | 522 | return; |
527 | } | 523 | } |
528 | 524 | ||
529 | static void raid0_status(struct seq_file *seq, mddev_t *mddev) | 525 | static void raid0_status(struct seq_file *seq, mddev_t *mddev) |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index d9587dffe533..2948a520f7ba 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -785,7 +785,7 @@ do_sync_io: | |||
785 | PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); | 785 | PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); |
786 | } | 786 | } |
787 | 787 | ||
788 | static int make_request(mddev_t *mddev, struct bio * bio) | 788 | static void make_request(mddev_t *mddev, struct bio * bio) |
789 | { | 789 | { |
790 | conf_t *conf = mddev->private; | 790 | conf_t *conf = mddev->private; |
791 | mirror_info_t *mirror; | 791 | mirror_info_t *mirror; |
@@ -870,7 +870,7 @@ read_again: | |||
870 | if (rdisk < 0) { | 870 | if (rdisk < 0) { |
871 | /* couldn't find anywhere to read from */ | 871 | /* couldn't find anywhere to read from */ |
872 | raid_end_bio_io(r1_bio); | 872 | raid_end_bio_io(r1_bio); |
873 | return 0; | 873 | return; |
874 | } | 874 | } |
875 | mirror = conf->mirrors + rdisk; | 875 | mirror = conf->mirrors + rdisk; |
876 | 876 | ||
@@ -928,7 +928,7 @@ read_again: | |||
928 | goto read_again; | 928 | goto read_again; |
929 | } else | 929 | } else |
930 | generic_make_request(read_bio); | 930 | generic_make_request(read_bio); |
931 | return 0; | 931 | return; |
932 | } | 932 | } |
933 | 933 | ||
934 | /* | 934 | /* |
@@ -1123,8 +1123,6 @@ read_again: | |||
1123 | 1123 | ||
1124 | if (do_sync || !bitmap || !plugged) | 1124 | if (do_sync || !bitmap || !plugged) |
1125 | md_wakeup_thread(mddev->thread); | 1125 | md_wakeup_thread(mddev->thread); |
1126 | |||
1127 | return 0; | ||
1128 | } | 1126 | } |
1129 | 1127 | ||
1130 | static void status(struct seq_file *seq, mddev_t *mddev) | 1128 | static void status(struct seq_file *seq, mddev_t *mddev) |
@@ -2174,7 +2172,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i | |||
2174 | bio->bi_next = NULL; | 2172 | bio->bi_next = NULL; |
2175 | bio->bi_flags &= ~(BIO_POOL_MASK-1); | 2173 | bio->bi_flags &= ~(BIO_POOL_MASK-1); |
2176 | bio->bi_flags |= 1 << BIO_UPTODATE; | 2174 | bio->bi_flags |= 1 << BIO_UPTODATE; |
2177 | bio->bi_comp_cpu = -1; | ||
2178 | bio->bi_rw = READ; | 2175 | bio->bi_rw = READ; |
2179 | bio->bi_vcnt = 0; | 2176 | bio->bi_vcnt = 0; |
2180 | bio->bi_idx = 0; | 2177 | bio->bi_idx = 0; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 0cd9672cf9cb..ea5fc0b6a84c 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -830,7 +830,7 @@ static void unfreeze_array(conf_t *conf) | |||
830 | spin_unlock_irq(&conf->resync_lock); | 830 | spin_unlock_irq(&conf->resync_lock); |
831 | } | 831 | } |
832 | 832 | ||
833 | static int make_request(mddev_t *mddev, struct bio * bio) | 833 | static void make_request(mddev_t *mddev, struct bio * bio) |
834 | { | 834 | { |
835 | conf_t *conf = mddev->private; | 835 | conf_t *conf = mddev->private; |
836 | mirror_info_t *mirror; | 836 | mirror_info_t *mirror; |
@@ -849,7 +849,7 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
849 | 849 | ||
850 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 850 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
851 | md_flush_request(mddev, bio); | 851 | md_flush_request(mddev, bio); |
852 | return 0; | 852 | return; |
853 | } | 853 | } |
854 | 854 | ||
855 | /* If this request crosses a chunk boundary, we need to | 855 | /* If this request crosses a chunk boundary, we need to |
@@ -881,10 +881,8 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
881 | conf->nr_waiting++; | 881 | conf->nr_waiting++; |
882 | spin_unlock_irq(&conf->resync_lock); | 882 | spin_unlock_irq(&conf->resync_lock); |
883 | 883 | ||
884 | if (make_request(mddev, &bp->bio1)) | 884 | make_request(mddev, &bp->bio1); |
885 | generic_make_request(&bp->bio1); | 885 | make_request(mddev, &bp->bio2); |
886 | if (make_request(mddev, &bp->bio2)) | ||
887 | generic_make_request(&bp->bio2); | ||
888 | 886 | ||
889 | spin_lock_irq(&conf->resync_lock); | 887 | spin_lock_irq(&conf->resync_lock); |
890 | conf->nr_waiting--; | 888 | conf->nr_waiting--; |
@@ -892,14 +890,14 @@ static int make_request(mddev_t *mddev, struct bio * bio) | |||
892 | spin_unlock_irq(&conf->resync_lock); | 890 | spin_unlock_irq(&conf->resync_lock); |
893 | 891 | ||
894 | bio_pair_release(bp); | 892 | bio_pair_release(bp); |
895 | return 0; | 893 | return; |
896 | bad_map: | 894 | bad_map: |
897 | printk("md/raid10:%s: make_request bug: can't convert block across chunks" | 895 | printk("md/raid10:%s: make_request bug: can't convert block across chunks" |
898 | " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, | 896 | " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, |
899 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); | 897 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); |
900 | 898 | ||
901 | bio_io_error(bio); | 899 | bio_io_error(bio); |
902 | return 0; | 900 | return; |
903 | } | 901 | } |
904 | 902 | ||
905 | md_write_start(mddev, bio); | 903 | md_write_start(mddev, bio); |
@@ -942,7 +940,7 @@ read_again: | |||
942 | slot = r10_bio->read_slot; | 940 | slot = r10_bio->read_slot; |
943 | if (disk < 0) { | 941 | if (disk < 0) { |
944 | raid_end_bio_io(r10_bio); | 942 | raid_end_bio_io(r10_bio); |
945 | return 0; | 943 | return; |
946 | } | 944 | } |
947 | mirror = conf->mirrors + disk; | 945 | mirror = conf->mirrors + disk; |
948 | 946 | ||
@@ -990,7 +988,7 @@ read_again: | |||
990 | goto read_again; | 988 | goto read_again; |
991 | } else | 989 | } else |
992 | generic_make_request(read_bio); | 990 | generic_make_request(read_bio); |
993 | return 0; | 991 | return; |
994 | } | 992 | } |
995 | 993 | ||
996 | /* | 994 | /* |
@@ -1158,7 +1156,6 @@ retry_write: | |||
1158 | 1156 | ||
1159 | if (do_sync || !mddev->bitmap || !plugged) | 1157 | if (do_sync || !mddev->bitmap || !plugged) |
1160 | md_wakeup_thread(mddev->thread); | 1158 | md_wakeup_thread(mddev->thread); |
1161 | return 0; | ||
1162 | } | 1159 | } |
1163 | 1160 | ||
1164 | static void status(struct seq_file *seq, mddev_t *mddev) | 1161 | static void status(struct seq_file *seq, mddev_t *mddev) |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index ac5e8b57e50f..83f2c44e170f 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -3695,7 +3695,7 @@ static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) | |||
3695 | return sh; | 3695 | return sh; |
3696 | } | 3696 | } |
3697 | 3697 | ||
3698 | static int make_request(mddev_t *mddev, struct bio * bi) | 3698 | static void make_request(mddev_t *mddev, struct bio * bi) |
3699 | { | 3699 | { |
3700 | raid5_conf_t *conf = mddev->private; | 3700 | raid5_conf_t *conf = mddev->private; |
3701 | int dd_idx; | 3701 | int dd_idx; |
@@ -3708,7 +3708,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
3708 | 3708 | ||
3709 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { | 3709 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { |
3710 | md_flush_request(mddev, bi); | 3710 | md_flush_request(mddev, bi); |
3711 | return 0; | 3711 | return; |
3712 | } | 3712 | } |
3713 | 3713 | ||
3714 | md_write_start(mddev, bi); | 3714 | md_write_start(mddev, bi); |
@@ -3716,7 +3716,7 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
3716 | if (rw == READ && | 3716 | if (rw == READ && |
3717 | mddev->reshape_position == MaxSector && | 3717 | mddev->reshape_position == MaxSector && |
3718 | chunk_aligned_read(mddev,bi)) | 3718 | chunk_aligned_read(mddev,bi)) |
3719 | return 0; | 3719 | return; |
3720 | 3720 | ||
3721 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | 3721 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
3722 | last_sector = bi->bi_sector + (bi->bi_size>>9); | 3722 | last_sector = bi->bi_sector + (bi->bi_size>>9); |
@@ -3851,8 +3851,6 @@ static int make_request(mddev_t *mddev, struct bio * bi) | |||
3851 | 3851 | ||
3852 | bio_endio(bi, 0); | 3852 | bio_endio(bi, 0); |
3853 | } | 3853 | } |
3854 | |||
3855 | return 0; | ||
3856 | } | 3854 | } |
3857 | 3855 | ||
3858 | static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); | 3856 | static sector_t raid5_size(mddev_t *mddev, sector_t sectors, int raid_disks); |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 9b43ae94beba..a5a55da2a1ac 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -27,7 +27,7 @@ | |||
27 | 27 | ||
28 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); | 28 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); |
29 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); | 29 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); |
30 | static int dcssblk_make_request(struct request_queue *q, struct bio *bio); | 30 | static void dcssblk_make_request(struct request_queue *q, struct bio *bio); |
31 | static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, | 31 | static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, |
32 | void **kaddr, unsigned long *pfn); | 32 | void **kaddr, unsigned long *pfn); |
33 | 33 | ||
@@ -814,7 +814,7 @@ out: | |||
814 | return rc; | 814 | return rc; |
815 | } | 815 | } |
816 | 816 | ||
817 | static int | 817 | static void |
818 | dcssblk_make_request(struct request_queue *q, struct bio *bio) | 818 | dcssblk_make_request(struct request_queue *q, struct bio *bio) |
819 | { | 819 | { |
820 | struct dcssblk_dev_info *dev_info; | 820 | struct dcssblk_dev_info *dev_info; |
@@ -871,10 +871,9 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) | |||
871 | bytes_done += bvec->bv_len; | 871 | bytes_done += bvec->bv_len; |
872 | } | 872 | } |
873 | bio_endio(bio, 0); | 873 | bio_endio(bio, 0); |
874 | return 0; | 874 | return; |
875 | fail: | 875 | fail: |
876 | bio_io_error(bio); | 876 | bio_io_error(bio); |
877 | return 0; | ||
878 | } | 877 | } |
879 | 878 | ||
880 | static int | 879 | static int |
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 1f6a4d894e73..98f3e4ade924 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void) | |||
181 | /* | 181 | /* |
182 | * Block device make request function. | 182 | * Block device make request function. |
183 | */ | 183 | */ |
184 | static int xpram_make_request(struct request_queue *q, struct bio *bio) | 184 | static void xpram_make_request(struct request_queue *q, struct bio *bio) |
185 | { | 185 | { |
186 | xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; | 186 | xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; |
187 | struct bio_vec *bvec; | 187 | struct bio_vec *bvec; |
@@ -221,10 +221,9 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio) | |||
221 | } | 221 | } |
222 | set_bit(BIO_UPTODATE, &bio->bi_flags); | 222 | set_bit(BIO_UPTODATE, &bio->bi_flags); |
223 | bio_endio(bio, 0); | 223 | bio_endio(bio, 0); |
224 | return 0; | 224 | return; |
225 | fail: | 225 | fail: |
226 | bio_io_error(bio); | 226 | bio_io_error(bio); |
227 | return 0; | ||
228 | } | 227 | } |
229 | 228 | ||
230 | static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 229 | static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index d70ec1ad10de..02589cab6710 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c | |||
@@ -556,24 +556,22 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio) | |||
556 | /* | 556 | /* |
557 | * Handler function for all zram I/O requests. | 557 | * Handler function for all zram I/O requests. |
558 | */ | 558 | */ |
559 | static int zram_make_request(struct request_queue *queue, struct bio *bio) | 559 | static void zram_make_request(struct request_queue *queue, struct bio *bio) |
560 | { | 560 | { |
561 | struct zram *zram = queue->queuedata; | 561 | struct zram *zram = queue->queuedata; |
562 | 562 | ||
563 | if (!valid_io_request(zram, bio)) { | 563 | if (!valid_io_request(zram, bio)) { |
564 | zram_stat64_inc(zram, &zram->stats.invalid_io); | 564 | zram_stat64_inc(zram, &zram->stats.invalid_io); |
565 | bio_io_error(bio); | 565 | bio_io_error(bio); |
566 | return 0; | 566 | return; |
567 | } | 567 | } |
568 | 568 | ||
569 | if (unlikely(!zram->init_done) && zram_init_device(zram)) { | 569 | if (unlikely(!zram->init_done) && zram_init_device(zram)) { |
570 | bio_io_error(bio); | 570 | bio_io_error(bio); |
571 | return 0; | 571 | return; |
572 | } | 572 | } |
573 | 573 | ||
574 | __zram_make_request(zram, bio, bio_data_dir(bio)); | 574 | __zram_make_request(zram, bio, bio_data_dir(bio)); |
575 | |||
576 | return 0; | ||
577 | } | 575 | } |
578 | 576 | ||
579 | void zram_reset_device(struct zram *zram) | 577 | void zram_reset_device(struct zram *zram) |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
@@ -255,7 +255,6 @@ void bio_init(struct bio *bio) | |||
255 | { | 255 | { |
256 | memset(bio, 0, sizeof(*bio)); | 256 | memset(bio, 0, sizeof(*bio)); |
257 | bio->bi_flags = 1 << BIO_UPTODATE; | 257 | bio->bi_flags = 1 << BIO_UPTODATE; |
258 | bio->bi_comp_cpu = -1; | ||
259 | atomic_set(&bio->bi_cnt, 1); | 258 | atomic_set(&bio->bi_cnt, 1); |
260 | } | 259 | } |
261 | EXPORT_SYMBOL(bio_init); | 260 | EXPORT_SYMBOL(bio_init); |
diff --git a/include/linux/bio.h b/include/linux/bio.h index ce33e6868a2f..a3c071c9e189 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -269,14 +269,6 @@ extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int); | |||
269 | extern unsigned int bvec_nr_vecs(unsigned short idx); | 269 | extern unsigned int bvec_nr_vecs(unsigned short idx); |
270 | 270 | ||
271 | /* | 271 | /* |
272 | * Allow queuer to specify a completion CPU for this bio | ||
273 | */ | ||
274 | static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) | ||
275 | { | ||
276 | bio->bi_comp_cpu = cpu; | ||
277 | } | ||
278 | |||
279 | /* | ||
280 | * bio_set is used to allow other portions of the IO system to | 272 | * bio_set is used to allow other portions of the IO system to |
281 | * allocate their own private memory pools for bio and iovec structures. | 273 | * allocate their own private memory pools for bio and iovec structures. |
282 | * These memory pools in turn all allocate from the bio_slab | 274 | * These memory pools in turn all allocate from the bio_slab |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 71fc53bb8f1c..4053cbd4490e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -59,8 +59,6 @@ struct bio { | |||
59 | 59 | ||
60 | unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ | 60 | unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ |
61 | 61 | ||
62 | unsigned int bi_comp_cpu; /* completion CPU */ | ||
63 | |||
64 | atomic_t bi_cnt; /* pin count */ | 62 | atomic_t bi_cnt; /* pin count */ |
65 | 63 | ||
66 | struct bio_vec *bi_io_vec; /* the actual vec list */ | 64 | struct bio_vec *bi_io_vec; /* the actual vec list */ |
@@ -93,11 +91,10 @@ struct bio { | |||
93 | #define BIO_BOUNCED 5 /* bio is a bounce bio */ | 91 | #define BIO_BOUNCED 5 /* bio is a bounce bio */ |
94 | #define BIO_USER_MAPPED 6 /* contains user pages */ | 92 | #define BIO_USER_MAPPED 6 /* contains user pages */ |
95 | #define BIO_EOPNOTSUPP 7 /* not supported */ | 93 | #define BIO_EOPNOTSUPP 7 /* not supported */ |
96 | #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ | 94 | #define BIO_NULL_MAPPED 8 /* contains invalid user pages */ |
97 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ | 95 | #define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */ |
98 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ | 96 | #define BIO_QUIET 10 /* Make BIO Quiet */ |
99 | #define BIO_QUIET 11 /* Make BIO Quiet */ | 97 | #define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */ |
100 | #define BIO_MAPPED_INTEGRITY 12/* integrity metadata has been remapped */ | ||
101 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) | 98 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) |
102 | 99 | ||
103 | /* | 100 | /* |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7fbaa9103344..5267cd2f20dc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -195,7 +195,7 @@ struct request_pm_state | |||
195 | #include <linux/elevator.h> | 195 | #include <linux/elevator.h> |
196 | 196 | ||
197 | typedef void (request_fn_proc) (struct request_queue *q); | 197 | typedef void (request_fn_proc) (struct request_queue *q); |
198 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 198 | typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); |
199 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 199 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
200 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 200 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
201 | 201 | ||
@@ -680,6 +680,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
680 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 680 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
681 | struct scsi_ioctl_command __user *); | 681 | struct scsi_ioctl_command __user *); |
682 | 682 | ||
683 | extern void blk_queue_bio(struct request_queue *q, struct bio *bio); | ||
684 | |||
683 | /* | 685 | /* |
684 | * A queue has just exited congestion. Note this in the global counter of | 686 | * A queue has just exited congestion. Note this in the global counter of |
685 | * congested queues, and wake up anyone who was waiting for requests to be | 687 | * congested queues, and wake up anyone who was waiting for requests to be |
@@ -863,16 +865,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int); | |||
863 | extern void blk_put_queue(struct request_queue *); | 865 | extern void blk_put_queue(struct request_queue *); |
864 | 866 | ||
865 | /* | 867 | /* |
866 | * Note: Code in between changing the blk_plug list/cb_list or element of such | 868 | * blk_plug permits building a queue of related requests by holding the I/O |
867 | * lists is preemptable, but such code can't do sleep (or be very careful), | 869 | * fragments for a short period. This allows merging of sequential requests |
868 | * otherwise data is corrupted. For details, please check schedule() where | 870 | * into a single larger request. As the requests are moved from a per-task list to
869 | * blk_schedule_flush_plug() is called. | 871 | * the device's request_queue in a batch, this results in improved scalability |
872 | * as the lock contention for request_queue lock is reduced. | ||
873 | * | ||
874 | * It is ok not to disable preemption when adding the request to the plug list | ||
875 | * or when attempting a merge, because blk_schedule_flush_plug() will only flush | ||
876 | * the plug list when the task sleeps by itself. For details, please see | ||
877 | * schedule() where blk_schedule_flush_plug() is called. | ||
870 | */ | 878 | */ |
871 | struct blk_plug { | 879 | struct blk_plug { |
872 | unsigned long magic; | 880 | unsigned long magic; /* detect uninitialized use-cases */ |
873 | struct list_head list; | 881 | struct list_head list; /* requests */ |
874 | struct list_head cb_list; | 882 | struct list_head cb_list; /* md requires an unplug callback */ |
875 | unsigned int should_sort; | 883 | unsigned int should_sort; /* list to be sorted before flushing? */ |
876 | }; | 884 | }; |
877 | #define BLK_MAX_REQUEST_COUNT 16 | 885 | #define BLK_MAX_REQUEST_COUNT 16 |
878 | 886 | ||
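The rewritten comment above describes plugging; in practice a submitter brackets a burst of I/O with blk_start_plug()/blk_finish_plug() so adjacent requests can merge before reaching the driver. A short sketch, assuming the bios have already been built elsewhere:

/* Sketch: submit a batch of prepared bios under one plug so sequential
 * requests can be merged before the queue lock is taken. */
static void example_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests collect on the per-task list */
	for (i = 0; i < nr; i++)
		submit_bio(WRITE, bios[i]);
	blk_finish_plug(&plug);		/* batch is flushed to the request_queue */
}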
@@ -1189,20 +1197,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) | |||
1189 | } | 1197 | } |
1190 | #endif | 1198 | #endif |
1191 | 1199 | ||
1192 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
1193 | extern int blk_throtl_init(struct request_queue *q); | ||
1194 | extern void blk_throtl_exit(struct request_queue *q); | ||
1195 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | ||
1196 | #else /* CONFIG_BLK_DEV_THROTTLING */ | ||
1197 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | ||
1198 | { | ||
1199 | return 0; | ||
1200 | } | ||
1201 | |||
1202 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | ||
1203 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | ||
1204 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | ||
1205 | |||
1206 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1200 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
1207 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1201 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
1208 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 1202 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ |
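The blk_throtl_* prototypes removed in this hunk disappear from the public header entirely (presumably they now live in a block-layer-internal header). On the context kept above: MODULE_ALIAS_BLOCKDEV_MAJOR lets a block driver be auto-loaded when a node with its major number is first opened. A one-line illustrative use (the major number is just an example from the local/experimental range):

MODULE_ALIAS_BLOCKDEV_MAJOR(240);	/* roughly, a "block-major-240-*" module alias */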
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index d800d5142184..1d0f7a2ff73b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -38,6 +38,12 @@ struct elevator_ops | |||
38 | elevator_merged_fn *elevator_merged_fn; | 38 | elevator_merged_fn *elevator_merged_fn; |
39 | elevator_merge_req_fn *elevator_merge_req_fn; | 39 | elevator_merge_req_fn *elevator_merge_req_fn; |
40 | elevator_allow_merge_fn *elevator_allow_merge_fn; | 40 | elevator_allow_merge_fn *elevator_allow_merge_fn; |
41 | |||
42 | /* | ||
43 | * Used for both plugged list and elevator merging and in the | ||
44 | * former case called without queue_lock. Read comment on top of | ||
45 | * attempt_plug_merge() for details. | ||
46 | */ | ||
41 | elevator_bio_merged_fn *elevator_bio_merged_fn; | 47 | elevator_bio_merged_fn *elevator_bio_merged_fn; |
42 | 48 | ||
43 | elevator_dispatch_fn *elevator_dispatch_fn; | 49 | elevator_dispatch_fn *elevator_dispatch_fn; |
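The new comment above notes that elevator_bio_merged_fn can now also be invoked for plug-list merges, i.e. without queue_lock held. Schematically, the hook sits in an I/O scheduler's ops table as below; the handler here is hypothetical and therefore avoids anything that would need the queue lock:

static void example_bio_merged(struct request_queue *q, struct request *rq,
			       struct bio *bio)
{
	/* Hypothetical accounting; must tolerate being called without
	 * q->queue_lock when the merge came from a task's plug list. */
	example_account_merge(q, rq, bio);	/* assumed lockless helper */
}

static struct elevator_type example_iosched = {
	.ops = {
		.elevator_bio_merged_fn	= example_bio_merged,
		/* remaining hooks omitted for brevity */
	},
	.elevator_name	= "example",
};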
diff --git a/kernel/sys.c b/kernel/sys.c index 18ee1d2f6474..1dbbe695a5ef 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -1172,7 +1172,7 @@ DECLARE_RWSEM(uts_sem); | |||
1172 | static int override_release(char __user *release, int len) | 1172 | static int override_release(char __user *release, int len) |
1173 | { | 1173 | { |
1174 | int ret = 0; | 1174 | int ret = 0; |
1175 | char buf[len]; | 1175 | char buf[65]; |
1176 | 1176 | ||
1177 | if (current->personality & UNAME26) { | 1177 | if (current->personality & UNAME26) { |
1178 | char *rest = UTS_RELEASE; | 1178 | char *rest = UTS_RELEASE; |
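The sys.c hunk replaces a stack buffer sized by the caller-supplied len (a variable-length array) with a fixed 65-byte buffer, presumably __NEW_UTS_LEN + 1, so an oversized len can no longer grow the kernel stack. Schematically, the safe pattern is that the buffer bound is a compile-time constant and only the copy-out is limited by len (this is a sketch of the pattern, not the exact body of override_release()):

char buf[65];				/* fixed bound, independent of the caller */
int copied;

/* ... format the spoofed release string into buf ... */

copied = min(len, (int)sizeof(buf));	/* clamp the user-supplied length */
if (copy_to_user(release, buf, copied))
	ret = -EFAULT;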
diff --git a/mm/bounce.c b/mm/bounce.c index 1481de68184b..434fb4f0c5e4 100644 --- a/mm/bounce.c +++ b/mm/bounce.c | |||
@@ -14,6 +14,7 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/hash.h> | 15 | #include <linux/hash.h> |
16 | #include <linux/highmem.h> | 16 | #include <linux/highmem.h> |
17 | #include <linux/bootmem.h> | ||
17 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
18 | 19 | ||
19 | #include <trace/events/block.h> | 20 | #include <trace/events/block.h> |
@@ -26,12 +27,10 @@ static mempool_t *page_pool, *isa_page_pool; | |||
26 | #ifdef CONFIG_HIGHMEM | 27 | #ifdef CONFIG_HIGHMEM |
27 | static __init int init_emergency_pool(void) | 28 | static __init int init_emergency_pool(void) |
28 | { | 29 | { |
29 | struct sysinfo i; | 30 | #ifndef CONFIG_MEMORY_HOTPLUG |
30 | si_meminfo(&i); | 31 | if (max_pfn <= max_low_pfn) |
31 | si_swapinfo(&i); | ||
32 | |||
33 | if (!i.totalhigh) | ||
34 | return 0; | 32 | return 0; |
33 | #endif | ||
35 | 34 | ||
36 | page_pool = mempool_create_page_pool(POOL_SIZE, 0); | 35 | page_pool = mempool_create_page_pool(POOL_SIZE, 0); |
37 | BUG_ON(!page_pool); | 36 | BUG_ON(!page_pool); |
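Finally, the bounce.c hunk changes how init_emergency_pool() decides whether the highmem bounce pool is needed: instead of asking si_meminfo() for totalhigh, it compares max_pfn with max_low_pfn, and with CONFIG_MEMORY_HOTPLUG it always creates the pool, since (as I read it) highmem could be hot-added after boot. The pool backs blk_queue_bounce(); a driver that cannot reach highmem opts into bouncing roughly like this:

/* Hypothetical driver setup: any page above the low-memory boundary
 * will be copied through the emergency pool created above. */
blk_queue_bounce_limit(q, BLK_BOUNCE_HIGH);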