42 files changed, 587 insertions, 621 deletions
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 48e50f8c1c7e..e3011338ab40 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -59,7 +59,7 @@ struct nfhd_device {
 	struct gendisk *disk;
 };
 
-static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
+static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct nfhd_device *dev = queue->queuedata;
 	struct bio_vec *bvec;
@@ -76,7 +76,6 @@ static int nfhd_make_request(struct request_queue *queue, struct bio *bio)
 		sec += len;
 	}
 	bio_endio(bio, 0);
-	return 0;
 }
 
 static int nfhd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
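The nfblock conversion above (and the axonram one below) follow the same new contract: a make_request handler returns void, and both success and failure are reported only through the bio, via bio_endio()/bio_io_error(). A minimal sketch of a handler written against the new prototype; my_dev and my_dev_xfer() are illustrative stand-ins, not code from this patch:

/* Sketch only: assumes the post-patch prototype where ->make_request_fn
 * returns void.  my_dev and my_dev_xfer() are hypothetical. */
static void my_make_request(struct request_queue *q, struct bio *bio)
{
	struct my_dev *dev = q->queuedata;
	struct bio_vec *bvec;
	unsigned short i;
	sector_t sector = bio->bi_sector;

	bio_for_each_segment(bvec, bio, i) {
		if (my_dev_xfer(dev, bvec, &sector, bio_data_dir(bio))) {
			bio_io_error(bio);	/* failure is reported via the bio */
			return;			/* no error code to return */
		}
	}
	bio_endio(bio, 0);			/* success is reported here too */
}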
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 265f0f09395a..ba4271919062 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -104,7 +104,7 @@ axon_ram_irq_handler(int irq, void *dev)
  * axon_ram_make_request - make_request() method for block device
  * @queue, @bio: see blk_queue_make_request()
  */
-static int
+static void
 axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
@@ -113,7 +113,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	struct bio_vec *vec;
 	unsigned int transfered;
 	unsigned short idx;
-	int rc = 0;
 
 	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
 	phys_end = bank->io_addr + bank->size;
@@ -121,8 +120,7 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	bio_for_each_segment(vec, bio, idx) {
 		if (unlikely(phys_mem + vec->bv_len > phys_end)) {
 			bio_io_error(bio);
-			rc = -ERANGE;
-			break;
+			return;
 		}
 
 		user_mem = page_address(vec->bv_page) + vec->bv_offset;
@@ -135,8 +133,6 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 		transfered += vec->bv_len;
 	}
 	bio_endio(bio, 0);
-
-	return rc;
 }
 
 /**
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b596e54ddd71..8f630cec906e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -768,25 +768,14 @@ static uint64_t blkio_get_stat(struct blkio_group *blkg,
 	return disk_total;
 }
 
-static int blkio_check_dev_num(dev_t dev)
-{
-	int part = 0;
-	struct gendisk *disk;
-
-	disk = get_gendisk(dev, &part);
-	if (!disk || part)
-		return -ENODEV;
-
-	return 0;
-}
-
 static int blkio_policy_parse_and_set(char *buf,
 	struct blkio_policy_node *newpn, enum blkio_policy_id plid, int fileid)
 {
+	struct gendisk *disk = NULL;
 	char *s[4], *p, *major_s = NULL, *minor_s = NULL;
-	int ret;
 	unsigned long major, minor;
-	int i = 0;
+	int i = 0, ret = -EINVAL;
+	int part;
 	dev_t dev;
 	u64 temp;
 
@@ -804,37 +793,36 @@ static int blkio_policy_parse_and_set(char *buf,
 	}
 
 	if (i != 2)
-		return -EINVAL;
+		goto out;
 
 	p = strsep(&s[0], ":");
 	if (p != NULL)
 		major_s = p;
 	else
-		return -EINVAL;
+		goto out;
 
 	minor_s = s[0];
 	if (!minor_s)
-		return -EINVAL;
+		goto out;
 
-	ret = strict_strtoul(major_s, 10, &major);
-	if (ret)
-		return -EINVAL;
+	if (strict_strtoul(major_s, 10, &major))
+		goto out;
 
-	ret = strict_strtoul(minor_s, 10, &minor);
-	if (ret)
-		return -EINVAL;
+	if (strict_strtoul(minor_s, 10, &minor))
+		goto out;
 
 	dev = MKDEV(major, minor);
 
-	ret = strict_strtoull(s[1], 10, &temp);
-	if (ret)
-		return -EINVAL;
+	if (strict_strtoull(s[1], 10, &temp))
+		goto out;
 
 	/* For rule removal, do not check for device presence. */
 	if (temp) {
-		ret = blkio_check_dev_num(dev);
-		if (ret)
-			return ret;
+		disk = get_gendisk(dev, &part);
+		if (!disk || part) {
+			ret = -ENODEV;
+			goto out;
+		}
 	}
 
 	newpn->dev = dev;
@@ -843,7 +831,7 @@ static int blkio_policy_parse_and_set(char *buf,
 	case BLKIO_POLICY_PROP:
 		if ((temp < BLKIO_WEIGHT_MIN && temp > 0) ||
 		    temp > BLKIO_WEIGHT_MAX)
-			return -EINVAL;
+			goto out;
 
 		newpn->plid = plid;
 		newpn->fileid = fileid;
@@ -860,7 +848,7 @@ static int blkio_policy_parse_and_set(char *buf,
 	case BLKIO_THROTL_read_iops_device:
 	case BLKIO_THROTL_write_iops_device:
 		if (temp > THROTL_IOPS_MAX)
-			return -EINVAL;
+			goto out;
 
 		newpn->plid = plid;
 		newpn->fileid = fileid;
@@ -871,68 +859,96 @@ static int blkio_policy_parse_and_set(char *buf,
 	default:
 		BUG();
 	}
-
-	return 0;
+	ret = 0;
+out:
+	put_disk(disk);
+	return ret;
 }
 
 unsigned int blkcg_get_weight(struct blkio_cgroup *blkcg,
 			      dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	unsigned int weight;
+
+	spin_lock_irqsave(&blkcg->lock, flags);
 
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_PROP,
 				BLKIO_PROP_weight_device);
 	if (pn)
-		return pn->val.weight;
+		weight = pn->val.weight;
 	else
-		return blkcg->weight;
+		weight = blkcg->weight;
+
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return weight;
 }
 EXPORT_SYMBOL_GPL(blkcg_get_weight);
 
 uint64_t blkcg_get_read_bps(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	uint64_t bps = -1;
 
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_read_bps_device);
 	if (pn)
-		return pn->val.bps;
-	else
-		return -1;
+		bps = pn->val.bps;
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return bps;
 }
 
 uint64_t blkcg_get_write_bps(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	uint64_t bps = -1;
+
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_write_bps_device);
 	if (pn)
-		return pn->val.bps;
-	else
-		return -1;
+		bps = pn->val.bps;
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return bps;
 }
 
 unsigned int blkcg_get_read_iops(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	unsigned int iops = -1;
 
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_read_iops_device);
 	if (pn)
-		return pn->val.iops;
-	else
-		return -1;
+		iops = pn->val.iops;
+	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return iops;
 }
 
 unsigned int blkcg_get_write_iops(struct blkio_cgroup *blkcg, dev_t dev)
 {
 	struct blkio_policy_node *pn;
+	unsigned long flags;
+	unsigned int iops = -1;
+
+	spin_lock_irqsave(&blkcg->lock, flags);
 	pn = blkio_policy_search_node(blkcg, dev, BLKIO_POLICY_THROTL,
 				BLKIO_THROTL_write_iops_device);
 	if (pn)
-		return pn->val.iops;
-	else
-		return -1;
+		iops = pn->val.iops;
	spin_unlock_irqrestore(&blkcg->lock, flags);
+
+	return iops;
 }
 
 /* Checks whether user asked for deleting a policy rule */
@@ -1085,6 +1101,7 @@ static int blkiocg_file_write(struct cgroup *cgrp, struct cftype *cft,
 
 	if (blkio_delete_rule_command(newpn)) {
 		blkio_policy_delete_node(pn);
+		kfree(pn);
 		spin_unlock_irq(&blkcg->lock);
 		goto update_io_group;
 	}
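Two things change in blk-cgroup.c: the parse path keeps the gendisk it looks up and releases it on a single out: exit (put_disk() accepts NULL, which is why disk starts as NULL), and every blkcg_get_* helper now reads the policy-node list under blkcg->lock so a concurrent rule removal cannot be observed mid-lookup. A reduced sketch of that locked-getter shape, with hypothetical names standing in for the blkio structures:

/* Sketch of the pattern used by the blkcg_get_* helpers above: take the
 * per-cgroup lock, read the value, drop the lock, then return.
 * my_cgroup, my_rule and my_find_rule() are hypothetical stand-ins. */
static u64 my_get_limit(struct my_cgroup *cg, dev_t dev)
{
	struct my_rule *rule;
	unsigned long flags;
	u64 limit = -1;			/* "no limit" default */

	spin_lock_irqsave(&cg->lock, flags);
	rule = my_find_rule(cg, dev);	/* list walk is now protected */
	if (rule)
		limit = rule->limit;
	spin_unlock_irqrestore(&cg->lock, flags);

	return limit;			/* never return with the lock held */
}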
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index a71d2904ffb9..6f3ace7e792f 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -188,7 +188,7 @@ struct blkio_policy_node {
 	union {
 		unsigned int weight;
 		/*
-		 * Rate read/write in terms of byptes per second
+		 * Rate read/write in terms of bytes per second
 		 * Whether this rate represents read or write is determined
 		 * by file type "fileid".
 		 */
diff --git a/block/blk-core.c b/block/blk-core.c
index d34433ae7917..f43c8a5840ae 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -28,6 +28,7 @@
 #include <linux/task_io_accounting_ops.h>
 #include <linux/fault-inject.h>
 #include <linux/list_sort.h>
+#include <linux/delay.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/block.h>
@@ -38,8 +39,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_remap);
 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_complete);
 
-static int __make_request(struct request_queue *q, struct bio *bio);
-
 /*
  * For the allocated request tables
  */
@@ -347,30 +346,80 @@ void blk_put_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(blk_put_queue);
 
-/*
- * Note: If a driver supplied the queue lock, it is disconnected
- * by this function. The actual state of the lock doesn't matter
- * here as the request_queue isn't accessible after this point
- * (QUEUE_FLAG_DEAD is set) and no other requests will be queued.
+/**
+ * blk_drain_queue - drain requests from request_queue
+ * @q: queue to drain
+ * @drain_all: whether to drain all requests or only the ones w/ ELVPRIV
+ *
+ * Drain requests from @q. If @drain_all is set, all requests are drained.
+ * If not, only ELVPRIV requests are drained. The caller is responsible
+ * for ensuring that no new requests which need to be drained are queued.
+ */
+void blk_drain_queue(struct request_queue *q, bool drain_all)
+{
+	while (true) {
+		int nr_rqs;
+
+		spin_lock_irq(q->queue_lock);
+
+		elv_drain_elevator(q);
+		if (drain_all)
+			blk_throtl_drain(q);
+
+		__blk_run_queue(q);
+
+		if (drain_all)
+			nr_rqs = q->rq.count[0] + q->rq.count[1];
+		else
+			nr_rqs = q->rq.elvpriv;
+
+		spin_unlock_irq(q->queue_lock);
+
+		if (!nr_rqs)
+			break;
+		msleep(10);
+	}
+}
+
+/**
+ * blk_cleanup_queue - shutdown a request queue
+ * @q: request queue to shutdown
+ *
+ * Mark @q DEAD, drain all pending requests, destroy and put it. All
+ * future requests will be failed immediately with -ENODEV.
  */
 void blk_cleanup_queue(struct request_queue *q)
 {
-	/*
-	 * We know we have process context here, so we can be a little
-	 * cautious and ensure that pending block actions on this device
-	 * are done before moving on. Going into this function, we should
-	 * not have processes doing IO to this device.
-	 */
-	blk_sync_queue(q);
+	spinlock_t *lock = q->queue_lock;
 
-	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	/* mark @q DEAD, no new request or merges will be allowed afterwards */
 	mutex_lock(&q->sysfs_lock);
 	queue_flag_set_unlocked(QUEUE_FLAG_DEAD, q);
-	mutex_unlock(&q->sysfs_lock);
+
+	spin_lock_irq(lock);
+	queue_flag_set(QUEUE_FLAG_NOMERGES, q);
+	queue_flag_set(QUEUE_FLAG_NOXMERGES, q);
+	queue_flag_set(QUEUE_FLAG_DEAD, q);
 
 	if (q->queue_lock != &q->__queue_lock)
 		q->queue_lock = &q->__queue_lock;
 
+	spin_unlock_irq(lock);
+	mutex_unlock(&q->sysfs_lock);
+
+	/*
+	 * Drain all requests queued before DEAD marking. The caller might
+	 * be trying to tear down @q before its elevator is initialized, in
+	 * which case we don't want to call into draining.
+	 */
+	if (q->elevator)
+		blk_drain_queue(q, true);
+
+	/* @q won't process any more request, flush async actions */
+	del_timer_sync(&q->backing_dev_info.laptop_mode_wb_timer);
+	blk_sync_queue(q);
+
+	/* @q is and will stay empty, shutdown and put */
 	blk_put_queue(q);
 }
 EXPORT_SYMBOL(blk_cleanup_queue);
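blk_cleanup_queue() now marks the queue DEAD under both the sysfs mutex and the queue lock, then waits for every outstanding request with blk_drain_queue() before the final put. The drain itself is a simple poll-and-sleep loop; a generic sketch of that idiom, where my_queue, my_kick() and my_pending() are hypothetical names, not part of this patch:

/* Illustrative sketch of the drain idiom blk_drain_queue() uses: kick the
 * queue, read the pending count under the lock, and sleep between polls
 * until nothing is left. */
static void my_drain(struct my_queue *q)
{
	while (true) {
		int pending;

		spin_lock_irq(&q->lock);
		my_kick(q);			/* push already-queued work */
		pending = my_pending(q);	/* read the count while locked */
		spin_unlock_irq(&q->lock);

		if (!pending)
			break;
		msleep(10);			/* don't busy-wait */
	}
}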
@@ -541,7 +590,7 @@ blk_init_allocated_queue_node(struct request_queue *q, request_fn_proc *rfn,
 	/*
 	 * This also sets hw/phys segments, boundary and size
 	 */
-	blk_queue_make_request(q, __make_request);
+	blk_queue_make_request(q, blk_queue_bio);
 
 	q->sg_reserved_size = INT_MAX;
 
@@ -576,7 +625,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, unsigned int flags, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -587,12 +636,10 @@ blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 
 	rq->cmd_flags = flags | REQ_ALLOCED;
 
-	if (priv) {
-		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
-			mempool_free(rq, q->rq.rq_pool);
-			return NULL;
-		}
-		rq->cmd_flags |= REQ_ELVPRIV;
+	if ((flags & REQ_ELVPRIV) &&
+	    unlikely(elv_set_request(q, rq, gfp_mask))) {
+		mempool_free(rq, q->rq.rq_pool);
+		return NULL;
 	}
 
 	return rq;
@@ -651,12 +698,13 @@ static void __freed_request(struct request_queue *q, int sync)
 * A request has just been released. Account for it, update the full and
 * congestion status, wake up any waiters. Called under q->queue_lock.
 */
-static void freed_request(struct request_queue *q, int sync, int priv)
+static void freed_request(struct request_queue *q, unsigned int flags)
 {
 	struct request_list *rl = &q->rq;
+	int sync = rw_is_sync(flags);
 
 	rl->count[sync]--;
-	if (priv)
+	if (flags & REQ_ELVPRIV)
 		rl->elvpriv--;
 
 	__freed_request(q, sync);
@@ -684,10 +732,19 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	return true;
 }
 
-/*
- * Get a free request, queue_lock must be held.
- * Returns NULL on failure, with queue_lock held.
- * Returns !NULL on success, with queue_lock *not held*.
+/**
+ * get_request - get a free request
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ * @gfp_mask: allocation mask
+ *
+ * Get a free request from @q. This function may fail under memory
+ * pressure or if @q is dead.
+ *
+ * Must be callled with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
 static struct request *get_request(struct request_queue *q, int rw_flags,
				   struct bio *bio, gfp_t gfp_mask)
@@ -696,7 +753,10 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
 	const bool is_sync = rw_is_sync(rw_flags) != 0;
-	int may_queue, priv = 0;
+	int may_queue;
+
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
 
 	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
@@ -740,17 +800,17 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	rl->count[is_sync]++;
 	rl->starved[is_sync] = 0;
 
-	if (blk_rq_should_init_elevator(bio)) {
-		priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
-		if (priv)
-			rl->elvpriv++;
+	if (blk_rq_should_init_elevator(bio) &&
+	    !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags)) {
+		rw_flags |= REQ_ELVPRIV;
+		rl->elvpriv++;
 	}
 
 	if (blk_queue_io_stat(q))
 		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -760,7 +820,7 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 		 * wait queue, but this is pretty rare.
 		 */
 		spin_lock_irq(q->queue_lock);
-		freed_request(q, is_sync, priv);
+		freed_request(q, rw_flags);
 
 		/*
 		 * in the very unlikely event that allocation failed and no
@@ -790,11 +850,18 @@ out:
 	return rq;
 }
 
-/*
- * No available requests for this queue, wait for some requests to become
- * available.
+/**
+ * get_request_wait - get a free request with retry
+ * @q: request_queue to allocate request from
+ * @rw_flags: RW and SYNC flags
+ * @bio: bio to allocate request for (can be %NULL)
+ *
+ * Get a free request from @q. This function keeps retrying under memory
+ * pressure and fails iff @q is dead.
 *
- * Called with q->queue_lock held, and returns with it unlocked.
+ * Must be callled with @q->queue_lock held and,
+ * Returns %NULL on failure, with @q->queue_lock held.
+ * Returns !%NULL on success, with @q->queue_lock *not held*.
 */
 static struct request *get_request_wait(struct request_queue *q, int rw_flags,
					struct bio *bio)
@@ -808,6 +875,9 @@ static struct request *get_request_wait(struct request_queue *q, int rw_flags,
 		struct io_context *ioc;
 		struct request_list *rl = &q->rq;
 
+		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+			return NULL;
+
 		prepare_to_wait_exclusive(&rl->wait[is_sync], &wait,
				TASK_UNINTERRUPTIBLE);
 
@@ -838,19 +908,15 @@ struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
 {
 	struct request *rq;
 
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-		return NULL;
-
 	BUG_ON(rw != READ && rw != WRITE);
 
 	spin_lock_irq(q->queue_lock);
-	if (gfp_mask & __GFP_WAIT) {
+	if (gfp_mask & __GFP_WAIT)
 		rq = get_request_wait(q, rw, NULL);
-	} else {
+	else
 		rq = get_request(q, rw, NULL, gfp_mask);
	if (!rq)
		spin_unlock_irq(q->queue_lock);
-	}
 	/* q->queue_lock is unlocked at this point */
 
 	return rq;
@@ -1052,14 +1118,13 @@ void __blk_put_request(struct request_queue *q, struct request *req)
 	 * it didn't come out of our reserved rq pools
 	 */
 	if (req->cmd_flags & REQ_ALLOCED) {
-		int is_sync = rq_is_sync(req) != 0;
-		int priv = req->cmd_flags & REQ_ELVPRIV;
+		unsigned int flags = req->cmd_flags;
 
 		BUG_ON(!list_empty(&req->queuelist));
 		BUG_ON(!hlist_unhashed(&req->hash));
 
 		blk_free_request(q, req);
-		freed_request(q, is_sync, priv);
+		freed_request(q, flags);
 	}
 }
 EXPORT_SYMBOL_GPL(__blk_put_request);
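After this refactor the request's own cmd_flags carry everything freed_request() needs (sync-ness plus REQ_ELVPRIV), so allocation and release stay symmetric instead of passing pre-decoded booleans around. A reduced sketch of that round trip, with hypothetical helpers standing in for the request_list accounting:

/* Sketch of the flag round trip used above: the flags computed at
 * allocation time are stored in rq->cmd_flags and handed back verbatim
 * when the request is freed.  my_account_alloc()/my_account_free() are
 * hypothetical stand-ins for the request_list bookkeeping. */
static struct request *my_alloc(struct request_queue *q, unsigned int flags)
{
	struct request *rq = mempool_alloc(q->rq.rq_pool, GFP_NOIO);

	if (!rq)
		return NULL;
	rq->cmd_flags = flags | REQ_ALLOCED;	/* remember how it was made */
	my_account_alloc(q, flags);		/* e.g. rl->elvpriv++ if ELVPRIV */
	return rq;
}

static void my_free(struct request_queue *q, struct request *rq)
{
	unsigned int flags = rq->cmd_flags;	/* same flags, no re-derivation */

	mempool_free(rq, q->rq.rq_pool);
	my_account_free(q, flags);		/* e.g. rl->elvpriv-- if ELVPRIV */
}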
@@ -1161,18 +1226,32 @@ static bool bio_attempt_front_merge(struct request_queue *q,
 	return true;
 }
 
-/*
- * Attempts to merge with the plugged list in the current process. Returns
- * true if merge was successful, otherwise false.
+/**
+ * attempt_plug_merge - try to merge with %current's plugged list
+ * @q: request_queue new bio is being queued at
+ * @bio: new bio being queued
+ * @request_count: out parameter for number of traversed plugged requests
+ *
+ * Determine whether @bio being queued on @q can be merged with a request
+ * on %current's plugged list. Returns %true if merge was successful,
+ * otherwise %false.
+ *
+ * This function is called without @q->queue_lock; however, elevator is
+ * accessed iff there already are requests on the plugged list which in
+ * turn guarantees validity of the elevator.
+ *
+ * Note that, on successful merge, elevator operation
+ * elevator_bio_merged_fn() will be called without queue lock. Elevator
+ * must be ready for this.
 */
-static bool attempt_plug_merge(struct task_struct *tsk, struct request_queue *q,
-			       struct bio *bio, unsigned int *request_count)
+static bool attempt_plug_merge(struct request_queue *q, struct bio *bio,
+			       unsigned int *request_count)
 {
 	struct blk_plug *plug;
 	struct request *rq;
 	bool ret = false;
 
-	plug = tsk->plug;
+	plug = current->plug;
 	if (!plug)
 		goto out;
 	*request_count = 0;
@@ -1202,7 +1281,6 @@ out:
 
 void init_request_from_bio(struct request *req, struct bio *bio)
 {
-	req->cpu = bio->bi_comp_cpu;
 	req->cmd_type = REQ_TYPE_FS;
 
 	req->cmd_flags |= bio->bi_rw & REQ_COMMON_MASK;
@@ -1215,7 +1293,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	blk_rq_bio_prep(req->q, req, bio);
 }
 
-static int __make_request(struct request_queue *q, struct bio *bio)
+void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
 	const bool sync = !!(bio->bi_rw & REQ_SYNC);
 	struct blk_plug *plug;
@@ -1240,8 +1318,8 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 	 * Check if we can merge with the plugged list before grabbing
 	 * any locks.
 	 */
-	if (attempt_plug_merge(current, q, bio, &request_count))
-		goto out;
+	if (attempt_plug_merge(q, bio, &request_count))
+		return;
 
 	spin_lock_irq(q->queue_lock);
 
@@ -1275,6 +1353,10 @@ get_rq:
 	 * Returns with the queue unlocked.
 	 */
 	req = get_request_wait(q, rw_flags, bio);
+	if (unlikely(!req)) {
+		bio_endio(bio, -ENODEV);	/* @q is dead */
+		goto out_unlock;
+	}
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
@@ -1284,8 +1366,7 @@ get_rq:
 	 */
 	init_request_from_bio(req, bio);
 
-	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags) ||
-	    bio_flagged(bio, BIO_CPU_AFFINE))
+	if (test_bit(QUEUE_FLAG_SAME_COMP, &q->queue_flags))
 		req->cpu = raw_smp_processor_id();
 
 	plug = current->plug;
@@ -1316,9 +1397,8 @@ get_rq:
 out_unlock:
 		spin_unlock_irq(q->queue_lock);
 	}
-out:
-	return 0;
 }
+EXPORT_SYMBOL_GPL(blk_queue_bio);	/* for device mapper only */
 
 /*
  * If bio->bi_dev is a partition, remap the location
@@ -1417,165 +1497,135 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 	return 0;
 }
 
-/**
- * generic_make_request - hand a buffer to its device driver for I/O
- * @bio: The bio describing the location in memory and on the device.
- *
- * generic_make_request() is used to make I/O requests of block
- * devices. It is passed a &struct bio, which describes the I/O that needs
- * to be done.
- *
- * generic_make_request() does not return any status. The
- * success/failure status of the request, along with notification of
- * completion, is delivered asynchronously through the bio->bi_end_io
- * function described (one day) else where.
- *
- * The caller of generic_make_request must make sure that bi_io_vec
- * are set to describe the memory buffer, and that bi_dev and bi_sector are
- * set to describe the device address, and the
- * bi_end_io and optionally bi_private are set to describe how
- * completion notification should be signaled.
- *
- * generic_make_request and the drivers it calls may use bi_next if this
- * bio happens to be merged with someone else, and may change bi_dev and
- * bi_sector for remaps as it sees fit. So the values of these fields
- * should NOT be depended on after the call to generic_make_request.
- */
-static inline void __generic_make_request(struct bio *bio)
+static noinline_for_stack bool
+generic_make_request_checks(struct bio *bio)
 {
 	struct request_queue *q;
-	sector_t old_sector;
-	int ret, nr_sectors = bio_sectors(bio);
-	dev_t old_dev;
+	int nr_sectors = bio_sectors(bio);
 	int err = -EIO;
+	char b[BDEVNAME_SIZE];
+	struct hd_struct *part;
 
 	might_sleep();
 
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
-	/*
-	 * Resolve the mapping until finished. (drivers are
-	 * still free to implement/resolve their own stacking
-	 * by explicitly returning 0)
-	 *
-	 * NOTE: we don't repeat the blk_size check for each new device.
-	 * Stacking drivers are expected to know what they are doing.
-	 */
-	old_sector = -1;
-	old_dev = 0;
-	do {
-		char b[BDEVNAME_SIZE];
-		struct hd_struct *part;
-
-		q = bdev_get_queue(bio->bi_bdev);
-		if (unlikely(!q)) {
-			printk(KERN_ERR
-			       "generic_make_request: Trying to access "
-			       "nonexistent block-device %s (%Lu)\n",
-			       bdevname(bio->bi_bdev, b),
-			       (long long) bio->bi_sector);
-			goto end_io;
-		}
-
-		if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
-			     nr_sectors > queue_max_hw_sectors(q))) {
-			printk(KERN_ERR "bio too big device %s (%u > %u)\n",
-			       bdevname(bio->bi_bdev, b),
-			       bio_sectors(bio),
-			       queue_max_hw_sectors(q));
-			goto end_io;
-		}
-
-		if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
-			goto end_io;
-
-		part = bio->bi_bdev->bd_part;
-		if (should_fail_request(part, bio->bi_size) ||
-		    should_fail_request(&part_to_disk(part)->part0,
-					bio->bi_size))
-			goto end_io;
-
-		/*
-		 * If this device has partitions, remap block n
-		 * of partition p to block n+start(p) of the disk.
-		 */
-		blk_partition_remap(bio);
+	q = bdev_get_queue(bio->bi_bdev);
+	if (unlikely(!q)) {
+		printk(KERN_ERR
+		       "generic_make_request: Trying to access "
+		       "nonexistent block-device %s (%Lu)\n",
+		       bdevname(bio->bi_bdev, b),
+		       (long long) bio->bi_sector);
+		goto end_io;
+	}
 
-		if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-			goto end_io;
+	if (unlikely(!(bio->bi_rw & REQ_DISCARD) &&
+		     nr_sectors > queue_max_hw_sectors(q))) {
+		printk(KERN_ERR "bio too big device %s (%u > %u)\n",
+		       bdevname(bio->bi_bdev, b),
+		       bio_sectors(bio),
+		       queue_max_hw_sectors(q));
+		goto end_io;
+	}
 
-		if (old_sector != -1)
-			trace_block_bio_remap(q, bio, old_dev, old_sector);
+	part = bio->bi_bdev->bd_part;
+	if (should_fail_request(part, bio->bi_size) ||
+	    should_fail_request(&part_to_disk(part)->part0,
+				bio->bi_size))
+		goto end_io;
 
-		old_sector = bio->bi_sector;
-		old_dev = bio->bi_bdev->bd_dev;
+	/*
+	 * If this device has partitions, remap block n
+	 * of partition p to block n+start(p) of the disk.
	 */
+	blk_partition_remap(bio);
 
-		if (bio_check_eod(bio, nr_sectors))
-			goto end_io;
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
+		goto end_io;
 
-		/*
-		 * Filter flush bio's early so that make_request based
-		 * drivers without flush support don't have to worry
-		 * about them.
-		 */
-		if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
-			bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
-			if (!nr_sectors) {
-				err = 0;
-				goto end_io;
-			}
-		}
+	if (bio_check_eod(bio, nr_sectors))
+		goto end_io;
 
-		if ((bio->bi_rw & REQ_DISCARD) &&
-		    (!blk_queue_discard(q) ||
-		     ((bio->bi_rw & REQ_SECURE) &&
-		      !blk_queue_secdiscard(q)))) {
-			err = -EOPNOTSUPP;
-			goto end_io;
-		}
+	/*
+	 * Filter flush bio's early so that make_request based
+	 * drivers without flush support don't have to worry
	 * about them.
+	 */
+	if ((bio->bi_rw & (REQ_FLUSH | REQ_FUA)) && !q->flush_flags) {
+		bio->bi_rw &= ~(REQ_FLUSH | REQ_FUA);
+		if (!nr_sectors) {
+			err = 0;
+			goto end_io;
+		}
+	}
 
-		if (blk_throtl_bio(q, &bio))
-			goto end_io;
-
-		/*
-		 * If bio = NULL, bio has been throttled and will be submitted
-		 * later.
-		 */
-		if (!bio)
-			break;
-
-		trace_block_bio_queue(q, bio);
+	if ((bio->bi_rw & REQ_DISCARD) &&
+	    (!blk_queue_discard(q) ||
+	     ((bio->bi_rw & REQ_SECURE) &&
+	      !blk_queue_secdiscard(q)))) {
+		err = -EOPNOTSUPP;
+		goto end_io;
+	}
 
-		ret = q->make_request_fn(q, bio);
-	} while (ret);
+	if (blk_throtl_bio(q, bio))
+		return false;	/* throttled, will be resubmitted later */
 
-	return;
+	trace_block_bio_queue(q, bio);
+	return true;
 
 end_io:
 	bio_endio(bio, err);
+	return false;
 }
 
-/*
- * We only want one ->make_request_fn to be active at a time,
- * else stack usage with stacked devices could be a problem.
- * So use current->bio_list to keep a list of requests
- * submited by a make_request_fn function.
- * current->bio_list is also used as a flag to say if
- * generic_make_request is currently active in this task or not.
- * If it is NULL, then no make_request is active. If it is non-NULL,
- * then a make_request is active, and new requests should be added
- * at the tail
+/**
+ * generic_make_request - hand a buffer to its device driver for I/O
+ * @bio: The bio describing the location in memory and on the device.
+ *
+ * generic_make_request() is used to make I/O requests of block
+ * devices. It is passed a &struct bio, which describes the I/O that needs
+ * to be done.
+ *
+ * generic_make_request() does not return any status. The
+ * success/failure status of the request, along with notification of
+ * completion, is delivered asynchronously through the bio->bi_end_io
+ * function described (one day) else where.
+ *
+ * The caller of generic_make_request must make sure that bi_io_vec
+ * are set to describe the memory buffer, and that bi_dev and bi_sector are
+ * set to describe the device address, and the
+ * bi_end_io and optionally bi_private are set to describe how
+ * completion notification should be signaled.
+ *
+ * generic_make_request and the drivers it calls may use bi_next if this
+ * bio happens to be merged with someone else, and may resubmit the bio to
+ * a lower device by calling into generic_make_request recursively, which
+ * means the bio should NOT be touched after the call to ->make_request_fn.
 */
 void generic_make_request(struct bio *bio)
 {
 	struct bio_list bio_list_on_stack;
 
+	if (!generic_make_request_checks(bio))
+		return;
+
+	/*
+	 * We only want one ->make_request_fn to be active at a time, else
+	 * stack usage with stacked devices could be a problem. So use
+	 * current->bio_list to keep a list of requests submited by a
+	 * make_request_fn function. current->bio_list is also used as a
+	 * flag to say if generic_make_request is currently active in this
+	 * task or not. If it is NULL, then no make_request is active. If
+	 * it is non-NULL, then a make_request is active, and new requests
+	 * should be added at the tail
+	 */
 	if (current->bio_list) {
-		/* make_request is active */
 		bio_list_add(current->bio_list, bio);
 		return;
 	}
+
 	/* following loop may be a bit non-obvious, and so deserves some
 	 * explanation.
 	 * Before entering the loop, bio->bi_next is NULL (as all callers
@@ -1583,22 +1633,21 @@ void generic_make_request(struct bio *bio)
 	 * We pretend that we have just taken it off a longer list, so
 	 * we assign bio_list to a pointer to the bio_list_on_stack,
 	 * thus initialising the bio_list of new bios to be
-	 * added. __generic_make_request may indeed add some more bios
+	 * added. ->make_request() may indeed add some more bios
 	 * through a recursive call to generic_make_request. If it
 	 * did, we find a non-NULL value in bio_list and re-enter the loop
 	 * from the top. In this case we really did just take the bio
 	 * of the top of the list (no pretending) and so remove it from
-	 * bio_list, and call into __generic_make_request again.
-	 *
-	 * The loop was structured like this to make only one call to
-	 * __generic_make_request (which is important as it is large and
-	 * inlined) and to keep the structure simple.
+	 * bio_list, and call into ->make_request() again.
 	 */
 	BUG_ON(bio->bi_next);
 	bio_list_init(&bio_list_on_stack);
 	current->bio_list = &bio_list_on_stack;
 	do {
-		__generic_make_request(bio);
+		struct request_queue *q = bdev_get_queue(bio->bi_bdev);
+
+		q->make_request_fn(q, bio);
+
 		bio = bio_list_pop(current->bio_list);
 	} while (bio);
 	current->bio_list = NULL; /* deactivate */
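With the checks split out into generic_make_request_checks(), the submission loop itself only juggles current->bio_list: the outermost call owns the on-stack list and nested submissions are queued rather than recursed into. For a stacking driver this means remap-and-resubmit with no return value; a hedged sketch of such a driver's handler, where my_stacked_dev and its fields are invented for illustration:

/* Sketch of a stacking driver under the contract described in the new
 * kerneldoc above: remap the bio and hand it back to generic_make_request()
 * instead of returning a status; the bio must not be touched afterwards.
 * my_stacked_dev is hypothetical. */
static void my_stacked_make_request(struct request_queue *q, struct bio *bio)
{
	struct my_stacked_dev *dev = q->queuedata;

	bio->bi_bdev = dev->lower_bdev;		/* redirect to the lower device */
	bio->bi_sector += dev->start_sector;	/* simple linear remap */

	generic_make_request(bio);	/* queued on current->bio_list if nested */
	/* no return value: completion is reported later via bio_endio() */
}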
@@ -1725,6 +1774,8 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 		where = ELEVATOR_INSERT_FLUSH;
 
 	add_acct_request(q, rq, where);
+	if (where == ELEVATOR_INSERT_FLUSH)
+		__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
 	return 0;
@@ -2628,6 +2679,20 @@ EXPORT_SYMBOL(kblockd_schedule_delayed_work);
 
 #define PLUG_MAGIC 0x91827364
 
+/**
+ * blk_start_plug - initialize blk_plug and track it inside the task_struct
+ * @plug: The &struct blk_plug that needs to be initialized
+ *
+ * Description:
+ *   Tracking blk_plug inside the task_struct will help with auto-flushing the
+ *   pending I/O should the task end up blocking between blk_start_plug() and
+ *   blk_finish_plug(). This is important from a performance perspective, but
+ *   also ensures that we don't deadlock. For instance, if the task is blocking
+ *   for a memory allocation, memory reclaim could end up wanting to free a
+ *   page belonging to that request that is currently residing in our private
+ *   plug. By flushing the pending I/O when the process goes to sleep, we avoid
+ *   this kind of deadlock.
+ */
 void blk_start_plug(struct blk_plug *plug)
 {
 	struct task_struct *tsk = current;
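The new kerneldoc above describes plugging from the submitter's side: I/O issued between blk_start_plug() and blk_finish_plug() collects in a per-task plug and is flushed either explicitly or when the task sleeps. A small usage sketch, assuming a caller that batches several already-built read bios; my_submit_batch() and my_bios are hypothetical:

/* Usage sketch for the plugging interface documented above: batch a few
 * submissions and let blk_finish_plug() (or an involuntary sleep) flush
 * them as one burst. */
void my_submit_batch(struct bio **my_bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests now collect in the plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, my_bios[i]);
	blk_finish_plug(&plug);		/* flush everything queued above */
}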
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 491eb30a242d..720ad607ff91 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -320,7 +320,7 @@ void blk_insert_flush(struct request *rq)
 		return;
 	}
 
-	BUG_ON(!rq->bio || rq->bio != rq->biotail);
+	BUG_ON(rq->bio != rq->biotail); /*assumes zero or single bio rq */
 
 	/*
 	 * If there's data but flush is not necessary, the request can be
@@ -330,7 +330,6 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		list_add_tail(&rq->queuelist, &q->queue_head);
-		blk_run_queue_async(q);
 		return;
 	}
 
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 60fda88c57f0..e7f9f657f105 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -457,11 +457,11 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 }
 
 /**
- * blk_cleanup_queue: - release a &struct request_queue when it is no longer needed
- * @kobj: the kobj belonging of the request queue to be released
+ * blk_release_queue: - release a &struct request_queue when it is no longer needed
+ * @kobj: the kobj belonging to the request queue to be released
 *
 * Description:
- *     blk_cleanup_queue is the pair to blk_init_queue() or
+ *     blk_release_queue is the pair to blk_init_queue() or
 *     blk_queue_make_request(). It should be called when a request queue is
 *     being released; typically when a block device is being de-registered.
 *     Currently, its primary task it to free all the &struct request
@@ -490,6 +490,7 @@ static void blk_release_queue(struct kobject *kobj)
 	if (q->queue_tags)
 		__blk_queue_free_tags(q);
 
+	blk_throtl_release(q);
 	blk_trace_shutdown(q);
 
 	bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-tag.c b/block/blk-tag.c
index ece65fc4c79b..e74d6d13838f 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -286,12 +286,14 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 
 	BUG_ON(tag == -1);
 
-	if (unlikely(tag >= bqt->real_max_depth))
+	if (unlikely(tag >= bqt->max_depth)) {
 		/*
 		 * This can happen after tag depth has been reduced.
-		 * FIXME: how about a warning or info message here?
+		 * But tag shouldn't be larger than real_max_depth.
 		 */
+		WARN_ON(tag >= bqt->real_max_depth);
 		return;
+	}
 
 	list_del_init(&rq->queuelist);
 	rq->cmd_flags &= ~REQ_QUEUED;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index a19f58c6fc3a..4553245d9317 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -10,6 +10,7 @@
 #include <linux/bio.h>
 #include <linux/blktrace_api.h>
 #include "blk-cgroup.h"
+#include "blk.h"
 
 /* Max dispatch from a group in 1 round */
 static int throtl_grp_quantum = 8;
@@ -302,16 +303,16 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
 	struct blkio_cgroup *blkcg;
 	struct request_queue *q = td->queue;
 
+	/* no throttling for dead queue */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)))
+		return NULL;
+
 	rcu_read_lock();
 	blkcg = task_blkio_cgroup(current);
 	tg = throtl_find_tg(td, blkcg);
@@ -323,32 +324,22 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	/*
 	 * Need to allocate a group. Allocation of group also needs allocation
 	 * of per cpu stats which in-turn takes a mutex() and can block. Hence
-	 * we need to drop rcu lock and queue_lock before we call alloc
-	 *
-	 * Take the request queue reference to make sure queue does not
-	 * go away once we return from allocation.
+	 * we need to drop rcu lock and queue_lock before we call alloc.
	 */
-	blk_get_queue(q);
 	rcu_read_unlock();
 	spin_unlock_irq(q->queue_lock);
 
 	tg = throtl_alloc_tg(td);
-	/*
-	 * We might have slept in group allocation. Make sure queue is not
-	 * dead
-	 */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		blk_put_queue(q);
-		if (tg)
-			kfree(tg);
-
-		return ERR_PTR(-ENODEV);
-	}
-	blk_put_queue(q);
 
 	/* Group allocated and queue is still alive. take the lock */
 	spin_lock_irq(q->queue_lock);
 
+	/* Make sure @q is still alive */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+		kfree(tg);
+		return NULL;
+	}
+
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
| @@ -1014,11 +1005,6 @@ static void throtl_release_tgs(struct throtl_data *td) | |||
| 1014 | } | 1005 | } |
| 1015 | } | 1006 | } |
| 1016 | 1007 | ||
| 1017 | static void throtl_td_free(struct throtl_data *td) | ||
| 1018 | { | ||
| 1019 | kfree(td); | ||
| 1020 | } | ||
| 1021 | |||
| 1022 | /* | 1008 | /* |
| 1023 | * Blk cgroup controller notification saying that blkio_group object is being | 1009 | * Blk cgroup controller notification saying that blkio_group object is being |
| 1024 | * delinked as associated cgroup object is going away. That also means that | 1010 | * delinked as associated cgroup object is going away. That also means that |
| @@ -1123,17 +1109,17 @@ static struct blkio_policy_type blkio_policy_throtl = { | |||
| 1123 | .plid = BLKIO_POLICY_THROTL, | 1109 | .plid = BLKIO_POLICY_THROTL, |
| 1124 | }; | 1110 | }; |
| 1125 | 1111 | ||
| 1126 | int blk_throtl_bio(struct request_queue *q, struct bio **biop) | 1112 | bool blk_throtl_bio(struct request_queue *q, struct bio *bio) |
| 1127 | { | 1113 | { |
| 1128 | struct throtl_data *td = q->td; | 1114 | struct throtl_data *td = q->td; |
| 1129 | struct throtl_grp *tg; | 1115 | struct throtl_grp *tg; |
| 1130 | struct bio *bio = *biop; | ||
| 1131 | bool rw = bio_data_dir(bio), update_disptime = true; | 1116 | bool rw = bio_data_dir(bio), update_disptime = true; |
| 1132 | struct blkio_cgroup *blkcg; | 1117 | struct blkio_cgroup *blkcg; |
| 1118 | bool throttled = false; | ||
| 1133 | 1119 | ||
| 1134 | if (bio->bi_rw & REQ_THROTTLED) { | 1120 | if (bio->bi_rw & REQ_THROTTLED) { |
| 1135 | bio->bi_rw &= ~REQ_THROTTLED; | 1121 | bio->bi_rw &= ~REQ_THROTTLED; |
| 1136 | return 0; | 1122 | goto out; |
| 1137 | } | 1123 | } |
| 1138 | 1124 | ||
| 1139 | /* | 1125 | /* |
| @@ -1152,7 +1138,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) | |||
| 1152 | blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, | 1138 | blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size, |
| 1153 | rw, rw_is_sync(bio->bi_rw)); | 1139 | rw, rw_is_sync(bio->bi_rw)); |
| 1154 | rcu_read_unlock(); | 1140 | rcu_read_unlock(); |
| 1155 | return 0; | 1141 | goto out; |
| 1156 | } | 1142 | } |
| 1157 | } | 1143 | } |
| 1158 | rcu_read_unlock(); | 1144 | rcu_read_unlock(); |
| @@ -1161,18 +1147,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) | |||
| 1161 | * Either group has not been allocated yet or it is not an unlimited | 1147 | * Either group has not been allocated yet or it is not an unlimited |
| 1162 | * IO group | 1148 | * IO group |
| 1163 | */ | 1149 | */ |
| 1164 | |||
| 1165 | spin_lock_irq(q->queue_lock); | 1150 | spin_lock_irq(q->queue_lock); |
| 1166 | tg = throtl_get_tg(td); | 1151 | tg = throtl_get_tg(td); |
| 1167 | 1152 | if (unlikely(!tg)) | |
| 1168 | if (IS_ERR(tg)) { | 1153 | goto out_unlock; |
| 1169 | if (PTR_ERR(tg) == -ENODEV) { | ||
| 1170 | /* | ||
| 1171 | * Queue is gone. No queue lock held here. | ||
| 1172 | */ | ||
| 1173 | return -ENODEV; | ||
| 1174 | } | ||
| 1175 | } | ||
| 1176 | 1154 | ||
| 1177 | if (tg->nr_queued[rw]) { | 1155 | if (tg->nr_queued[rw]) { |
| 1178 | /* | 1156 | /* |
| @@ -1200,7 +1178,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) | |||
| 1200 | * So keep on trimming slice even if bio is not queued. | 1178 | * So keep on trimming slice even if bio is not queued. |
| 1201 | */ | 1179 | */ |
| 1202 | throtl_trim_slice(td, tg, rw); | 1180 | throtl_trim_slice(td, tg, rw); |
| 1203 | goto out; | 1181 | goto out_unlock; |
| 1204 | } | 1182 | } |
| 1205 | 1183 | ||
| 1206 | queue_bio: | 1184 | queue_bio: |
| @@ -1212,16 +1190,52 @@ queue_bio: | |||
| 1212 | tg->nr_queued[READ], tg->nr_queued[WRITE]); | 1190 | tg->nr_queued[READ], tg->nr_queued[WRITE]); |
| 1213 | 1191 | ||
| 1214 | throtl_add_bio_tg(q->td, tg, bio); | 1192 | throtl_add_bio_tg(q->td, tg, bio); |
| 1215 | *biop = NULL; | 1193 | throttled = true; |
| 1216 | 1194 | ||
| 1217 | if (update_disptime) { | 1195 | if (update_disptime) { |
| 1218 | tg_update_disptime(td, tg); | 1196 | tg_update_disptime(td, tg); |
| 1219 | throtl_schedule_next_dispatch(td); | 1197 | throtl_schedule_next_dispatch(td); |
| 1220 | } | 1198 | } |
| 1221 | 1199 | ||
| 1200 | out_unlock: | ||
| 1201 | spin_unlock_irq(q->queue_lock); | ||
| 1222 | out: | 1202 | out: |
| 1203 | return throttled; | ||
| 1204 | } | ||
| 1205 | |||
| 1206 | /** | ||
| 1207 | * blk_throtl_drain - drain throttled bios | ||
| 1208 | * @q: request_queue to drain throttled bios for | ||
| 1209 | * | ||
| 1210 | * Dispatch all currently throttled bios on @q through ->make_request_fn(). | ||
| 1211 | */ | ||
| 1212 | void blk_throtl_drain(struct request_queue *q) | ||
| 1213 | __releases(q->queue_lock) __acquires(q->queue_lock) | ||
| 1214 | { | ||
| 1215 | struct throtl_data *td = q->td; | ||
| 1216 | struct throtl_rb_root *st = &td->tg_service_tree; | ||
| 1217 | struct throtl_grp *tg; | ||
| 1218 | struct bio_list bl; | ||
| 1219 | struct bio *bio; | ||
| 1220 | |||
| 1221 | WARN_ON_ONCE(!queue_is_locked(q)); | ||
| 1222 | |||
| 1223 | bio_list_init(&bl); | ||
| 1224 | |||
| 1225 | while ((tg = throtl_rb_first(st))) { | ||
| 1226 | throtl_dequeue_tg(td, tg); | ||
| 1227 | |||
| 1228 | while ((bio = bio_list_peek(&tg->bio_lists[READ]))) | ||
| 1229 | tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl); | ||
| 1230 | while ((bio = bio_list_peek(&tg->bio_lists[WRITE]))) | ||
| 1231 | tg_dispatch_one_bio(td, tg, bio_data_dir(bio), &bl); | ||
| 1232 | } | ||
| 1223 | spin_unlock_irq(q->queue_lock); | 1233 | spin_unlock_irq(q->queue_lock); |
| 1224 | return 0; | 1234 | |
| 1235 | while ((bio = bio_list_pop(&bl))) | ||
| 1236 | generic_make_request(bio); | ||
| 1237 | |||
| 1238 | spin_lock_irq(q->queue_lock); | ||
| 1225 | } | 1239 | } |
| 1226 | 1240 | ||
| 1227 | int blk_throtl_init(struct request_queue *q) | 1241 | int blk_throtl_init(struct request_queue *q) |
| @@ -1296,7 +1310,11 @@ void blk_throtl_exit(struct request_queue *q) | |||
| 1296 | * it. | 1310 | * it. |
| 1297 | */ | 1311 | */ |
| 1298 | throtl_shutdown_wq(q); | 1312 | throtl_shutdown_wq(q); |
| 1299 | throtl_td_free(td); | 1313 | } |
| 1314 | |||
| 1315 | void blk_throtl_release(struct request_queue *q) | ||
| 1316 | { | ||
| 1317 | kfree(q->td); | ||
| 1300 | } | 1318 | } |
| 1301 | 1319 | ||
| 1302 | static int __init throtl_init(void) | 1320 | static int __init throtl_init(void) |
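
blk_throtl_bio() now reports back through its bool return value instead of clearing *biop: true means the throttling code took over the bio. A minimal sketch of how a submission-path caller is expected to consume the new contract; the function name here is illustrative and not part of this patch:

/* Sketch only: the shape of a blk_queue_bio()-style caller after this change. */
static void queue_bio_sketch(struct request_queue *q, struct bio *bio)
{
	/*
	 * A true return means the bio was queued on a throttle group; it
	 * will be reissued later by the dispatch timer or by
	 * blk_throtl_drain(), so submission simply stops here.
	 */
	if (blk_throtl_bio(q, bio))
		return;

	/* not throttled: continue with merging and request allocation */
}
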
diff --git a/block/blk.h b/block/blk.h index 20b900a377c9..3f6551b3c92d 100644 --- a/block/blk.h +++ b/block/blk.h | |||
| @@ -15,6 +15,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq, | |||
| 15 | struct bio *bio); | 15 | struct bio *bio); |
| 16 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, | 16 | int blk_rq_append_bio(struct request_queue *q, struct request *rq, |
| 17 | struct bio *bio); | 17 | struct bio *bio); |
| 18 | void blk_drain_queue(struct request_queue *q, bool drain_all); | ||
| 18 | void blk_dequeue_request(struct request *rq); | 19 | void blk_dequeue_request(struct request *rq); |
| 19 | void __blk_queue_free_tags(struct request_queue *q); | 20 | void __blk_queue_free_tags(struct request_queue *q); |
| 20 | bool __blk_end_bidi_request(struct request *rq, int error, | 21 | bool __blk_end_bidi_request(struct request *rq, int error, |
| @@ -188,4 +189,21 @@ static inline int blk_do_io_stat(struct request *rq) | |||
| 188 | (rq->cmd_flags & REQ_DISCARD)); | 189 | (rq->cmd_flags & REQ_DISCARD)); |
| 189 | } | 190 | } |
| 190 | 191 | ||
| 191 | #endif | 192 | #ifdef CONFIG_BLK_DEV_THROTTLING |
| 193 | extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio); | ||
| 194 | extern void blk_throtl_drain(struct request_queue *q); | ||
| 195 | extern int blk_throtl_init(struct request_queue *q); | ||
| 196 | extern void blk_throtl_exit(struct request_queue *q); | ||
| 197 | extern void blk_throtl_release(struct request_queue *q); | ||
| 198 | #else /* CONFIG_BLK_DEV_THROTTLING */ | ||
| 199 | static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio) | ||
| 200 | { | ||
| 201 | return false; | ||
| 202 | } | ||
| 203 | static inline void blk_throtl_drain(struct request_queue *q) { } | ||
| 204 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | ||
| 205 | static inline void blk_throtl_exit(struct request_queue *q) { } | ||
| 206 | static inline void blk_throtl_release(struct request_queue *q) { } | ||
| 207 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | ||
| 208 | |||
| 209 | #endif /* BLK_INTERNAL_H */ | ||
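
blk_drain_queue() itself lives in blk-core.c and is not part of this excerpt; the declaration above and the open-coded loop it replaces in elv_quiesce_start() (see the elevator.c hunk below) only suggest its rough shape. The following is an approximation under that assumption, not the real implementation:

/* Approximation of blk_drain_queue(), inferred from the removed quiesce loop. */
static void drain_queue_sketch(struct request_queue *q, bool drain_all)
{
	while (true) {
		bool busy;

		spin_lock_irq(q->queue_lock);

		elv_drain_elevator(q);
		if (drain_all)
			blk_throtl_drain(q);	/* reissues throttled bios */
		__blk_run_queue(q);

		busy = q->rq.elvpriv;		/* requests still holding elevator data */
		spin_unlock_irq(q->queue_lock);

		if (!busy)
			break;
		msleep(10);
	}
}
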
diff --git a/block/elevator.c b/block/elevator.c index a3b64bc71d88..66343d6917d0 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -31,7 +31,6 @@ | |||
| 31 | #include <linux/slab.h> | 31 | #include <linux/slab.h> |
| 32 | #include <linux/init.h> | 32 | #include <linux/init.h> |
| 33 | #include <linux/compiler.h> | 33 | #include <linux/compiler.h> |
| 34 | #include <linux/delay.h> | ||
| 35 | #include <linux/blktrace_api.h> | 34 | #include <linux/blktrace_api.h> |
| 36 | #include <linux/hash.h> | 35 | #include <linux/hash.h> |
| 37 | #include <linux/uaccess.h> | 36 | #include <linux/uaccess.h> |
| @@ -182,7 +181,7 @@ static void elevator_attach(struct request_queue *q, struct elevator_queue *eq, | |||
| 182 | eq->elevator_data = data; | 181 | eq->elevator_data = data; |
| 183 | } | 182 | } |
| 184 | 183 | ||
| 185 | static char chosen_elevator[16]; | 184 | static char chosen_elevator[ELV_NAME_MAX]; |
| 186 | 185 | ||
| 187 | static int __init elevator_setup(char *str) | 186 | static int __init elevator_setup(char *str) |
| 188 | { | 187 | { |
| @@ -606,43 +605,35 @@ void elv_requeue_request(struct request_queue *q, struct request *rq) | |||
| 606 | void elv_drain_elevator(struct request_queue *q) | 605 | void elv_drain_elevator(struct request_queue *q) |
| 607 | { | 606 | { |
| 608 | static int printed; | 607 | static int printed; |
| 608 | |||
| 609 | lockdep_assert_held(q->queue_lock); | ||
| 610 | |||
| 609 | while (q->elevator->ops->elevator_dispatch_fn(q, 1)) | 611 | while (q->elevator->ops->elevator_dispatch_fn(q, 1)) |
| 610 | ; | 612 | ; |
| 611 | if (q->nr_sorted == 0) | 613 | if (q->nr_sorted && printed++ < 10) { |
| 612 | return; | ||
| 613 | if (printed++ < 10) { | ||
| 614 | printk(KERN_ERR "%s: forced dispatching is broken " | 614 | printk(KERN_ERR "%s: forced dispatching is broken " |
| 615 | "(nr_sorted=%u), please report this\n", | 615 | "(nr_sorted=%u), please report this\n", |
| 616 | q->elevator->elevator_type->elevator_name, q->nr_sorted); | 616 | q->elevator->elevator_type->elevator_name, q->nr_sorted); |
| 617 | } | 617 | } |
| 618 | } | 618 | } |
| 619 | 619 | ||
| 620 | /* | ||
| 621 | * Call with queue lock held, interrupts disabled | ||
| 622 | */ | ||
| 623 | void elv_quiesce_start(struct request_queue *q) | 620 | void elv_quiesce_start(struct request_queue *q) |
| 624 | { | 621 | { |
| 625 | if (!q->elevator) | 622 | if (!q->elevator) |
| 626 | return; | 623 | return; |
| 627 | 624 | ||
| 625 | spin_lock_irq(q->queue_lock); | ||
| 628 | queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); | 626 | queue_flag_set(QUEUE_FLAG_ELVSWITCH, q); |
| 627 | spin_unlock_irq(q->queue_lock); | ||
| 629 | 628 | ||
| 630 | /* | 629 | blk_drain_queue(q, false); |
| 631 | * make sure we don't have any requests in flight | ||
| 632 | */ | ||
| 633 | elv_drain_elevator(q); | ||
| 634 | while (q->rq.elvpriv) { | ||
| 635 | __blk_run_queue(q); | ||
| 636 | spin_unlock_irq(q->queue_lock); | ||
| 637 | msleep(10); | ||
| 638 | spin_lock_irq(q->queue_lock); | ||
| 639 | elv_drain_elevator(q); | ||
| 640 | } | ||
| 641 | } | 630 | } |
| 642 | 631 | ||
| 643 | void elv_quiesce_end(struct request_queue *q) | 632 | void elv_quiesce_end(struct request_queue *q) |
| 644 | { | 633 | { |
| 634 | spin_lock_irq(q->queue_lock); | ||
| 645 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); | 635 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); |
| 636 | spin_unlock_irq(q->queue_lock); | ||
| 646 | } | 637 | } |
| 647 | 638 | ||
| 648 | void __elv_add_request(struct request_queue *q, struct request *rq, int where) | 639 | void __elv_add_request(struct request_queue *q, struct request *rq, int where) |
| @@ -972,7 +963,6 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
| 972 | /* | 963 | /* |
| 973 | * Turn on BYPASS and drain all requests w/ elevator private data | 964 | * Turn on BYPASS and drain all requests w/ elevator private data |
| 974 | */ | 965 | */ |
| 975 | spin_lock_irq(q->queue_lock); | ||
| 976 | elv_quiesce_start(q); | 966 | elv_quiesce_start(q); |
| 977 | 967 | ||
| 978 | /* | 968 | /* |
| @@ -983,8 +973,8 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
| 983 | /* | 973 | /* |
| 984 | * attach and start new elevator | 974 | * attach and start new elevator |
| 985 | */ | 975 | */ |
| 976 | spin_lock_irq(q->queue_lock); | ||
| 986 | elevator_attach(q, e, data); | 977 | elevator_attach(q, e, data); |
| 987 | |||
| 988 | spin_unlock_irq(q->queue_lock); | 978 | spin_unlock_irq(q->queue_lock); |
| 989 | 979 | ||
| 990 | if (old_elevator->registered) { | 980 | if (old_elevator->registered) { |
| @@ -999,9 +989,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e) | |||
| 999 | * finally exit old elevator and turn off BYPASS. | 989 | * finally exit old elevator and turn off BYPASS. |
| 1000 | */ | 990 | */ |
| 1001 | elevator_exit(old_elevator); | 991 | elevator_exit(old_elevator); |
| 1002 | spin_lock_irq(q->queue_lock); | ||
| 1003 | elv_quiesce_end(q); | 992 | elv_quiesce_end(q); |
| 1004 | spin_unlock_irq(q->queue_lock); | ||
| 1005 | 993 | ||
| 1006 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); | 994 | blk_add_trace_msg(q, "elv switch: %s", e->elevator_type->elevator_name); |
| 1007 | 995 | ||
| @@ -1015,10 +1003,7 @@ fail_register: | |||
| 1015 | elevator_exit(e); | 1003 | elevator_exit(e); |
| 1016 | q->elevator = old_elevator; | 1004 | q->elevator = old_elevator; |
| 1017 | elv_register_queue(q); | 1005 | elv_register_queue(q); |
| 1018 | 1006 | elv_quiesce_end(q); | |
| 1019 | spin_lock_irq(q->queue_lock); | ||
| 1020 | queue_flag_clear(QUEUE_FLAG_ELVSWITCH, q); | ||
| 1021 | spin_unlock_irq(q->queue_lock); | ||
| 1022 | 1007 | ||
| 1023 | return err; | 1008 | return err; |
| 1024 | } | 1009 | } |
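
Both quiesce helpers now take and release queue_lock themselves, so elevator_switch() calls them unlocked and only holds the lock around the actual attach. A condensed sketch of the resulting sequence, with registration and error paths omitted:

/* Condensed sketch of the locking in elevator_switch() after this patch. */
static void switch_sequence_sketch(struct request_queue *q,
				   struct elevator_queue *new, void *data,
				   struct elevator_queue *old)
{
	elv_quiesce_start(q);		/* sets ELVSWITCH, then blk_drain_queue() */

	spin_lock_irq(q->queue_lock);	/* lock only covers the actual swap */
	elevator_attach(q, new, data);
	spin_unlock_irq(q->queue_lock);

	elevator_exit(old);
	elv_quiesce_end(q);		/* clears ELVSWITCH, locking internally */
}
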
diff --git a/block/genhd.c b/block/genhd.c index 94855a9717de..024fc3944fb5 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
| @@ -612,6 +612,12 @@ void add_disk(struct gendisk *disk) | |||
| 612 | register_disk(disk); | 612 | register_disk(disk); |
| 613 | blk_register_queue(disk); | 613 | blk_register_queue(disk); |
| 614 | 614 | ||
| 615 | /* | ||
| 616 | * Take an extra ref on queue which will be put on disk_release() | ||
| 617 | * so that it sticks around as long as @disk is there. | ||
| 618 | */ | ||
| 619 | WARN_ON_ONCE(blk_get_queue(disk->queue)); | ||
| 620 | |||
| 615 | retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, | 621 | retval = sysfs_create_link(&disk_to_dev(disk)->kobj, &bdi->dev->kobj, |
| 616 | "bdi"); | 622 | "bdi"); |
| 617 | WARN_ON(retval); | 623 | WARN_ON(retval); |
| @@ -1166,6 +1172,8 @@ static void disk_release(struct device *dev) | |||
| 1166 | disk_replace_part_tbl(disk, NULL); | 1172 | disk_replace_part_tbl(disk, NULL); |
| 1167 | free_part_stats(&disk->part0); | 1173 | free_part_stats(&disk->part0); |
| 1168 | free_part_info(&disk->part0); | 1174 | free_part_info(&disk->part0); |
| 1175 | if (disk->queue) | ||
| 1176 | blk_put_queue(disk->queue); | ||
| 1169 | kfree(disk); | 1177 | kfree(disk); |
| 1170 | } | 1178 | } |
| 1171 | struct class block_class = { | 1179 | struct class block_class = { |
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c index 4f4230b79bb6..fbdf0d802ec4 100644 --- a/block/scsi_ioctl.c +++ b/block/scsi_ioctl.c | |||
| @@ -565,7 +565,7 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod | |||
| 565 | { | 565 | { |
| 566 | int err; | 566 | int err; |
| 567 | 567 | ||
| 568 | if (!q || blk_get_queue(q)) | 568 | if (!q) |
| 569 | return -ENXIO; | 569 | return -ENXIO; |
| 570 | 570 | ||
| 571 | switch (cmd) { | 571 | switch (cmd) { |
| @@ -686,7 +686,6 @@ int scsi_cmd_ioctl(struct request_queue *q, struct gendisk *bd_disk, fmode_t mod | |||
| 686 | err = -ENOTTY; | 686 | err = -ENOTTY; |
| 687 | } | 687 | } |
| 688 | 688 | ||
| 689 | blk_put_queue(q); | ||
| 690 | return err; | 689 | return err; |
| 691 | } | 690 | } |
| 692 | EXPORT_SYMBOL(scsi_cmd_ioctl); | 691 | EXPORT_SYMBOL(scsi_cmd_ioctl); |
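
The scsi_ioctl.c change relies on the reference added in add_disk() above: any path that reaches scsi_cmd_ioctl() through an open gendisk already has the queue pinned for the lifetime of the disk, so the per-call blk_get_queue()/blk_put_queue() pair becomes redundant. A hypothetical caller, only to illustrate that assumption:

/* Hypothetical ioctl path reached through an open disk; it need not pin @q. */
static int example_disk_ioctl(struct block_device *bdev, fmode_t mode,
			      unsigned int cmd, unsigned long arg)
{
	struct gendisk *disk = bdev->bd_disk;

	/*
	 * disk->queue is held by the reference add_disk() takes and
	 * disk_release() drops, so no extra blk_get_queue() is needed here.
	 */
	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
			      (void __user *)arg);
}
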
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c index 528f6318ded1..167ba0af47f5 100644 --- a/drivers/block/aoe/aoeblk.c +++ b/drivers/block/aoe/aoeblk.c | |||
| @@ -159,7 +159,7 @@ aoeblk_release(struct gendisk *disk, fmode_t mode) | |||
| 159 | return 0; | 159 | return 0; |
| 160 | } | 160 | } |
| 161 | 161 | ||
| 162 | static int | 162 | static void |
| 163 | aoeblk_make_request(struct request_queue *q, struct bio *bio) | 163 | aoeblk_make_request(struct request_queue *q, struct bio *bio) |
| 164 | { | 164 | { |
| 165 | struct sk_buff_head queue; | 165 | struct sk_buff_head queue; |
| @@ -172,25 +172,25 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
| 172 | if (bio == NULL) { | 172 | if (bio == NULL) { |
| 173 | printk(KERN_ERR "aoe: bio is NULL\n"); | 173 | printk(KERN_ERR "aoe: bio is NULL\n"); |
| 174 | BUG(); | 174 | BUG(); |
| 175 | return 0; | 175 | return; |
| 176 | } | 176 | } |
| 177 | d = bio->bi_bdev->bd_disk->private_data; | 177 | d = bio->bi_bdev->bd_disk->private_data; |
| 178 | if (d == NULL) { | 178 | if (d == NULL) { |
| 179 | printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); | 179 | printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); |
| 180 | BUG(); | 180 | BUG(); |
| 181 | bio_endio(bio, -ENXIO); | 181 | bio_endio(bio, -ENXIO); |
| 182 | return 0; | 182 | return; |
| 183 | } else if (bio->bi_io_vec == NULL) { | 183 | } else if (bio->bi_io_vec == NULL) { |
| 184 | printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); | 184 | printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); |
| 185 | BUG(); | 185 | BUG(); |
| 186 | bio_endio(bio, -ENXIO); | 186 | bio_endio(bio, -ENXIO); |
| 187 | return 0; | 187 | return; |
| 188 | } | 188 | } |
| 189 | buf = mempool_alloc(d->bufpool, GFP_NOIO); | 189 | buf = mempool_alloc(d->bufpool, GFP_NOIO); |
| 190 | if (buf == NULL) { | 190 | if (buf == NULL) { |
| 191 | printk(KERN_INFO "aoe: buf allocation failure\n"); | 191 | printk(KERN_INFO "aoe: buf allocation failure\n"); |
| 192 | bio_endio(bio, -ENOMEM); | 192 | bio_endio(bio, -ENOMEM); |
| 193 | return 0; | 193 | return; |
| 194 | } | 194 | } |
| 195 | memset(buf, 0, sizeof(*buf)); | 195 | memset(buf, 0, sizeof(*buf)); |
| 196 | INIT_LIST_HEAD(&buf->bufs); | 196 | INIT_LIST_HEAD(&buf->bufs); |
| @@ -211,7 +211,7 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
| 211 | spin_unlock_irqrestore(&d->lock, flags); | 211 | spin_unlock_irqrestore(&d->lock, flags); |
| 212 | mempool_free(buf, d->bufpool); | 212 | mempool_free(buf, d->bufpool); |
| 213 | bio_endio(bio, -ENXIO); | 213 | bio_endio(bio, -ENXIO); |
| 214 | return 0; | 214 | return; |
| 215 | } | 215 | } |
| 216 | 216 | ||
| 217 | list_add_tail(&buf->bufs, &d->bufq); | 217 | list_add_tail(&buf->bufs, &d->bufq); |
| @@ -222,8 +222,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
| 222 | 222 | ||
| 223 | spin_unlock_irqrestore(&d->lock, flags); | 223 | spin_unlock_irqrestore(&d->lock, flags); |
| 224 | aoenet_xmit(&queue); | 224 | aoenet_xmit(&queue); |
| 225 | |||
| 226 | return 0; | ||
| 227 | } | 225 | } |
| 228 | 226 | ||
| 229 | static int | 227 | static int |
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index dba1c32e1ddf..d22119d49e53 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
| @@ -323,7 +323,7 @@ out: | |||
| 323 | return err; | 323 | return err; |
| 324 | } | 324 | } |
| 325 | 325 | ||
| 326 | static int brd_make_request(struct request_queue *q, struct bio *bio) | 326 | static void brd_make_request(struct request_queue *q, struct bio *bio) |
| 327 | { | 327 | { |
| 328 | struct block_device *bdev = bio->bi_bdev; | 328 | struct block_device *bdev = bio->bi_bdev; |
| 329 | struct brd_device *brd = bdev->bd_disk->private_data; | 329 | struct brd_device *brd = bdev->bd_disk->private_data; |
| @@ -359,8 +359,6 @@ static int brd_make_request(struct request_queue *q, struct bio *bio) | |||
| 359 | 359 | ||
| 360 | out: | 360 | out: |
| 361 | bio_endio(bio, err); | 361 | bio_endio(bio, err); |
| 362 | |||
| 363 | return 0; | ||
| 364 | } | 362 | } |
| 365 | 363 | ||
| 366 | #ifdef CONFIG_BLK_DEV_XIP | 364 | #ifdef CONFIG_BLK_DEV_XIP |
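
The driver conversions collected here all follow the same mechanical pattern: the make_request handler returns void, and success or failure is reported only through bio_endio()/bio_io_error(). A minimal skeleton under the new convention; the mydrv_* names and helpers are hypothetical and taken from no driver in this patch:

/* Hypothetical bio-based driver skeleton using the new void convention. */
static void mydrv_make_request(struct request_queue *q, struct bio *bio)
{
	struct mydrv_device *dev = q->queuedata;

	if (mydrv_handle_bio(dev, bio) < 0) {
		bio_io_error(bio);	/* errors travel through the bio */
		return;
	}

	bio_endio(bio, 0);		/* completion too; nothing is returned */
}

static int mydrv_init_queue(struct mydrv_device *dev)
{
	dev->queue = blk_alloc_queue(GFP_KERNEL);
	if (!dev->queue)
		return -ENOMEM;

	blk_queue_make_request(dev->queue, mydrv_make_request);
	dev->queue->queuedata = dev;
	return 0;
}
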
diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h index 1706d60b8c99..9cf20355ceec 100644 --- a/drivers/block/drbd/drbd_int.h +++ b/drivers/block/drbd/drbd_int.h | |||
| @@ -1506,7 +1506,7 @@ extern void drbd_free_mdev(struct drbd_conf *mdev); | |||
| 1506 | extern int proc_details; | 1506 | extern int proc_details; |
| 1507 | 1507 | ||
| 1508 | /* drbd_req */ | 1508 | /* drbd_req */ |
| 1509 | extern int drbd_make_request(struct request_queue *q, struct bio *bio); | 1509 | extern void drbd_make_request(struct request_queue *q, struct bio *bio); |
| 1510 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); | 1510 | extern int drbd_read_remote(struct drbd_conf *mdev, struct drbd_request *req); |
| 1511 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); | 1511 | extern int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct bio_vec *bvec); |
| 1512 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); | 1512 | extern int is_valid_ar_handle(struct drbd_request *, sector_t); |
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c index 3424d675b769..4a0f314086e5 100644 --- a/drivers/block/drbd/drbd_req.c +++ b/drivers/block/drbd/drbd_req.c | |||
| @@ -1073,7 +1073,7 @@ static int drbd_fail_request_early(struct drbd_conf *mdev, int is_write) | |||
| 1073 | return 0; | 1073 | return 0; |
| 1074 | } | 1074 | } |
| 1075 | 1075 | ||
| 1076 | int drbd_make_request(struct request_queue *q, struct bio *bio) | 1076 | void drbd_make_request(struct request_queue *q, struct bio *bio) |
| 1077 | { | 1077 | { |
| 1078 | unsigned int s_enr, e_enr; | 1078 | unsigned int s_enr, e_enr; |
| 1079 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; | 1079 | struct drbd_conf *mdev = (struct drbd_conf *) q->queuedata; |
| @@ -1081,7 +1081,7 @@ int drbd_make_request(struct request_queue *q, struct bio *bio) | |||
| 1081 | 1081 | ||
| 1082 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { | 1082 | if (drbd_fail_request_early(mdev, bio_data_dir(bio) & WRITE)) { |
| 1083 | bio_endio(bio, -EPERM); | 1083 | bio_endio(bio, -EPERM); |
| 1084 | return 0; | 1084 | return; |
| 1085 | } | 1085 | } |
| 1086 | 1086 | ||
| 1087 | start_time = jiffies; | 1087 | start_time = jiffies; |
| @@ -1100,7 +1100,8 @@ int drbd_make_request(struct request_queue *q, struct bio *bio) | |||
| 1100 | 1100 | ||
| 1101 | if (likely(s_enr == e_enr)) { | 1101 | if (likely(s_enr == e_enr)) { |
| 1102 | inc_ap_bio(mdev, 1); | 1102 | inc_ap_bio(mdev, 1); |
| 1103 | return drbd_make_request_common(mdev, bio, start_time); | 1103 | drbd_make_request_common(mdev, bio, start_time); |
| 1104 | return; | ||
| 1104 | } | 1105 | } |
| 1105 | 1106 | ||
| 1106 | /* can this bio be split generically? | 1107 | /* can this bio be split generically? |
| @@ -1148,7 +1149,6 @@ int drbd_make_request(struct request_queue *q, struct bio *bio) | |||
| 1148 | 1149 | ||
| 1149 | bio_pair_release(bp); | 1150 | bio_pair_release(bp); |
| 1150 | } | 1151 | } |
| 1151 | return 0; | ||
| 1152 | } | 1152 | } |
| 1153 | 1153 | ||
| 1154 | /* This is called by bio_add_page(). With this function we reduce | 1154 | /* This is called by bio_add_page(). With this function we reduce |
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 4720c7ade0ae..c77983ea86c8 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
| @@ -203,74 +203,6 @@ lo_do_transfer(struct loop_device *lo, int cmd, | |||
| 203 | } | 203 | } |
| 204 | 204 | ||
| 205 | /** | 205 | /** |
| 206 | * do_lo_send_aops - helper for writing data to a loop device | ||
| 207 | * | ||
| 208 | * This is the fast version for backing filesystems which implement the address | ||
| 209 | * space operations write_begin and write_end. | ||
| 210 | */ | ||
| 211 | static int do_lo_send_aops(struct loop_device *lo, struct bio_vec *bvec, | ||
| 212 | loff_t pos, struct page *unused) | ||
| 213 | { | ||
| 214 | struct file *file = lo->lo_backing_file; /* kudos to NFsckingS */ | ||
| 215 | struct address_space *mapping = file->f_mapping; | ||
| 216 | pgoff_t index; | ||
| 217 | unsigned offset, bv_offs; | ||
| 218 | int len, ret; | ||
| 219 | |||
| 220 | mutex_lock(&mapping->host->i_mutex); | ||
| 221 | index = pos >> PAGE_CACHE_SHIFT; | ||
| 222 | offset = pos & ((pgoff_t)PAGE_CACHE_SIZE - 1); | ||
| 223 | bv_offs = bvec->bv_offset; | ||
| 224 | len = bvec->bv_len; | ||
| 225 | while (len > 0) { | ||
| 226 | sector_t IV; | ||
| 227 | unsigned size, copied; | ||
| 228 | int transfer_result; | ||
| 229 | struct page *page; | ||
| 230 | void *fsdata; | ||
| 231 | |||
| 232 | IV = ((sector_t)index << (PAGE_CACHE_SHIFT - 9))+(offset >> 9); | ||
| 233 | size = PAGE_CACHE_SIZE - offset; | ||
| 234 | if (size > len) | ||
| 235 | size = len; | ||
| 236 | |||
| 237 | ret = pagecache_write_begin(file, mapping, pos, size, 0, | ||
| 238 | &page, &fsdata); | ||
| 239 | if (ret) | ||
| 240 | goto fail; | ||
| 241 | |||
| 242 | file_update_time(file); | ||
| 243 | |||
| 244 | transfer_result = lo_do_transfer(lo, WRITE, page, offset, | ||
| 245 | bvec->bv_page, bv_offs, size, IV); | ||
| 246 | copied = size; | ||
| 247 | if (unlikely(transfer_result)) | ||
| 248 | copied = 0; | ||
| 249 | |||
| 250 | ret = pagecache_write_end(file, mapping, pos, size, copied, | ||
| 251 | page, fsdata); | ||
| 252 | if (ret < 0 || ret != copied) | ||
| 253 | goto fail; | ||
| 254 | |||
| 255 | if (unlikely(transfer_result)) | ||
| 256 | goto fail; | ||
| 257 | |||
| 258 | bv_offs += copied; | ||
| 259 | len -= copied; | ||
| 260 | offset = 0; | ||
| 261 | index++; | ||
| 262 | pos += copied; | ||
| 263 | } | ||
| 264 | ret = 0; | ||
| 265 | out: | ||
| 266 | mutex_unlock(&mapping->host->i_mutex); | ||
| 267 | return ret; | ||
| 268 | fail: | ||
| 269 | ret = -1; | ||
| 270 | goto out; | ||
| 271 | } | ||
| 272 | |||
| 273 | /** | ||
| 274 | * __do_lo_send_write - helper for writing data to a loop device | 206 | * __do_lo_send_write - helper for writing data to a loop device |
| 275 | * | 207 | * |
| 276 | * This helper just factors out common code between do_lo_send_direct_write() | 208 | * This helper just factors out common code between do_lo_send_direct_write() |
| @@ -297,10 +229,8 @@ static int __do_lo_send_write(struct file *file, | |||
| 297 | /** | 229 | /** |
| 298 | * do_lo_send_direct_write - helper for writing data to a loop device | 230 | * do_lo_send_direct_write - helper for writing data to a loop device |
| 299 | * | 231 | * |
| 300 | * This is the fast, non-transforming version for backing filesystems which do | 232 | * This is the fast, non-transforming version that does not need double |
| 301 | * not implement the address space operations write_begin and write_end. | 233 | * buffering. |
| 302 | * It uses the write file operation which should be present on all writeable | ||
| 303 | * filesystems. | ||
| 304 | */ | 234 | */ |
| 305 | static int do_lo_send_direct_write(struct loop_device *lo, | 235 | static int do_lo_send_direct_write(struct loop_device *lo, |
| 306 | struct bio_vec *bvec, loff_t pos, struct page *page) | 236 | struct bio_vec *bvec, loff_t pos, struct page *page) |
| @@ -316,15 +246,9 @@ static int do_lo_send_direct_write(struct loop_device *lo, | |||
| 316 | /** | 246 | /** |
| 317 | * do_lo_send_write - helper for writing data to a loop device | 247 | * do_lo_send_write - helper for writing data to a loop device |
| 318 | * | 248 | * |
| 319 | * This is the slow, transforming version for filesystems which do not | 249 | * This is the slow, transforming version that needs to double buffer the |
| 320 | * implement the address space operations write_begin and write_end. It | 250 | * data as it cannot do the transformations in place without having direct |
| 321 | * uses the write file operation which should be present on all writeable | 251 | * access to the destination pages of the backing file. |
| 322 | * filesystems. | ||
| 323 | * | ||
| 324 | * Using fops->write is slower than using aops->{prepare,commit}_write in the | ||
| 325 | * transforming case because we need to double buffer the data as we cannot do | ||
| 326 | * the transformations in place as we do not have direct access to the | ||
| 327 | * destination pages of the backing file. | ||
| 328 | */ | 252 | */ |
| 329 | static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, | 253 | static int do_lo_send_write(struct loop_device *lo, struct bio_vec *bvec, |
| 330 | loff_t pos, struct page *page) | 254 | loff_t pos, struct page *page) |
| @@ -350,17 +274,16 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos) | |||
| 350 | struct page *page = NULL; | 274 | struct page *page = NULL; |
| 351 | int i, ret = 0; | 275 | int i, ret = 0; |
| 352 | 276 | ||
| 353 | do_lo_send = do_lo_send_aops; | 277 | if (lo->transfer != transfer_none) { |
| 354 | if (!(lo->lo_flags & LO_FLAGS_USE_AOPS)) { | 278 | page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); |
| 279 | if (unlikely(!page)) | ||
| 280 | goto fail; | ||
| 281 | kmap(page); | ||
| 282 | do_lo_send = do_lo_send_write; | ||
| 283 | } else { | ||
| 355 | do_lo_send = do_lo_send_direct_write; | 284 | do_lo_send = do_lo_send_direct_write; |
| 356 | if (lo->transfer != transfer_none) { | ||
| 357 | page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); | ||
| 358 | if (unlikely(!page)) | ||
| 359 | goto fail; | ||
| 360 | kmap(page); | ||
| 361 | do_lo_send = do_lo_send_write; | ||
| 362 | } | ||
| 363 | } | 285 | } |
| 286 | |||
| 364 | bio_for_each_segment(bvec, bio, i) { | 287 | bio_for_each_segment(bvec, bio, i) { |
| 365 | ret = do_lo_send(lo, bvec, pos, page); | 288 | ret = do_lo_send(lo, bvec, pos, page); |
| 366 | if (ret < 0) | 289 | if (ret < 0) |
| @@ -514,7 +437,7 @@ static struct bio *loop_get_bio(struct loop_device *lo) | |||
| 514 | return bio_list_pop(&lo->lo_bio_list); | 437 | return bio_list_pop(&lo->lo_bio_list); |
| 515 | } | 438 | } |
| 516 | 439 | ||
| 517 | static int loop_make_request(struct request_queue *q, struct bio *old_bio) | 440 | static void loop_make_request(struct request_queue *q, struct bio *old_bio) |
| 518 | { | 441 | { |
| 519 | struct loop_device *lo = q->queuedata; | 442 | struct loop_device *lo = q->queuedata; |
| 520 | int rw = bio_rw(old_bio); | 443 | int rw = bio_rw(old_bio); |
| @@ -532,12 +455,11 @@ static int loop_make_request(struct request_queue *q, struct bio *old_bio) | |||
| 532 | loop_add_bio(lo, old_bio); | 455 | loop_add_bio(lo, old_bio); |
| 533 | wake_up(&lo->lo_event); | 456 | wake_up(&lo->lo_event); |
| 534 | spin_unlock_irq(&lo->lo_lock); | 457 | spin_unlock_irq(&lo->lo_lock); |
| 535 | return 0; | 458 | return; |
| 536 | 459 | ||
| 537 | out: | 460 | out: |
| 538 | spin_unlock_irq(&lo->lo_lock); | 461 | spin_unlock_irq(&lo->lo_lock); |
| 539 | bio_io_error(old_bio); | 462 | bio_io_error(old_bio); |
| 540 | return 0; | ||
| 541 | } | 463 | } |
| 542 | 464 | ||
| 543 | struct switch_request { | 465 | struct switch_request { |
| @@ -849,35 +771,23 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode, | |||
| 849 | mapping = file->f_mapping; | 771 | mapping = file->f_mapping; |
| 850 | inode = mapping->host; | 772 | inode = mapping->host; |
| 851 | 773 | ||
| 852 | if (!(file->f_mode & FMODE_WRITE)) | ||
| 853 | lo_flags |= LO_FLAGS_READ_ONLY; | ||
| 854 | |||
| 855 | error = -EINVAL; | 774 | error = -EINVAL; |
| 856 | if (S_ISREG(inode->i_mode) || S_ISBLK(inode->i_mode)) { | 775 | if (!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode)) |
| 857 | const struct address_space_operations *aops = mapping->a_ops; | 776 | goto out_putf; |
| 858 | |||
| 859 | if (aops->write_begin) | ||
| 860 | lo_flags |= LO_FLAGS_USE_AOPS; | ||
| 861 | if (!(lo_flags & LO_FLAGS_USE_AOPS) && !file->f_op->write) | ||
| 862 | lo_flags |= LO_FLAGS_READ_ONLY; | ||
| 863 | 777 | ||
| 864 | lo_blocksize = S_ISBLK(inode->i_mode) ? | 778 | if (!(file->f_mode & FMODE_WRITE) || !(mode & FMODE_WRITE) || |
| 865 | inode->i_bdev->bd_block_size : PAGE_SIZE; | 779 | !file->f_op->write) |
| 780 | lo_flags |= LO_FLAGS_READ_ONLY; | ||
| 866 | 781 | ||
| 867 | error = 0; | 782 | lo_blocksize = S_ISBLK(inode->i_mode) ? |
| 868 | } else { | 783 | inode->i_bdev->bd_block_size : PAGE_SIZE; |
| 869 | goto out_putf; | ||
| 870 | } | ||
| 871 | 784 | ||
| 785 | error = -EFBIG; | ||
| 872 | size = get_loop_size(lo, file); | 786 | size = get_loop_size(lo, file); |
| 873 | 787 | if ((loff_t)(sector_t)size != size) | |
| 874 | if ((loff_t)(sector_t)size != size) { | ||
| 875 | error = -EFBIG; | ||
| 876 | goto out_putf; | 788 | goto out_putf; |
| 877 | } | ||
| 878 | 789 | ||
| 879 | if (!(mode & FMODE_WRITE)) | 790 | error = 0; |
| 880 | lo_flags |= LO_FLAGS_READ_ONLY; | ||
| 881 | 791 | ||
| 882 | set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); | 792 | set_device_ro(bdev, (lo_flags & LO_FLAGS_READ_ONLY) != 0); |
| 883 | 793 | ||
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index e133f094ab08..a63b0a2b7805 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
| @@ -2444,7 +2444,7 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err) | |||
| 2444 | pkt_bio_finished(pd); | 2444 | pkt_bio_finished(pd); |
| 2445 | } | 2445 | } |
| 2446 | 2446 | ||
| 2447 | static int pkt_make_request(struct request_queue *q, struct bio *bio) | 2447 | static void pkt_make_request(struct request_queue *q, struct bio *bio) |
| 2448 | { | 2448 | { |
| 2449 | struct pktcdvd_device *pd; | 2449 | struct pktcdvd_device *pd; |
| 2450 | char b[BDEVNAME_SIZE]; | 2450 | char b[BDEVNAME_SIZE]; |
| @@ -2473,7 +2473,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
| 2473 | cloned_bio->bi_end_io = pkt_end_io_read_cloned; | 2473 | cloned_bio->bi_end_io = pkt_end_io_read_cloned; |
| 2474 | pd->stats.secs_r += bio->bi_size >> 9; | 2474 | pd->stats.secs_r += bio->bi_size >> 9; |
| 2475 | pkt_queue_bio(pd, cloned_bio); | 2475 | pkt_queue_bio(pd, cloned_bio); |
| 2476 | return 0; | 2476 | return; |
| 2477 | } | 2477 | } |
| 2478 | 2478 | ||
| 2479 | if (!test_bit(PACKET_WRITABLE, &pd->flags)) { | 2479 | if (!test_bit(PACKET_WRITABLE, &pd->flags)) { |
| @@ -2509,7 +2509,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
| 2509 | pkt_make_request(q, &bp->bio1); | 2509 | pkt_make_request(q, &bp->bio1); |
| 2510 | pkt_make_request(q, &bp->bio2); | 2510 | pkt_make_request(q, &bp->bio2); |
| 2511 | bio_pair_release(bp); | 2511 | bio_pair_release(bp); |
| 2512 | return 0; | 2512 | return; |
| 2513 | } | 2513 | } |
| 2514 | } | 2514 | } |
| 2515 | 2515 | ||
| @@ -2533,7 +2533,7 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
| 2533 | } | 2533 | } |
| 2534 | spin_unlock(&pkt->lock); | 2534 | spin_unlock(&pkt->lock); |
| 2535 | spin_unlock(&pd->cdrw.active_list_lock); | 2535 | spin_unlock(&pd->cdrw.active_list_lock); |
| 2536 | return 0; | 2536 | return; |
| 2537 | } else { | 2537 | } else { |
| 2538 | blocked_bio = 1; | 2538 | blocked_bio = 1; |
| 2539 | } | 2539 | } |
| @@ -2584,10 +2584,9 @@ static int pkt_make_request(struct request_queue *q, struct bio *bio) | |||
| 2584 | */ | 2584 | */ |
| 2585 | wake_up(&pd->wqueue); | 2585 | wake_up(&pd->wqueue); |
| 2586 | } | 2586 | } |
| 2587 | return 0; | 2587 | return; |
| 2588 | end_io: | 2588 | end_io: |
| 2589 | bio_io_error(bio); | 2589 | bio_io_error(bio); |
| 2590 | return 0; | ||
| 2591 | } | 2590 | } |
| 2592 | 2591 | ||
| 2593 | 2592 | ||
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c index b3bdb8af89cf..7fad7af87eb2 100644 --- a/drivers/block/ps3vram.c +++ b/drivers/block/ps3vram.c | |||
| @@ -596,7 +596,7 @@ out: | |||
| 596 | return next; | 596 | return next; |
| 597 | } | 597 | } |
| 598 | 598 | ||
| 599 | static int ps3vram_make_request(struct request_queue *q, struct bio *bio) | 599 | static void ps3vram_make_request(struct request_queue *q, struct bio *bio) |
| 600 | { | 600 | { |
| 601 | struct ps3_system_bus_device *dev = q->queuedata; | 601 | struct ps3_system_bus_device *dev = q->queuedata; |
| 602 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); | 602 | struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); |
| @@ -610,13 +610,11 @@ static int ps3vram_make_request(struct request_queue *q, struct bio *bio) | |||
| 610 | spin_unlock_irq(&priv->lock); | 610 | spin_unlock_irq(&priv->lock); |
| 611 | 611 | ||
| 612 | if (busy) | 612 | if (busy) |
| 613 | return 0; | 613 | return; |
| 614 | 614 | ||
| 615 | do { | 615 | do { |
| 616 | bio = ps3vram_do_bio(dev, bio); | 616 | bio = ps3vram_do_bio(dev, bio); |
| 617 | } while (bio); | 617 | } while (bio); |
| 618 | |||
| 619 | return 0; | ||
| 620 | } | 618 | } |
| 621 | 619 | ||
| 622 | static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) | 620 | static int __devinit ps3vram_probe(struct ps3_system_bus_device *dev) |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 031ca720d926..aa2712060bfb 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
| @@ -513,7 +513,7 @@ static void process_page(unsigned long data) | |||
| 513 | } | 513 | } |
| 514 | } | 514 | } |
| 515 | 515 | ||
| 516 | static int mm_make_request(struct request_queue *q, struct bio *bio) | 516 | static void mm_make_request(struct request_queue *q, struct bio *bio) |
| 517 | { | 517 | { |
| 518 | struct cardinfo *card = q->queuedata; | 518 | struct cardinfo *card = q->queuedata; |
| 519 | pr_debug("mm_make_request %llu %u\n", | 519 | pr_debug("mm_make_request %llu %u\n", |
| @@ -525,7 +525,7 @@ static int mm_make_request(struct request_queue *q, struct bio *bio) | |||
| 525 | card->biotail = &bio->bi_next; | 525 | card->biotail = &bio->bi_next; |
| 526 | spin_unlock_irq(&card->lock); | 526 | spin_unlock_irq(&card->lock); |
| 527 | 527 | ||
| 528 | return 0; | 528 | return; |
| 529 | } | 529 | } |
| 530 | 530 | ||
| 531 | static irqreturn_t mm_interrupt(int irq, void *__card) | 531 | static irqreturn_t mm_interrupt(int irq, void *__card) |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 6b6616a41baa..4720f68f817e 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -192,9 +192,6 @@ struct mapped_device { | |||
| 192 | /* forced geometry settings */ | 192 | /* forced geometry settings */ |
| 193 | struct hd_geometry geometry; | 193 | struct hd_geometry geometry; |
| 194 | 194 | ||
| 195 | /* For saving the address of __make_request for request based dm */ | ||
| 196 | make_request_fn *saved_make_request_fn; | ||
| 197 | |||
| 198 | /* sysfs handle */ | 195 | /* sysfs handle */ |
| 199 | struct kobject kobj; | 196 | struct kobject kobj; |
| 200 | 197 | ||
| @@ -1403,7 +1400,7 @@ out: | |||
| 1403 | * The request function that just remaps the bio built up by | 1400 | * The request function that just remaps the bio built up by |
| 1404 | * dm_merge_bvec. | 1401 | * dm_merge_bvec. |
| 1405 | */ | 1402 | */ |
| 1406 | static int _dm_request(struct request_queue *q, struct bio *bio) | 1403 | static void _dm_request(struct request_queue *q, struct bio *bio) |
| 1407 | { | 1404 | { |
| 1408 | int rw = bio_data_dir(bio); | 1405 | int rw = bio_data_dir(bio); |
| 1409 | struct mapped_device *md = q->queuedata; | 1406 | struct mapped_device *md = q->queuedata; |
| @@ -1424,19 +1421,12 @@ static int _dm_request(struct request_queue *q, struct bio *bio) | |||
| 1424 | queue_io(md, bio); | 1421 | queue_io(md, bio); |
| 1425 | else | 1422 | else |
| 1426 | bio_io_error(bio); | 1423 | bio_io_error(bio); |
| 1427 | return 0; | 1424 | return; |
| 1428 | } | 1425 | } |
| 1429 | 1426 | ||
| 1430 | __split_and_process_bio(md, bio); | 1427 | __split_and_process_bio(md, bio); |
| 1431 | up_read(&md->io_lock); | 1428 | up_read(&md->io_lock); |
| 1432 | return 0; | 1429 | return; |
| 1433 | } | ||
| 1434 | |||
| 1435 | static int dm_make_request(struct request_queue *q, struct bio *bio) | ||
| 1436 | { | ||
| 1437 | struct mapped_device *md = q->queuedata; | ||
| 1438 | |||
| 1439 | return md->saved_make_request_fn(q, bio); /* call __make_request() */ | ||
| 1440 | } | 1430 | } |
| 1441 | 1431 | ||
| 1442 | static int dm_request_based(struct mapped_device *md) | 1432 | static int dm_request_based(struct mapped_device *md) |
| @@ -1444,14 +1434,14 @@ static int dm_request_based(struct mapped_device *md) | |||
| 1444 | return blk_queue_stackable(md->queue); | 1434 | return blk_queue_stackable(md->queue); |
| 1445 | } | 1435 | } |
| 1446 | 1436 | ||
| 1447 | static int dm_request(struct request_queue *q, struct bio *bio) | 1437 | static void dm_request(struct request_queue *q, struct bio *bio) |
| 1448 | { | 1438 | { |
| 1449 | struct mapped_device *md = q->queuedata; | 1439 | struct mapped_device *md = q->queuedata; |
| 1450 | 1440 | ||
| 1451 | if (dm_request_based(md)) | 1441 | if (dm_request_based(md)) |
| 1452 | return dm_make_request(q, bio); | 1442 | blk_queue_bio(q, bio); |
| 1453 | 1443 | else | |
| 1454 | return _dm_request(q, bio); | 1444 | _dm_request(q, bio); |
| 1455 | } | 1445 | } |
| 1456 | 1446 | ||
| 1457 | void dm_dispatch_request(struct request *rq) | 1447 | void dm_dispatch_request(struct request *rq) |
| @@ -2191,7 +2181,6 @@ static int dm_init_request_based_queue(struct mapped_device *md) | |||
| 2191 | return 0; | 2181 | return 0; |
| 2192 | 2182 | ||
| 2193 | md->queue = q; | 2183 | md->queue = q; |
| 2194 | md->saved_make_request_fn = md->queue->make_request_fn; | ||
| 2195 | dm_init_md_queue(md); | 2184 | dm_init_md_queue(md); |
| 2196 | blk_queue_softirq_done(md->queue, dm_softirq_done); | 2185 | blk_queue_softirq_done(md->queue, dm_softirq_done); |
| 2197 | blk_queue_prep_rq(md->queue, dm_prep_fn); | 2186 | blk_queue_prep_rq(md->queue, dm_prep_fn); |
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 60816b132c2e..918fb8ac6607 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c | |||
| @@ -169,7 +169,7 @@ static void add_sector(struct faulty_conf *conf, sector_t start, int mode) | |||
| 169 | conf->nfaults = n+1; | 169 | conf->nfaults = n+1; |
| 170 | } | 170 | } |
| 171 | 171 | ||
| 172 | static int make_request(struct mddev *mddev, struct bio *bio) | 172 | static void make_request(struct mddev *mddev, struct bio *bio) |
| 173 | { | 173 | { |
| 174 | struct faulty_conf *conf = mddev->private; | 174 | struct faulty_conf *conf = mddev->private; |
| 175 | int failit = 0; | 175 | int failit = 0; |
| @@ -181,7 +181,7 @@ static int make_request(struct mddev *mddev, struct bio *bio) | |||
| 181 | * just fail immediately | 181 | * just fail immediately |
| 182 | */ | 182 | */ |
| 183 | bio_endio(bio, -EIO); | 183 | bio_endio(bio, -EIO); |
| 184 | return 0; | 184 | return; |
| 185 | } | 185 | } |
| 186 | 186 | ||
| 187 | if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9), | 187 | if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9), |
| @@ -211,15 +211,15 @@ static int make_request(struct mddev *mddev, struct bio *bio) | |||
| 211 | } | 211 | } |
| 212 | if (failit) { | 212 | if (failit) { |
| 213 | struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev); | 213 | struct bio *b = bio_clone_mddev(bio, GFP_NOIO, mddev); |
| 214 | |||
| 214 | b->bi_bdev = conf->rdev->bdev; | 215 | b->bi_bdev = conf->rdev->bdev; |
| 215 | b->bi_private = bio; | 216 | b->bi_private = bio; |
| 216 | b->bi_end_io = faulty_fail; | 217 | b->bi_end_io = faulty_fail; |
| 217 | generic_make_request(b); | 218 | bio = b; |
| 218 | return 0; | 219 | } else |
| 219 | } else { | ||
| 220 | bio->bi_bdev = conf->rdev->bdev; | 220 | bio->bi_bdev = conf->rdev->bdev; |
| 221 | return 1; | 221 | |
| 222 | } | 222 | generic_make_request(bio); |
| 223 | } | 223 | } |
| 224 | 224 | ||
| 225 | static void status(struct seq_file *seq, struct mddev *mddev) | 225 | static void status(struct seq_file *seq, struct mddev *mddev) |
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index 10c5844460cb..a82035867519 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
| @@ -264,14 +264,14 @@ static int linear_stop (struct mddev *mddev) | |||
| 264 | return 0; | 264 | return 0; |
| 265 | } | 265 | } |
| 266 | 266 | ||
| 267 | static int linear_make_request (struct mddev *mddev, struct bio *bio) | 267 | static void linear_make_request(struct mddev *mddev, struct bio *bio) |
| 268 | { | 268 | { |
| 269 | struct dev_info *tmp_dev; | 269 | struct dev_info *tmp_dev; |
| 270 | sector_t start_sector; | 270 | sector_t start_sector; |
| 271 | 271 | ||
| 272 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 272 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
| 273 | md_flush_request(mddev, bio); | 273 | md_flush_request(mddev, bio); |
| 274 | return 0; | 274 | return; |
| 275 | } | 275 | } |
| 276 | 276 | ||
| 277 | rcu_read_lock(); | 277 | rcu_read_lock(); |
| @@ -293,7 +293,7 @@ static int linear_make_request (struct mddev *mddev, struct bio *bio) | |||
| 293 | (unsigned long long)start_sector); | 293 | (unsigned long long)start_sector); |
| 294 | rcu_read_unlock(); | 294 | rcu_read_unlock(); |
| 295 | bio_io_error(bio); | 295 | bio_io_error(bio); |
| 296 | return 0; | 296 | return; |
| 297 | } | 297 | } |
| 298 | if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > | 298 | if (unlikely(bio->bi_sector + (bio->bi_size >> 9) > |
| 299 | tmp_dev->end_sector)) { | 299 | tmp_dev->end_sector)) { |
| @@ -307,20 +307,17 @@ static int linear_make_request (struct mddev *mddev, struct bio *bio) | |||
| 307 | 307 | ||
| 308 | bp = bio_split(bio, end_sector - bio->bi_sector); | 308 | bp = bio_split(bio, end_sector - bio->bi_sector); |
| 309 | 309 | ||
| 310 | if (linear_make_request(mddev, &bp->bio1)) | 310 | linear_make_request(mddev, &bp->bio1); |
| 311 | generic_make_request(&bp->bio1); | 311 | linear_make_request(mddev, &bp->bio2); |
| 312 | if (linear_make_request(mddev, &bp->bio2)) | ||
| 313 | generic_make_request(&bp->bio2); | ||
| 314 | bio_pair_release(bp); | 312 | bio_pair_release(bp); |
| 315 | return 0; | 313 | return; |
| 316 | } | 314 | } |
| 317 | 315 | ||
| 318 | bio->bi_bdev = tmp_dev->rdev->bdev; | 316 | bio->bi_bdev = tmp_dev->rdev->bdev; |
| 319 | bio->bi_sector = bio->bi_sector - start_sector | 317 | bio->bi_sector = bio->bi_sector - start_sector |
| 320 | + tmp_dev->rdev->data_offset; | 318 | + tmp_dev->rdev->data_offset; |
| 321 | rcu_read_unlock(); | 319 | rcu_read_unlock(); |
| 322 | 320 | generic_make_request(bio); | |
| 323 | return 1; | ||
| 324 | } | 321 | } |
| 325 | 322 | ||
| 326 | static void linear_status (struct seq_file *seq, struct mddev *mddev) | 323 | static void linear_status (struct seq_file *seq, struct mddev *mddev) |
diff --git a/drivers/md/md.c b/drivers/md/md.c index 266e82ebaf11..2acb32827fde 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -332,18 +332,17 @@ static DEFINE_SPINLOCK(all_mddevs_lock); | |||
| 332 | * call has finished, the bio has been linked into some internal structure | 332 | * call has finished, the bio has been linked into some internal structure |
| 333 | * and so is visible to ->quiesce(), so we don't need the refcount any more. | 333 | * and so is visible to ->quiesce(), so we don't need the refcount any more. |
| 334 | */ | 334 | */ |
| 335 | static int md_make_request(struct request_queue *q, struct bio *bio) | 335 | static void md_make_request(struct request_queue *q, struct bio *bio) |
| 336 | { | 336 | { |
| 337 | const int rw = bio_data_dir(bio); | 337 | const int rw = bio_data_dir(bio); |
| 338 | struct mddev *mddev = q->queuedata; | 338 | struct mddev *mddev = q->queuedata; |
| 339 | int rv; | ||
| 340 | int cpu; | 339 | int cpu; |
| 341 | unsigned int sectors; | 340 | unsigned int sectors; |
| 342 | 341 | ||
| 343 | if (mddev == NULL || mddev->pers == NULL | 342 | if (mddev == NULL || mddev->pers == NULL |
| 344 | || !mddev->ready) { | 343 | || !mddev->ready) { |
| 345 | bio_io_error(bio); | 344 | bio_io_error(bio); |
| 346 | return 0; | 345 | return; |
| 347 | } | 346 | } |
| 348 | smp_rmb(); /* Ensure implications of 'active' are visible */ | 347 | smp_rmb(); /* Ensure implications of 'active' are visible */ |
| 349 | rcu_read_lock(); | 348 | rcu_read_lock(); |
| @@ -368,7 +367,7 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
| 368 | * go away inside make_request | 367 | * go away inside make_request |
| 369 | */ | 368 | */ |
| 370 | sectors = bio_sectors(bio); | 369 | sectors = bio_sectors(bio); |
| 371 | rv = mddev->pers->make_request(mddev, bio); | 370 | mddev->pers->make_request(mddev, bio); |
| 372 | 371 | ||
| 373 | cpu = part_stat_lock(); | 372 | cpu = part_stat_lock(); |
| 374 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); | 373 | part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); |
| @@ -377,8 +376,6 @@ static int md_make_request(struct request_queue *q, struct bio *bio) | |||
| 377 | 376 | ||
| 378 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) | 377 | if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) |
| 379 | wake_up(&mddev->sb_wait); | 378 | wake_up(&mddev->sb_wait); |
| 380 | |||
| 381 | return rv; | ||
| 382 | } | 379 | } |
| 383 | 380 | ||
| 384 | /* mddev_suspend makes sure no new requests are submitted | 381 | /* mddev_suspend makes sure no new requests are submitted |
| @@ -477,8 +474,7 @@ static void md_submit_flush_data(struct work_struct *ws) | |||
| 477 | bio_endio(bio, 0); | 474 | bio_endio(bio, 0); |
| 478 | else { | 475 | else { |
| 479 | bio->bi_rw &= ~REQ_FLUSH; | 476 | bio->bi_rw &= ~REQ_FLUSH; |
| 480 | if (mddev->pers->make_request(mddev, bio)) | 477 | mddev->pers->make_request(mddev, bio); |
| 481 | generic_make_request(bio); | ||
| 482 | } | 478 | } |
| 483 | 479 | ||
| 484 | mddev->flush_bio = NULL; | 480 | mddev->flush_bio = NULL; |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 51c1d91557e0..cf742d9306ec 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
| @@ -419,7 +419,7 @@ struct md_personality | |||
| 419 | int level; | 419 | int level; |
| 420 | struct list_head list; | 420 | struct list_head list; |
| 421 | struct module *owner; | 421 | struct module *owner; |
| 422 | int (*make_request)(struct mddev *mddev, struct bio *bio); | 422 | void (*make_request)(struct mddev *mddev, struct bio *bio); |
| 423 | int (*run)(struct mddev *mddev); | 423 | int (*run)(struct mddev *mddev); |
| 424 | int (*stop)(struct mddev *mddev); | 424 | int (*stop)(struct mddev *mddev); |
| 425 | void (*status)(struct seq_file *seq, struct mddev *mddev); | 425 | void (*status)(struct seq_file *seq, struct mddev *mddev); |
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index d32c785e17d4..ad20a28fbf2a 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
| @@ -106,7 +106,7 @@ static void multipath_end_request(struct bio *bio, int error) | |||
| 106 | rdev_dec_pending(rdev, conf->mddev); | 106 | rdev_dec_pending(rdev, conf->mddev); |
| 107 | } | 107 | } |
| 108 | 108 | ||
| 109 | static int multipath_make_request(struct mddev *mddev, struct bio * bio) | 109 | static void multipath_make_request(struct mddev *mddev, struct bio * bio) |
| 110 | { | 110 | { |
| 111 | struct mpconf *conf = mddev->private; | 111 | struct mpconf *conf = mddev->private; |
| 112 | struct multipath_bh * mp_bh; | 112 | struct multipath_bh * mp_bh; |
| @@ -114,7 +114,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio) | |||
| 114 | 114 | ||
| 115 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 115 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
| 116 | md_flush_request(mddev, bio); | 116 | md_flush_request(mddev, bio); |
| 117 | return 0; | 117 | return; |
| 118 | } | 118 | } |
| 119 | 119 | ||
| 120 | mp_bh = mempool_alloc(conf->pool, GFP_NOIO); | 120 | mp_bh = mempool_alloc(conf->pool, GFP_NOIO); |
| @@ -126,7 +126,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio) | |||
| 126 | if (mp_bh->path < 0) { | 126 | if (mp_bh->path < 0) { |
| 127 | bio_endio(bio, -EIO); | 127 | bio_endio(bio, -EIO); |
| 128 | mempool_free(mp_bh, conf->pool); | 128 | mempool_free(mp_bh, conf->pool); |
| 129 | return 0; | 129 | return; |
| 130 | } | 130 | } |
| 131 | multipath = conf->multipaths + mp_bh->path; | 131 | multipath = conf->multipaths + mp_bh->path; |
| 132 | 132 | ||
| @@ -137,7 +137,7 @@ static int multipath_make_request(struct mddev *mddev, struct bio * bio) | |||
| 137 | mp_bh->bio.bi_end_io = multipath_end_request; | 137 | mp_bh->bio.bi_end_io = multipath_end_request; |
| 138 | mp_bh->bio.bi_private = mp_bh; | 138 | mp_bh->bio.bi_private = mp_bh; |
| 139 | generic_make_request(&mp_bh->bio); | 139 | generic_make_request(&mp_bh->bio); |
| 140 | return 0; | 140 | return; |
| 141 | } | 141 | } |
| 142 | 142 | ||
| 143 | static void multipath_status (struct seq_file *seq, struct mddev *mddev) | 143 | static void multipath_status (struct seq_file *seq, struct mddev *mddev) |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index 0eb08a4df759..27e19e2b51d4 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
| @@ -468,7 +468,7 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, | |||
| 468 | } | 468 | } |
| 469 | } | 469 | } |
| 470 | 470 | ||
| 471 | static int raid0_make_request(struct mddev *mddev, struct bio *bio) | 471 | static void raid0_make_request(struct mddev *mddev, struct bio *bio) |
| 472 | { | 472 | { |
| 473 | unsigned int chunk_sects; | 473 | unsigned int chunk_sects; |
| 474 | sector_t sector_offset; | 474 | sector_t sector_offset; |
| @@ -477,7 +477,7 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
| 477 | 477 | ||
| 478 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 478 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
| 479 | md_flush_request(mddev, bio); | 479 | md_flush_request(mddev, bio); |
| 480 | return 0; | 480 | return; |
| 481 | } | 481 | } |
| 482 | 482 | ||
| 483 | chunk_sects = mddev->chunk_sectors; | 483 | chunk_sects = mddev->chunk_sectors; |
| @@ -497,13 +497,10 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
| 497 | else | 497 | else |
| 498 | bp = bio_split(bio, chunk_sects - | 498 | bp = bio_split(bio, chunk_sects - |
| 499 | sector_div(sector, chunk_sects)); | 499 | sector_div(sector, chunk_sects)); |
| 500 | if (raid0_make_request(mddev, &bp->bio1)) | 500 | raid0_make_request(mddev, &bp->bio1); |
| 501 | generic_make_request(&bp->bio1); | 501 | raid0_make_request(mddev, &bp->bio2); |
| 502 | if (raid0_make_request(mddev, &bp->bio2)) | ||
| 503 | generic_make_request(&bp->bio2); | ||
| 504 | |||
| 505 | bio_pair_release(bp); | 502 | bio_pair_release(bp); |
| 506 | return 0; | 503 | return; |
| 507 | } | 504 | } |
| 508 | 505 | ||
| 509 | sector_offset = bio->bi_sector; | 506 | sector_offset = bio->bi_sector; |
| @@ -513,10 +510,9 @@ static int raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
| 513 | bio->bi_bdev = tmp_dev->bdev; | 510 | bio->bi_bdev = tmp_dev->bdev; |
| 514 | bio->bi_sector = sector_offset + zone->dev_start + | 511 | bio->bi_sector = sector_offset + zone->dev_start + |
| 515 | tmp_dev->data_offset; | 512 | tmp_dev->data_offset; |
| 516 | /* | 513 | |
| 517 | * Let the main block layer submit the IO and resolve recursion: | 514 | generic_make_request(bio); |
| 518 | */ | 515 | return; |
| 519 | return 1; | ||
| 520 | 516 | ||
| 521 | bad_map: | 517 | bad_map: |
| 522 | printk("md/raid0:%s: make_request bug: can't convert block across chunks" | 518 | printk("md/raid0:%s: make_request bug: can't convert block across chunks" |
| @@ -525,7 +521,7 @@ bad_map: | |||
| 525 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); | 521 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); |
| 526 | 522 | ||
| 527 | bio_io_error(bio); | 523 | bio_io_error(bio); |
| 528 | return 0; | 524 | return; |
| 529 | } | 525 | } |
| 530 | 526 | ||
| 531 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) | 527 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) |
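The raid0 hunks above also show the new splitting convention: when a request straddles a chunk boundary, the driver splits the bio and feeds both halves back into its own make_request, rather than returning non-zero so the block layer would resubmit them. A hedged sketch of that pattern follows; example_make_request() stands for the driver's own entry point (as in the sketch above) and split_sects for the distance to the next chunk boundary.

#include <linux/bio.h>

/* Split a boundary-crossing bio and resubmit both halves recursively. */
static void split_and_resubmit(struct mddev *mddev, struct bio *bio,
			       int split_sects)
{
	struct bio_pair *bp;

	bp = bio_split(bio, split_sects);	/* first split_sects go to bp->bio1 */

	example_make_request(mddev, &bp->bio1);	/* neither call returns a status,  */
	example_make_request(mddev, &bp->bio2);	/* errors are signalled on the bios */

	bio_pair_release(bp);			/* drop the pair's extra references */
}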
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4602fc57c961..cae874646d9e 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
| @@ -807,7 +807,7 @@ do_sync_io: | |||
| 807 | pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); | 807 | pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); |
| 808 | } | 808 | } |
| 809 | 809 | ||
| 810 | static int make_request(struct mddev *mddev, struct bio * bio) | 810 | static void make_request(struct mddev *mddev, struct bio * bio) |
| 811 | { | 811 | { |
| 812 | struct r1conf *conf = mddev->private; | 812 | struct r1conf *conf = mddev->private; |
| 813 | struct mirror_info *mirror; | 813 | struct mirror_info *mirror; |
| @@ -892,7 +892,7 @@ read_again: | |||
| 892 | if (rdisk < 0) { | 892 | if (rdisk < 0) { |
| 893 | /* couldn't find anywhere to read from */ | 893 | /* couldn't find anywhere to read from */ |
| 894 | raid_end_bio_io(r1_bio); | 894 | raid_end_bio_io(r1_bio); |
| 895 | return 0; | 895 | return; |
| 896 | } | 896 | } |
| 897 | mirror = conf->mirrors + rdisk; | 897 | mirror = conf->mirrors + rdisk; |
| 898 | 898 | ||
| @@ -950,7 +950,7 @@ read_again: | |||
| 950 | goto read_again; | 950 | goto read_again; |
| 951 | } else | 951 | } else |
| 952 | generic_make_request(read_bio); | 952 | generic_make_request(read_bio); |
| 953 | return 0; | 953 | return; |
| 954 | } | 954 | } |
| 955 | 955 | ||
| 956 | /* | 956 | /* |
| @@ -1151,8 +1151,6 @@ read_again: | |||
| 1151 | 1151 | ||
| 1152 | if (do_sync || !bitmap || !plugged) | 1152 | if (do_sync || !bitmap || !plugged) |
| 1153 | md_wakeup_thread(mddev->thread); | 1153 | md_wakeup_thread(mddev->thread); |
| 1154 | |||
| 1155 | return 0; | ||
| 1156 | } | 1154 | } |
| 1157 | 1155 | ||
| 1158 | static void status(struct seq_file *seq, struct mddev *mddev) | 1156 | static void status(struct seq_file *seq, struct mddev *mddev) |
| @@ -2193,7 +2191,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
| 2193 | bio->bi_next = NULL; | 2191 | bio->bi_next = NULL; |
| 2194 | bio->bi_flags &= ~(BIO_POOL_MASK-1); | 2192 | bio->bi_flags &= ~(BIO_POOL_MASK-1); |
| 2195 | bio->bi_flags |= 1 << BIO_UPTODATE; | 2193 | bio->bi_flags |= 1 << BIO_UPTODATE; |
| 2196 | bio->bi_comp_cpu = -1; | ||
| 2197 | bio->bi_rw = READ; | 2194 | bio->bi_rw = READ; |
| 2198 | bio->bi_vcnt = 0; | 2195 | bio->bi_vcnt = 0; |
| 2199 | bio->bi_idx = 0; | 2196 | bio->bi_idx = 0; |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index c025a8276dc1..dde6dd4b47ec 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
| @@ -842,7 +842,7 @@ static void unfreeze_array(struct r10conf *conf) | |||
| 842 | spin_unlock_irq(&conf->resync_lock); | 842 | spin_unlock_irq(&conf->resync_lock); |
| 843 | } | 843 | } |
| 844 | 844 | ||
| 845 | static int make_request(struct mddev *mddev, struct bio * bio) | 845 | static void make_request(struct mddev *mddev, struct bio * bio) |
| 846 | { | 846 | { |
| 847 | struct r10conf *conf = mddev->private; | 847 | struct r10conf *conf = mddev->private; |
| 848 | struct mirror_info *mirror; | 848 | struct mirror_info *mirror; |
| @@ -861,7 +861,7 @@ static int make_request(struct mddev *mddev, struct bio * bio) | |||
| 861 | 861 | ||
| 862 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 862 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
| 863 | md_flush_request(mddev, bio); | 863 | md_flush_request(mddev, bio); |
| 864 | return 0; | 864 | return; |
| 865 | } | 865 | } |
| 866 | 866 | ||
| 867 | /* If this request crosses a chunk boundary, we need to | 867 | /* If this request crosses a chunk boundary, we need to |
| @@ -893,10 +893,8 @@ static int make_request(struct mddev *mddev, struct bio * bio) | |||
| 893 | conf->nr_waiting++; | 893 | conf->nr_waiting++; |
| 894 | spin_unlock_irq(&conf->resync_lock); | 894 | spin_unlock_irq(&conf->resync_lock); |
| 895 | 895 | ||
| 896 | if (make_request(mddev, &bp->bio1)) | 896 | make_request(mddev, &bp->bio1); |
| 897 | generic_make_request(&bp->bio1); | 897 | make_request(mddev, &bp->bio2); |
| 898 | if (make_request(mddev, &bp->bio2)) | ||
| 899 | generic_make_request(&bp->bio2); | ||
| 900 | 898 | ||
| 901 | spin_lock_irq(&conf->resync_lock); | 899 | spin_lock_irq(&conf->resync_lock); |
| 902 | conf->nr_waiting--; | 900 | conf->nr_waiting--; |
| @@ -904,14 +902,14 @@ static int make_request(struct mddev *mddev, struct bio * bio) | |||
| 904 | spin_unlock_irq(&conf->resync_lock); | 902 | spin_unlock_irq(&conf->resync_lock); |
| 905 | 903 | ||
| 906 | bio_pair_release(bp); | 904 | bio_pair_release(bp); |
| 907 | return 0; | 905 | return; |
| 908 | bad_map: | 906 | bad_map: |
| 909 | printk("md/raid10:%s: make_request bug: can't convert block across chunks" | 907 | printk("md/raid10:%s: make_request bug: can't convert block across chunks" |
| 910 | " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, | 908 | " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, |
| 911 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); | 909 | (unsigned long long)bio->bi_sector, bio->bi_size >> 10); |
| 912 | 910 | ||
| 913 | bio_io_error(bio); | 911 | bio_io_error(bio); |
| 914 | return 0; | 912 | return; |
| 915 | } | 913 | } |
| 916 | 914 | ||
| 917 | md_write_start(mddev, bio); | 915 | md_write_start(mddev, bio); |
| @@ -954,7 +952,7 @@ read_again: | |||
| 954 | slot = r10_bio->read_slot; | 952 | slot = r10_bio->read_slot; |
| 955 | if (disk < 0) { | 953 | if (disk < 0) { |
| 956 | raid_end_bio_io(r10_bio); | 954 | raid_end_bio_io(r10_bio); |
| 957 | return 0; | 955 | return; |
| 958 | } | 956 | } |
| 959 | mirror = conf->mirrors + disk; | 957 | mirror = conf->mirrors + disk; |
| 960 | 958 | ||
| @@ -1002,7 +1000,7 @@ read_again: | |||
| 1002 | goto read_again; | 1000 | goto read_again; |
| 1003 | } else | 1001 | } else |
| 1004 | generic_make_request(read_bio); | 1002 | generic_make_request(read_bio); |
| 1005 | return 0; | 1003 | return; |
| 1006 | } | 1004 | } |
| 1007 | 1005 | ||
| 1008 | /* | 1006 | /* |
| @@ -1176,7 +1174,6 @@ retry_write: | |||
| 1176 | 1174 | ||
| 1177 | if (do_sync || !mddev->bitmap || !plugged) | 1175 | if (do_sync || !mddev->bitmap || !plugged) |
| 1178 | md_wakeup_thread(mddev->thread); | 1176 | md_wakeup_thread(mddev->thread); |
| 1179 | return 0; | ||
| 1180 | } | 1177 | } |
| 1181 | 1178 | ||
| 1182 | static void status(struct seq_file *seq, struct mddev *mddev) | 1179 | static void status(struct seq_file *seq, struct mddev *mddev) |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index f6fe053a5bed..bb1b46143fb6 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
| @@ -3688,7 +3688,7 @@ static struct stripe_head *__get_priority_stripe(struct r5conf *conf) | |||
| 3688 | return sh; | 3688 | return sh; |
| 3689 | } | 3689 | } |
| 3690 | 3690 | ||
| 3691 | static int make_request(struct mddev *mddev, struct bio * bi) | 3691 | static void make_request(struct mddev *mddev, struct bio * bi) |
| 3692 | { | 3692 | { |
| 3693 | struct r5conf *conf = mddev->private; | 3693 | struct r5conf *conf = mddev->private; |
| 3694 | int dd_idx; | 3694 | int dd_idx; |
| @@ -3701,7 +3701,7 @@ static int make_request(struct mddev *mddev, struct bio * bi) | |||
| 3701 | 3701 | ||
| 3702 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { | 3702 | if (unlikely(bi->bi_rw & REQ_FLUSH)) { |
| 3703 | md_flush_request(mddev, bi); | 3703 | md_flush_request(mddev, bi); |
| 3704 | return 0; | 3704 | return; |
| 3705 | } | 3705 | } |
| 3706 | 3706 | ||
| 3707 | md_write_start(mddev, bi); | 3707 | md_write_start(mddev, bi); |
| @@ -3709,7 +3709,7 @@ static int make_request(struct mddev *mddev, struct bio * bi) | |||
| 3709 | if (rw == READ && | 3709 | if (rw == READ && |
| 3710 | mddev->reshape_position == MaxSector && | 3710 | mddev->reshape_position == MaxSector && |
| 3711 | chunk_aligned_read(mddev,bi)) | 3711 | chunk_aligned_read(mddev,bi)) |
| 3712 | return 0; | 3712 | return; |
| 3713 | 3713 | ||
| 3714 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | 3714 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
| 3715 | last_sector = bi->bi_sector + (bi->bi_size>>9); | 3715 | last_sector = bi->bi_sector + (bi->bi_size>>9); |
| @@ -3844,8 +3844,6 @@ static int make_request(struct mddev *mddev, struct bio * bi) | |||
| 3844 | 3844 | ||
| 3845 | bio_endio(bi, 0); | 3845 | bio_endio(bi, 0); |
| 3846 | } | 3846 | } |
| 3847 | |||
| 3848 | return 0; | ||
| 3849 | } | 3847 | } |
| 3850 | 3848 | ||
| 3851 | static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); | 3849 | static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 9b43ae94beba..a5a55da2a1ac 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
| @@ -27,7 +27,7 @@ | |||
| 27 | 27 | ||
| 28 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); | 28 | static int dcssblk_open(struct block_device *bdev, fmode_t mode); |
| 29 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); | 29 | static int dcssblk_release(struct gendisk *disk, fmode_t mode); |
| 30 | static int dcssblk_make_request(struct request_queue *q, struct bio *bio); | 30 | static void dcssblk_make_request(struct request_queue *q, struct bio *bio); |
| 31 | static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, | 31 | static int dcssblk_direct_access(struct block_device *bdev, sector_t secnum, |
| 32 | void **kaddr, unsigned long *pfn); | 32 | void **kaddr, unsigned long *pfn); |
| 33 | 33 | ||
| @@ -814,7 +814,7 @@ out: | |||
| 814 | return rc; | 814 | return rc; |
| 815 | } | 815 | } |
| 816 | 816 | ||
| 817 | static int | 817 | static void |
| 818 | dcssblk_make_request(struct request_queue *q, struct bio *bio) | 818 | dcssblk_make_request(struct request_queue *q, struct bio *bio) |
| 819 | { | 819 | { |
| 820 | struct dcssblk_dev_info *dev_info; | 820 | struct dcssblk_dev_info *dev_info; |
| @@ -871,10 +871,9 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) | |||
| 871 | bytes_done += bvec->bv_len; | 871 | bytes_done += bvec->bv_len; |
| 872 | } | 872 | } |
| 873 | bio_endio(bio, 0); | 873 | bio_endio(bio, 0); |
| 874 | return 0; | 874 | return; |
| 875 | fail: | 875 | fail: |
| 876 | bio_io_error(bio); | 876 | bio_io_error(bio); |
| 877 | return 0; | ||
| 878 | } | 877 | } |
| 879 | 878 | ||
| 880 | static int | 879 | static int |
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 1f6a4d894e73..98f3e4ade924 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
| @@ -181,7 +181,7 @@ static unsigned long xpram_highest_page_index(void) | |||
| 181 | /* | 181 | /* |
| 182 | * Block device make request function. | 182 | * Block device make request function. |
| 183 | */ | 183 | */ |
| 184 | static int xpram_make_request(struct request_queue *q, struct bio *bio) | 184 | static void xpram_make_request(struct request_queue *q, struct bio *bio) |
| 185 | { | 185 | { |
| 186 | xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; | 186 | xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; |
| 187 | struct bio_vec *bvec; | 187 | struct bio_vec *bvec; |
| @@ -221,10 +221,9 @@ static int xpram_make_request(struct request_queue *q, struct bio *bio) | |||
| 221 | } | 221 | } |
| 222 | set_bit(BIO_UPTODATE, &bio->bi_flags); | 222 | set_bit(BIO_UPTODATE, &bio->bi_flags); |
| 223 | bio_endio(bio, 0); | 223 | bio_endio(bio, 0); |
| 224 | return 0; | 224 | return; |
| 225 | fail: | 225 | fail: |
| 226 | bio_io_error(bio); | 226 | bio_io_error(bio); |
| 227 | return 0; | ||
| 228 | } | 227 | } |
| 229 | 228 | ||
| 230 | static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) | 229 | static int xpram_getgeo(struct block_device *bdev, struct hd_geometry *geo) |
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c index b9926ee0052c..09de99fbb7e0 100644 --- a/drivers/staging/zram/zram_drv.c +++ b/drivers/staging/zram/zram_drv.c | |||
| @@ -556,7 +556,7 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio) | |||
| 556 | /* | 556 | /* |
| 557 | * Handler function for all zram I/O requests. | 557 | * Handler function for all zram I/O requests. |
| 558 | */ | 558 | */ |
| 559 | static int zram_make_request(struct request_queue *queue, struct bio *bio) | 559 | static void zram_make_request(struct request_queue *queue, struct bio *bio) |
| 560 | { | 560 | { |
| 561 | struct zram *zram = queue->queuedata; | 561 | struct zram *zram = queue->queuedata; |
| 562 | 562 | ||
| @@ -575,13 +575,12 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio) | |||
| 575 | __zram_make_request(zram, bio, bio_data_dir(bio)); | 575 | __zram_make_request(zram, bio, bio_data_dir(bio)); |
| 576 | up_read(&zram->init_lock); | 576 | up_read(&zram->init_lock); |
| 577 | 577 | ||
| 578 | return 0; | 578 | return; |
| 579 | 579 | ||
| 580 | error_unlock: | 580 | error_unlock: |
| 581 | up_read(&zram->init_lock); | 581 | up_read(&zram->init_lock); |
| 582 | error: | 582 | error: |
| 583 | bio_io_error(bio); | 583 | bio_io_error(bio); |
| 584 | return 0; | ||
| 585 | } | 584 | } |
| 586 | 585 | ||
| 587 | void __zram_reset_device(struct zram *zram) | 586 | void __zram_reset_device(struct zram *zram) |
diff --git a/fs/bio.c b/fs/bio.c --- a/fs/bio.c +++ b/fs/bio.c | |||
| @@ -255,7 +255,6 @@ void bio_init(struct bio *bio) | |||
| 255 | { | 255 | { |
| 256 | memset(bio, 0, sizeof(*bio)); | 256 | memset(bio, 0, sizeof(*bio)); |
| 257 | bio->bi_flags = 1 << BIO_UPTODATE; | 257 | bio->bi_flags = 1 << BIO_UPTODATE; |
| 258 | bio->bi_comp_cpu = -1; | ||
| 259 | atomic_set(&bio->bi_cnt, 1); | 258 | atomic_set(&bio->bi_cnt, 1); |
| 260 | } | 259 | } |
| 261 | EXPORT_SYMBOL(bio_init); | 260 | EXPORT_SYMBOL(bio_init); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 95f786ec7f08..1c44b8d54504 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -1085,6 +1085,7 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part); | |||
| 1085 | static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | 1085 | static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) |
| 1086 | { | 1086 | { |
| 1087 | struct gendisk *disk; | 1087 | struct gendisk *disk; |
| 1088 | struct module *owner; | ||
| 1088 | int ret; | 1089 | int ret; |
| 1089 | int partno; | 1090 | int partno; |
| 1090 | int perm = 0; | 1091 | int perm = 0; |
| @@ -1110,6 +1111,7 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1110 | disk = get_gendisk(bdev->bd_dev, &partno); | 1111 | disk = get_gendisk(bdev->bd_dev, &partno); |
| 1111 | if (!disk) | 1112 | if (!disk) |
| 1112 | goto out; | 1113 | goto out; |
| 1114 | owner = disk->fops->owner; | ||
| 1113 | 1115 | ||
| 1114 | disk_block_events(disk); | 1116 | disk_block_events(disk); |
| 1115 | mutex_lock_nested(&bdev->bd_mutex, for_part); | 1117 | mutex_lock_nested(&bdev->bd_mutex, for_part); |
| @@ -1137,8 +1139,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1137 | bdev->bd_disk = NULL; | 1139 | bdev->bd_disk = NULL; |
| 1138 | mutex_unlock(&bdev->bd_mutex); | 1140 | mutex_unlock(&bdev->bd_mutex); |
| 1139 | disk_unblock_events(disk); | 1141 | disk_unblock_events(disk); |
| 1140 | module_put(disk->fops->owner); | ||
| 1141 | put_disk(disk); | 1142 | put_disk(disk); |
| 1143 | module_put(owner); | ||
| 1142 | goto restart; | 1144 | goto restart; |
| 1143 | } | 1145 | } |
| 1144 | } | 1146 | } |
| @@ -1194,8 +1196,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1194 | goto out_unlock_bdev; | 1196 | goto out_unlock_bdev; |
| 1195 | } | 1197 | } |
| 1196 | /* only one opener holds refs to the module and disk */ | 1198 | /* only one opener holds refs to the module and disk */ |
| 1197 | module_put(disk->fops->owner); | ||
| 1198 | put_disk(disk); | 1199 | put_disk(disk); |
| 1200 | module_put(owner); | ||
| 1199 | } | 1201 | } |
| 1200 | bdev->bd_openers++; | 1202 | bdev->bd_openers++; |
| 1201 | if (for_part) | 1203 | if (for_part) |
| @@ -1215,8 +1217,8 @@ static int __blkdev_get(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1215 | out_unlock_bdev: | 1217 | out_unlock_bdev: |
| 1216 | mutex_unlock(&bdev->bd_mutex); | 1218 | mutex_unlock(&bdev->bd_mutex); |
| 1217 | disk_unblock_events(disk); | 1219 | disk_unblock_events(disk); |
| 1218 | module_put(disk->fops->owner); | ||
| 1219 | put_disk(disk); | 1220 | put_disk(disk); |
| 1221 | module_put(owner); | ||
| 1220 | out: | 1222 | out: |
| 1221 | bdput(bdev); | 1223 | bdput(bdev); |
| 1222 | 1224 | ||
| @@ -1442,14 +1444,15 @@ static int __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part) | |||
| 1442 | if (!bdev->bd_openers) { | 1444 | if (!bdev->bd_openers) { |
| 1443 | struct module *owner = disk->fops->owner; | 1445 | struct module *owner = disk->fops->owner; |
| 1444 | 1446 | ||
| 1445 | put_disk(disk); | ||
| 1446 | module_put(owner); | ||
| 1447 | disk_put_part(bdev->bd_part); | 1447 | disk_put_part(bdev->bd_part); |
| 1448 | bdev->bd_part = NULL; | 1448 | bdev->bd_part = NULL; |
| 1449 | bdev->bd_disk = NULL; | 1449 | bdev->bd_disk = NULL; |
| 1450 | if (bdev != bdev->bd_contains) | 1450 | if (bdev != bdev->bd_contains) |
| 1451 | victim = bdev->bd_contains; | 1451 | victim = bdev->bd_contains; |
| 1452 | bdev->bd_contains = NULL; | 1452 | bdev->bd_contains = NULL; |
| 1453 | |||
| 1454 | put_disk(disk); | ||
| 1455 | module_put(owner); | ||
| 1453 | } | 1456 | } |
| 1454 | mutex_unlock(&bdev->bd_mutex); | 1457 | mutex_unlock(&bdev->bd_mutex); |
| 1455 | bdput(bdev); | 1458 | bdput(bdev); |
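The __blkdev_get()/__blkdev_put() hunks all follow one rule: read disk->fops->owner into a local before the final put_disk(), because put_disk() may drop the last reference and free the gendisk together with its fops. The module reference is then released through the cached pointer. A minimal sketch of that ordering, using a hypothetical release_disk_ref() helper:

#include <linux/genhd.h>
#include <linux/module.h>

/* Drop a gendisk reference and the module reference taken with it. */
static void release_disk_ref(struct gendisk *disk)
{
	struct module *owner = disk->fops->owner;	/* cache before any put */

	put_disk(disk);		/* may free 'disk', so no disk->fops after this */
	module_put(owner);	/* safe: uses the cached owner pointer */
}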
diff --git a/include/linux/bio.h b/include/linux/bio.h index ce33e6868a2f..a3c071c9e189 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -269,14 +269,6 @@ extern void bvec_free_bs(struct bio_set *, struct bio_vec *, unsigned int); | |||
| 269 | extern unsigned int bvec_nr_vecs(unsigned short idx); | 269 | extern unsigned int bvec_nr_vecs(unsigned short idx); |
| 270 | 270 | ||
| 271 | /* | 271 | /* |
| 272 | * Allow queuer to specify a completion CPU for this bio | ||
| 273 | */ | ||
| 274 | static inline void bio_set_completion_cpu(struct bio *bio, unsigned int cpu) | ||
| 275 | { | ||
| 276 | bio->bi_comp_cpu = cpu; | ||
| 277 | } | ||
| 278 | |||
| 279 | /* | ||
| 280 | * bio_set is used to allow other portions of the IO system to | 272 | * bio_set is used to allow other portions of the IO system to |
| 281 | * allocate their own private memory pools for bio and iovec structures. | 273 | * allocate their own private memory pools for bio and iovec structures. |
| 282 | * These memory pools in turn all allocate from the bio_slab | 274 | * These memory pools in turn all allocate from the bio_slab |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 71fc53bb8f1c..4053cbd4490e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -59,8 +59,6 @@ struct bio { | |||
| 59 | 59 | ||
| 60 | unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ | 60 | unsigned int bi_max_vecs; /* max bvl_vecs we can hold */ |
| 61 | 61 | ||
| 62 | unsigned int bi_comp_cpu; /* completion CPU */ | ||
| 63 | |||
| 64 | atomic_t bi_cnt; /* pin count */ | 62 | atomic_t bi_cnt; /* pin count */ |
| 65 | 63 | ||
| 66 | struct bio_vec *bi_io_vec; /* the actual vec list */ | 64 | struct bio_vec *bi_io_vec; /* the actual vec list */ |
| @@ -93,11 +91,10 @@ struct bio { | |||
| 93 | #define BIO_BOUNCED 5 /* bio is a bounce bio */ | 91 | #define BIO_BOUNCED 5 /* bio is a bounce bio */ |
| 94 | #define BIO_USER_MAPPED 6 /* contains user pages */ | 92 | #define BIO_USER_MAPPED 6 /* contains user pages */ |
| 95 | #define BIO_EOPNOTSUPP 7 /* not supported */ | 93 | #define BIO_EOPNOTSUPP 7 /* not supported */ |
| 96 | #define BIO_CPU_AFFINE 8 /* complete bio on same CPU as submitted */ | 94 | #define BIO_NULL_MAPPED 8 /* contains invalid user pages */ |
| 97 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ | 95 | #define BIO_FS_INTEGRITY 9 /* fs owns integrity data, not block layer */ |
| 98 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ | 96 | #define BIO_QUIET 10 /* Make BIO Quiet */ |
| 99 | #define BIO_QUIET 11 /* Make BIO Quiet */ | 97 | #define BIO_MAPPED_INTEGRITY 11/* integrity metadata has been remapped */ |
| 100 | #define BIO_MAPPED_INTEGRITY 12/* integrity metadata has been remapped */ | ||
| 101 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) | 98 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) |
| 102 | 99 | ||
| 103 | /* | 100 | /* |
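Dropping BIO_CPU_AFFINE renumbers the remaining flag bits, which is transparent as long as the bits are only addressed through their symbolic names (as bio_flagged() does). A small illustrative fragment, assuming nothing beyond the flag helpers shown above:

#include <linux/bio.h>

/* Mark a bio quiet and check the flag back; bit positions never appear. */
static void mark_bio_quiet(struct bio *bio)
{
	set_bit(BIO_QUIET, &bio->bi_flags);	/* suppress error reporting */

	if (bio_flagged(bio, BIO_QUIET))
		pr_debug("bio %p marked quiet\n", bio);
}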
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 7fbaa9103344..5267cd2f20dc 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -195,7 +195,7 @@ struct request_pm_state | |||
| 195 | #include <linux/elevator.h> | 195 | #include <linux/elevator.h> |
| 196 | 196 | ||
| 197 | typedef void (request_fn_proc) (struct request_queue *q); | 197 | typedef void (request_fn_proc) (struct request_queue *q); |
| 198 | typedef int (make_request_fn) (struct request_queue *q, struct bio *bio); | 198 | typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); |
| 199 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 199 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
| 200 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 200 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
| 201 | 201 | ||
| @@ -680,6 +680,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
| 680 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 680 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
| 681 | struct scsi_ioctl_command __user *); | 681 | struct scsi_ioctl_command __user *); |
| 682 | 682 | ||
| 683 | extern void blk_queue_bio(struct request_queue *q, struct bio *bio); | ||
| 684 | |||
| 683 | /* | 685 | /* |
| 684 | * A queue has just exitted congestion. Note this in the global counter of | 686 | * A queue has just exitted congestion. Note this in the global counter of |
| 685 | * congested queues, and wake up anyone who was waiting for requests to be | 687 | * congested queues, and wake up anyone who was waiting for requests to be |
| @@ -863,16 +865,22 @@ struct request_queue *blk_alloc_queue_node(gfp_t, int); | |||
| 863 | extern void blk_put_queue(struct request_queue *); | 865 | extern void blk_put_queue(struct request_queue *); |
| 864 | 866 | ||
| 865 | /* | 867 | /* |
| 866 | * Note: Code in between changing the blk_plug list/cb_list or element of such | 868 | * blk_plug permits building a queue of related requests by holding the I/O |
| 867 | * lists is preemptable, but such code can't do sleep (or be very careful), | 869 | * fragments for a short period. This allows merging of sequential requests |
| 868 | * otherwise data is corrupted. For details, please check schedule() where | 870 | * into single larger request. As the requests are moved from a per-task list to |
| 869 | * blk_schedule_flush_plug() is called. | 871 | * the device's request_queue in a batch, this results in improved scalability |
| 872 | * as the lock contention for request_queue lock is reduced. | ||
| 873 | * | ||
| 874 | * It is ok not to disable preemption when adding the request to the plug list | ||
| 875 | * or when attempting a merge, because blk_schedule_flush_list() will only flush | ||
| 876 | * the plug list when the task sleeps by itself. For details, please see | ||
| 877 | * schedule() where blk_schedule_flush_plug() is called. | ||
| 870 | */ | 878 | */ |
| 871 | struct blk_plug { | 879 | struct blk_plug { |
| 872 | unsigned long magic; | 880 | unsigned long magic; /* detect uninitialized use-cases */ |
| 873 | struct list_head list; | 881 | struct list_head list; /* requests */ |
| 874 | struct list_head cb_list; | 882 | struct list_head cb_list; /* md requires an unplug callback */ |
| 875 | unsigned int should_sort; | 883 | unsigned int should_sort; /* list to be sorted before flushing? */ |
| 876 | }; | 884 | }; |
| 877 | #define BLK_MAX_REQUEST_COUNT 16 | 885 | #define BLK_MAX_REQUEST_COUNT 16 |
| 878 | 886 | ||
| @@ -1189,20 +1197,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) | |||
| 1189 | } | 1197 | } |
| 1190 | #endif | 1198 | #endif |
| 1191 | 1199 | ||
| 1192 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
| 1193 | extern int blk_throtl_init(struct request_queue *q); | ||
| 1194 | extern void blk_throtl_exit(struct request_queue *q); | ||
| 1195 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | ||
| 1196 | #else /* CONFIG_BLK_DEV_THROTTLING */ | ||
| 1197 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | ||
| 1198 | { | ||
| 1199 | return 0; | ||
| 1200 | } | ||
| 1201 | |||
| 1202 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | ||
| 1203 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | ||
| 1204 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | ||
| 1205 | |||
| 1206 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1200 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
| 1207 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1201 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
| 1208 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 1202 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ |
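The rewritten blk_plug comment describes plugging from the submitter's side: a task opens a plug, its bios collect on a per-task list, and the batch is spliced into the device's request_queue when the plug is finished (or when the task sleeps). A hedged usage sketch, assuming the caller already holds an array of fully built bios:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>		/* submit_bio() */

/* Submit a batch of bios under one plug so adjacent requests can merge
 * before they hit the request_queue lock. */
static void submit_batch(struct bio **bios, int nr, int rw)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* requests collect on current->plug */

	for (i = 0; i < nr; i++)
		submit_bio(rw, bios[i]);

	blk_finish_plug(&plug);		/* splice the batch into the queue(s) */
}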
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index d800d5142184..1d0f7a2ff73b 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -38,6 +38,12 @@ struct elevator_ops | |||
| 38 | elevator_merged_fn *elevator_merged_fn; | 38 | elevator_merged_fn *elevator_merged_fn; |
| 39 | elevator_merge_req_fn *elevator_merge_req_fn; | 39 | elevator_merge_req_fn *elevator_merge_req_fn; |
| 40 | elevator_allow_merge_fn *elevator_allow_merge_fn; | 40 | elevator_allow_merge_fn *elevator_allow_merge_fn; |
| 41 | |||
| 42 | /* | ||
| 43 | * Used for both plugged list and elevator merging and in the | ||
| 44 | * former case called without queue_lock. Read comment on top of | ||
| 45 | * attempt_plug_merge() for details. | ||
| 46 | */ | ||
| 41 | elevator_bio_merged_fn *elevator_bio_merged_fn; | 47 | elevator_bio_merged_fn *elevator_bio_merged_fn; |
| 42 | 48 | ||
| 43 | elevator_dispatch_fn *elevator_dispatch_fn; | 49 | elevator_dispatch_fn *elevator_dispatch_fn; |
diff --git a/include/linux/loop.h b/include/linux/loop.h index 683d69890119..a06880689115 100644 --- a/include/linux/loop.h +++ b/include/linux/loop.h | |||
| @@ -73,7 +73,6 @@ struct loop_device { | |||
| 73 | */ | 73 | */ |
| 74 | enum { | 74 | enum { |
| 75 | LO_FLAGS_READ_ONLY = 1, | 75 | LO_FLAGS_READ_ONLY = 1, |
| 76 | LO_FLAGS_USE_AOPS = 2, | ||
| 77 | LO_FLAGS_AUTOCLEAR = 4, | 76 | LO_FLAGS_AUTOCLEAR = 4, |
| 78 | }; | 77 | }; |
| 79 | 78 | ||
diff --git a/mm/bounce.c b/mm/bounce.c index 1481de68184b..434fb4f0c5e4 100644 --- a/mm/bounce.c +++ b/mm/bounce.c | |||
| @@ -14,6 +14,7 @@ | |||
| 14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
| 15 | #include <linux/hash.h> | 15 | #include <linux/hash.h> |
| 16 | #include <linux/highmem.h> | 16 | #include <linux/highmem.h> |
| 17 | #include <linux/bootmem.h> | ||
| 17 | #include <asm/tlbflush.h> | 18 | #include <asm/tlbflush.h> |
| 18 | 19 | ||
| 19 | #include <trace/events/block.h> | 20 | #include <trace/events/block.h> |
| @@ -26,12 +27,10 @@ static mempool_t *page_pool, *isa_page_pool; | |||
| 26 | #ifdef CONFIG_HIGHMEM | 27 | #ifdef CONFIG_HIGHMEM |
| 27 | static __init int init_emergency_pool(void) | 28 | static __init int init_emergency_pool(void) |
| 28 | { | 29 | { |
| 29 | struct sysinfo i; | 30 | #ifndef CONFIG_MEMORY_HOTPLUG |
| 30 | si_meminfo(&i); | 31 | if (max_pfn <= max_low_pfn) |
| 31 | si_swapinfo(&i); | ||
| 32 | |||
| 33 | if (!i.totalhigh) | ||
| 34 | return 0; | 32 | return 0; |
| 33 | #endif | ||
| 35 | 34 | ||
| 36 | page_pool = mempool_create_page_pool(POOL_SIZE, 0); | 35 | page_pool = mempool_create_page_pool(POOL_SIZE, 0); |
| 37 | BUG_ON(!page_pool); | 36 | BUG_ON(!page_pool); |
