Diffstat (limited to 'block')
-rw-r--r--   block/blk-cgroup.c          |   6
-rw-r--r--   block/blk-core.c            |   8
-rw-r--r--   block/blk-exec.c            |   1
-rw-r--r--   block/blk-integrity.c       |   4
-rw-r--r--   block/blk-ioc.c             |   3
-rw-r--r--   block/bsg.c                 |  29
-rw-r--r--   block/cfq-iosched.c         |   3
-rw-r--r--   block/elevator.c            |  39
-rw-r--r--   block/genhd.c               |  76
-rw-r--r--   block/partition-generic.c   |   6
-rw-r--r--   block/partitions/check.c    |  37
-rw-r--r--   block/partitions/check.h    |   4
-rw-r--r--   block/partitions/efi.c      |  12
-rw-r--r--   block/partitions/mac.c      |   4
-rw-r--r--   block/partitions/msdos.c    |  11
15 files changed, 164 insertions, 79 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 87ea95d1f533..b2b9837f9dd3 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -459,7 +459,6 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgroup);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	int i;
 
 	mutex_lock(&blkcg_pol_mutex);
@@ -470,7 +469,7 @@ static int blkcg_reset_stats(struct cgroup *cgroup, struct cftype *cftype,
 	 * stat updates. This is a debug feature which shouldn't exist
 	 * anyway. If you get hit by a race, retry.
 	 */
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		for (i = 0; i < BLKCG_MAX_POLS; i++) {
 			struct blkcg_policy *pol = blkcg_policy[i];
 
@@ -518,11 +517,10 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
 		       bool show_total)
 {
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 	u64 total = 0;
 
 	rcu_read_lock();
-	hlist_for_each_entry_rcu(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry_rcu(blkg, &blkcg->blkg_list, blkcg_node) {
 		spin_lock_irq(blkg->q->queue_lock);
 		if (blkcg_policy_enabled(blkg->q, pol))
 			total += prfill(sf, blkg->pd[pol->plid], data);
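(Note: the blk-cgroup.c hunks above, and the matching blk-ioc.c, bsg.c, cfq-iosched.c and elevator.c hunks below, all track the tree-wide change that drops the separate struct hlist_node cursor argument from the hlist iterators. A minimal sketch of the old and new calling conventions; struct my_item, my_list and process() are illustrative names, not taken from this diff.)

/* sketch only: my_item, my_list and process() are made-up names */
struct my_item {
	int value;
	struct hlist_node node;
};

static void walk_old_style(struct hlist_head *my_list)
{
	struct my_item *pos;
	struct hlist_node *n;

	/* old form: a scratch hlist_node cursor is passed explicitly */
	hlist_for_each_entry(pos, n, my_list, node)
		process(pos);
}

static void walk_new_style(struct hlist_head *my_list)
{
	struct my_item *pos;

	/*
	 * new form: the cursor argument is gone; the iterator walks the
	 * typed entries directly (the node is derived via container_of())
	 */
	hlist_for_each_entry(pos, my_list, node)
		process(pos);
}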
diff --git a/block/blk-core.c b/block/blk-core.c
index 66d31687cf6b..074b758efc42 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1473,6 +1473,11 @@ void blk_queue_bio(struct request_queue *q, struct bio *bio)
 	 */
 	blk_queue_bounce(q, &bio);
 
+	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+		bio_endio(bio, -EIO);
+		return;
+	}
+
 	if (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
@@ -1706,9 +1711,6 @@ generic_make_request_checks(struct bio *bio)
 	 */
 	blk_partition_remap(bio);
 
-	if (bio_integrity_enabled(bio) && bio_integrity_prep(bio))
-		goto end_io;
-
 	if (bio_check_eod(bio, nr_sectors))
 		goto end_io;
 
diff --git a/block/blk-exec.c b/block/blk-exec.c
index f634de772b0c..e70621396129 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/sched/sysctl.h>
 
 #include "blk.h"
 
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index da2a818c3a92..dabd221857e1 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -420,6 +420,8 @@ int blk_integrity_register(struct gendisk *disk, struct blk_integrity *template)
 	} else
 		bi->name = bi_unsupported_name;
 
+	disk->queue->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;
+
 	return 0;
 }
 EXPORT_SYMBOL(blk_integrity_register);
@@ -438,6 +440,8 @@ void blk_integrity_unregister(struct gendisk *disk)
 	if (!disk || !disk->integrity)
 		return;
 
+	disk->queue->backing_dev_info.capabilities &= ~BDI_CAP_STABLE_WRITES;
+
 	bi = disk->integrity;
 
 	kobject_uevent(&bi->kobj, KOBJ_REMOVE);
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index fab4cdd3f7bb..9c4bb8266bc8 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -164,7 +164,6 @@ EXPORT_SYMBOL(put_io_context);
  */
 void put_io_context_active(struct io_context *ioc)
 {
-	struct hlist_node *n;
 	unsigned long flags;
 	struct io_cq *icq;
 
@@ -180,7 +179,7 @@ void put_io_context_active(struct io_context *ioc)
 	 */
 retry:
 	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
-	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+	hlist_for_each_entry(icq, &ioc->icq_list, ioc_node) {
 		if (icq->flags & ICQ_EXITED)
 			continue;
 		if (spin_trylock(icq->q->queue_lock)) {
diff --git a/block/bsg.c b/block/bsg.c
index ff64ae3bacee..420a5a9f1b23 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -800,11 +800,10 @@ static struct bsg_device *bsg_add_device(struct inode *inode,
 static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q)
 {
 	struct bsg_device *bd;
-	struct hlist_node *entry;
 
 	mutex_lock(&bsg_mutex);
 
-	hlist_for_each_entry(bd, entry, bsg_dev_idx_hash(minor), dev_list) {
+	hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) {
 		if (bd->queue == q) {
 			atomic_inc(&bd->ref_count);
 			goto found;
@@ -997,7 +996,7 @@ int bsg_register_queue(struct request_queue *q, struct device *parent,
 {
 	struct bsg_class_device *bcd;
 	dev_t dev;
-	int ret, minor;
+	int ret;
 	struct device *class_dev = NULL;
 	const char *devname;
 
@@ -1017,23 +1016,16 @@
 
 	mutex_lock(&bsg_mutex);
 
-	ret = idr_pre_get(&bsg_minor_idr, GFP_KERNEL);
-	if (!ret) {
-		ret = -ENOMEM;
-		goto unlock;
-	}
-
-	ret = idr_get_new(&bsg_minor_idr, bcd, &minor);
-	if (ret < 0)
+	ret = idr_alloc(&bsg_minor_idr, bcd, 0, BSG_MAX_DEVS, GFP_KERNEL);
+	if (ret < 0) {
+		if (ret == -ENOSPC) {
+			printk(KERN_ERR "bsg: too many bsg devices\n");
+			ret = -EINVAL;
+		}
 		goto unlock;
-
-	if (minor >= BSG_MAX_DEVS) {
-		printk(KERN_ERR "bsg: too many bsg devices\n");
-		ret = -EINVAL;
-		goto remove_idr;
 	}
 
-	bcd->minor = minor;
+	bcd->minor = ret;
 	bcd->queue = q;
 	bcd->parent = get_device(parent);
 	bcd->release = release;
@@ -1059,8 +1051,7 @@ unregister_class_dev:
 	device_unregister(class_dev);
put_dev:
 	put_device(parent);
-remove_idr:
-	idr_remove(&bsg_minor_idr, minor);
+	idr_remove(&bsg_minor_idr, bcd->minor);
unlock:
 	mutex_unlock(&bsg_mutex);
 	return ret;
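(Note: the bsg.c hunk above, and the blk_alloc_devt() hunk in genhd.c below, convert from the two-step idr_pre_get()/idr_get_new() interface to idr_alloc(), which allocates an ID in the half-open range [start, end) and returns either the new ID or a negative errno, with -ENOSPC meaning the range is exhausted. A hedged sketch of the resulting pattern; my_idr, struct my_obj and MY_MAX_IDS are illustrative names, not taken from this diff.)

static DEFINE_IDR(my_idr);

static int my_obj_register(struct my_obj *obj)
{
	int id;

	/* allocate an id in [0, MY_MAX_IDS); the id is returned directly */
	id = idr_alloc(&my_idr, obj, 0, MY_MAX_IDS, GFP_KERNEL);
	if (id < 0)
		return id == -ENOSPC ? -EBUSY : id;

	obj->id = id;
	return 0;
}

static void my_obj_unregister(struct my_obj *obj)
{
	/* releasing the id makes it available for reuse */
	idr_remove(&my_idr, obj->id);
}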
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 1bf9307e8f56..4f0ade74cfd0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1697,7 +1697,6 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
 {
 	struct blkcg *blkcg = cgroup_to_blkcg(cgrp);
 	struct blkcg_gq *blkg;
-	struct hlist_node *n;
 
 	if (val < CFQ_WEIGHT_MIN || val > CFQ_WEIGHT_MAX)
 		return -EINVAL;
@@ -1709,7 +1708,7 @@ static int __cfq_set_weight(struct cgroup *cgrp, struct cftype *cft, u64 val,
 	else
 		blkcg->cfq_leaf_weight = val;
 
-	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
+	hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 		struct cfq_group *cfqg = blkg_to_cfqg(blkg);
 
 		if (!cfqg)
diff --git a/block/elevator.c b/block/elevator.c
index 11683bb10b7b..a0ffdd943c98 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -95,14 +95,14 @@ static void elevator_put(struct elevator_type *e)
 	module_put(e->elevator_owner);
 }
 
-static struct elevator_type *elevator_get(const char *name)
+static struct elevator_type *elevator_get(const char *name, bool try_loading)
 {
 	struct elevator_type *e;
 
 	spin_lock(&elv_list_lock);
 
 	e = elevator_find(name);
-	if (!e) {
+	if (!e && try_loading) {
 		spin_unlock(&elv_list_lock);
 		request_module("%s-iosched", name);
 		spin_lock(&elv_list_lock);
@@ -131,6 +131,22 @@ static int __init elevator_setup(char *str)
 
 __setup("elevator=", elevator_setup);
 
+/* called during boot to load the elevator chosen by the elevator param */
+void __init load_default_elevator_module(void)
+{
+	struct elevator_type *e;
+
+	if (!chosen_elevator[0])
+		return;
+
+	spin_lock(&elv_list_lock);
+	e = elevator_find(chosen_elevator);
+	spin_unlock(&elv_list_lock);
+
+	if (!e)
+		request_module("%s-iosched", chosen_elevator);
+}
+
 static struct kobj_type elv_ktype;
 
 static struct elevator_queue *elevator_alloc(struct request_queue *q,
@@ -177,25 +193,30 @@ int elevator_init(struct request_queue *q, char *name)
 	q->boundary_rq = NULL;
 
 	if (name) {
-		e = elevator_get(name);
+		e = elevator_get(name, true);
 		if (!e)
 			return -EINVAL;
 	}
 
+	/*
+	 * Use the default elevator specified by config boot param or
+	 * config option. Don't try to load modules as we could be running
+	 * off async and request_module() isn't allowed from async.
+	 */
 	if (!e && *chosen_elevator) {
-		e = elevator_get(chosen_elevator);
+		e = elevator_get(chosen_elevator, false);
 		if (!e)
 			printk(KERN_ERR "I/O scheduler %s not found\n",
 							chosen_elevator);
 	}
 
 	if (!e) {
-		e = elevator_get(CONFIG_DEFAULT_IOSCHED);
+		e = elevator_get(CONFIG_DEFAULT_IOSCHED, false);
 		if (!e) {
 			printk(KERN_ERR
 				"Default I/O scheduler not found. " \
 					"Using noop.\n");
-			e = elevator_get("noop");
+			e = elevator_get("noop", false);
 		}
 	}
 
@@ -252,10 +273,10 @@ static void elv_rqhash_reposition(struct request_queue *q, struct request *rq)
 static struct request *elv_rqhash_find(struct request_queue *q, sector_t offset)
 {
 	struct elevator_queue *e = q->elevator;
-	struct hlist_node *entry, *next;
+	struct hlist_node *next;
 	struct request *rq;
 
-	hash_for_each_possible_safe(e->hash, rq, entry, next, hash, offset) {
+	hash_for_each_possible_safe(e->hash, rq, next, hash, offset) {
 		BUG_ON(!ELV_ON_HASH(rq));
 
 		if (unlikely(!rq_mergeable(rq))) {
@@ -936,7 +957,7 @@ int elevator_change(struct request_queue *q, const char *name)
 		return -ENXIO;
 
 	strlcpy(elevator_name, name, sizeof(elevator_name));
-	e = elevator_get(strstrip(elevator_name));
+	e = elevator_get(strstrip(elevator_name), true);
 	if (!e) {
 		printk(KERN_ERR "elevator: type %s not found\n", elevator_name);
 		return -EINVAL;
diff --git a/block/genhd.c b/block/genhd.c
index 9a289d7c84bb..3c001fba80c7 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -18,6 +18,7 @@
 #include <linux/mutex.h>
 #include <linux/idr.h>
 #include <linux/log2.h>
+#include <linux/pm_runtime.h>
 
 #include "blk.h"
 
@@ -25,7 +26,7 @@ static DEFINE_MUTEX(block_class_lock);
 struct kobject *block_depr;
 
 /* for extended dynamic devt allocation, currently only one major is used */
-#define MAX_EXT_DEVT		(1 << MINORBITS)
+#define NR_EXT_DEVT		(1 << MINORBITS)
 
 /* For extended devt allocation. ext_devt_mutex prevents look up
  * results from going away underneath its user.
@@ -35,6 +36,8 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_check_events(struct disk_events *ev,
+			      unsigned int *clearing_ptr);
 static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
@@ -408,7 +411,7 @@ static int blk_mangle_minor(int minor)
 int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 {
 	struct gendisk *disk = part_to_disk(part);
-	int idx, rc;
+	int idx;
 
 	/* in consecutive minor range? */
 	if (part->partno < disk->minors) {
@@ -417,19 +420,11 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
 	}
 
 	/* allocate ext devt */
-	do {
-		if (!idr_pre_get(&ext_devt_idr, GFP_KERNEL))
-			return -ENOMEM;
-		rc = idr_get_new(&ext_devt_idr, part, &idx);
-	} while (rc == -EAGAIN);
-
-	if (rc)
-		return rc;
-
-	if (idx > MAX_EXT_DEVT) {
-		idr_remove(&ext_devt_idr, idx);
-		return -EBUSY;
-	}
+	mutex_lock(&ext_devt_mutex);
+	idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_KERNEL);
+	mutex_unlock(&ext_devt_mutex);
+	if (idx < 0)
+		return idx == -ENOSPC ? -EBUSY : idx;
 
 	*devt = MKDEV(BLOCK_EXT_MAJOR, blk_mangle_minor(idx));
 	return 0;
@@ -532,6 +527,14 @@ static void register_disk(struct gendisk *disk)
 			return;
 		}
 	}
+
+	/*
+	 * avoid probable deadlock caused by allocating memory with
+	 * GFP_KERNEL in runtime_resume callback of its all ancestor
+	 * devices
+	 */
+	pm_runtime_set_memalloc_noio(ddev, true);
+
 	disk->part0.holder_dir = kobject_create_and_add("holders", &ddev->kobj);
 	disk->slave_dir = kobject_create_and_add("slaves", &ddev->kobj);
 
@@ -644,7 +647,6 @@ void del_gendisk(struct gendisk *disk)
 	disk_part_iter_exit(&piter);
 
 	invalidate_partition(disk, 0);
-	blk_free_devt(disk_to_dev(disk)->devt);
 	set_capacity(disk, 0);
 	disk->flags &= ~GENHD_FL_UP;
 
@@ -661,7 +663,9 @@ void del_gendisk(struct gendisk *disk)
 	disk->driverfs_dev = NULL;
 	if (!sysfs_deprecated)
 		sysfs_remove_link(block_depr, dev_name(disk_to_dev(disk)));
+	pm_runtime_set_memalloc_noio(disk_to_dev(disk), false);
 	device_del(disk_to_dev(disk));
+	blk_free_devt(disk_to_dev(disk)->devt);
 }
 EXPORT_SYMBOL(del_gendisk);
 
@@ -1549,6 +1553,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 	const struct block_device_operations *bdops = disk->fops;
 	struct disk_events *ev = disk->ev;
 	unsigned int pending;
+	unsigned int clearing = mask;
 
 	if (!ev) {
 		/* for drivers still using the old ->media_changed method */
@@ -1558,34 +1563,53 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 		return 0;
 	}
 
-	/* tell the workfn about the events being cleared */
+	disk_block_events(disk);
+
+	/*
+	 * store the union of mask and ev->clearing on the stack so that the
+	 * race with disk_flush_events does not cause ambiguity (ev->clearing
+	 * can still be modified even if events are blocked).
+	 */
 	spin_lock_irq(&ev->lock);
-	ev->clearing |= mask;
+	clearing |= ev->clearing;
+	ev->clearing = 0;
 	spin_unlock_irq(&ev->lock);
 
-	/* uncondtionally schedule event check and wait for it to finish */
-	disk_block_events(disk);
-	queue_delayed_work(system_freezable_wq, &ev->dwork, 0);
-	flush_delayed_work(&ev->dwork);
-	__disk_unblock_events(disk, false);
+	disk_check_events(ev, &clearing);
+	/*
+	 * if ev->clearing is not 0, the disk_flush_events got called in the
+	 * middle of this function, so we want to run the workfn without delay.
+	 */
+	__disk_unblock_events(disk, ev->clearing ? true : false);
 
 	/* then, fetch and clear pending events */
 	spin_lock_irq(&ev->lock);
-	WARN_ON_ONCE(ev->clearing & mask);	/* cleared by workfn */
 	pending = ev->pending & mask;
 	ev->pending &= ~mask;
 	spin_unlock_irq(&ev->lock);
+	WARN_ON_ONCE(clearing & mask);
 
 	return pending;
 }
 
+/*
+ * Separate this part out so that a different pointer for clearing_ptr can be
+ * passed in for disk_clear_events.
+ */
 static void disk_events_workfn(struct work_struct *work)
 {
 	struct delayed_work *dwork = to_delayed_work(work);
 	struct disk_events *ev = container_of(dwork, struct disk_events, dwork);
+
+	disk_check_events(ev, &ev->clearing);
+}
+
+static void disk_check_events(struct disk_events *ev,
+			      unsigned int *clearing_ptr)
+{
 	struct gendisk *disk = ev->disk;
 	char *envp[ARRAY_SIZE(disk_uevents) + 1] = { };
-	unsigned int clearing = ev->clearing;
+	unsigned int clearing = *clearing_ptr;
 	unsigned int events;
 	unsigned long intv;
 	int nr_events = 0, i;
@@ -1598,7 +1622,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	events &= ~ev->pending;
 	ev->pending |= events;
-	ev->clearing &= ~clearing;
+	*clearing_ptr &= ~clearing;
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
diff --git a/block/partition-generic.c b/block/partition-generic.c
index f1d14519cc04..789cdea05893 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -249,11 +249,11 @@ void delete_partition(struct gendisk *disk, int partno)
 	if (!part)
 		return;
 
-	blk_free_devt(part_devt(part));
 	rcu_assign_pointer(ptbl->part[partno], NULL);
 	rcu_assign_pointer(ptbl->last_lookup, NULL);
 	kobject_put(part->holder_dir);
 	device_del(part_to_dev(part));
+	blk_free_devt(part_devt(part));
 
 	hd_struct_put(part);
 }
@@ -418,7 +418,7 @@ int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
 	int p, highest, res;
rescan:
 	if (state && !IS_ERR(state)) {
-		kfree(state);
+		free_partitions(state);
 		state = NULL;
 	}
 
@@ -525,7 +525,7 @@ rescan:
 			md_autodetect_dev(part_to_dev(part)->devt);
 #endif
 	}
-	kfree(state);
+	free_partitions(state);
 	return 0;
 }
 
diff --git a/block/partitions/check.c b/block/partitions/check.c
index bc908672c976..19ba207ea7d1 100644
--- a/block/partitions/check.c
+++ b/block/partitions/check.c
@@ -14,6 +14,7 @@
  */
 
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 #include <linux/ctype.h>
 #include <linux/genhd.h>
 
@@ -106,18 +107,45 @@ static int (*check_part[])(struct parsed_partitions *) = {
 	NULL
 };
 
+static struct parsed_partitions *allocate_partitions(struct gendisk *hd)
+{
+	struct parsed_partitions *state;
+	int nr;
+
+	state = kzalloc(sizeof(*state), GFP_KERNEL);
+	if (!state)
+		return NULL;
+
+	nr = disk_max_parts(hd);
+	state->parts = vzalloc(nr * sizeof(state->parts[0]));
+	if (!state->parts) {
+		kfree(state);
+		return NULL;
+	}
+
+	state->limit = nr;
+
+	return state;
+}
+
+void free_partitions(struct parsed_partitions *state)
+{
+	vfree(state->parts);
+	kfree(state);
+}
+
 struct parsed_partitions *
 check_partition(struct gendisk *hd, struct block_device *bdev)
 {
 	struct parsed_partitions *state;
 	int i, res, err;
 
-	state = kzalloc(sizeof(struct parsed_partitions), GFP_KERNEL);
+	state = allocate_partitions(hd);
 	if (!state)
 		return NULL;
 	state->pp_buf = (char *)__get_free_page(GFP_KERNEL);
 	if (!state->pp_buf) {
-		kfree(state);
+		free_partitions(state);
 		return NULL;
 	}
 	state->pp_buf[0] = '\0';
@@ -128,10 +156,9 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
 	if (isdigit(state->name[strlen(state->name)-1]))
 		sprintf(state->name, "p");
 
-	state->limit = disk_max_parts(hd);
 	i = res = err = 0;
 	while (!res && check_part[i]) {
-		memset(&state->parts, 0, sizeof(state->parts));
+		memset(state->parts, 0, state->limit * sizeof(state->parts[0]));
 		res = check_part[i++](state);
 		if (res < 0) {
 			/* We have hit an I/O error which we don't report now.
@@ -161,6 +188,6 @@ check_partition(struct gendisk *hd, struct block_device *bdev)
 	printk(KERN_INFO "%s", state->pp_buf);
 
 	free_page((unsigned long)state->pp_buf);
-	kfree(state);
+	free_partitions(state);
 	return ERR_PTR(res);
 }
diff --git a/block/partitions/check.h b/block/partitions/check.h
index 52b100311ec3..eade17ea910b 100644
--- a/block/partitions/check.h
+++ b/block/partitions/check.h
@@ -15,13 +15,15 @@ struct parsed_partitions {
 		int flags;
 		bool has_info;
 		struct partition_meta_info info;
-	} parts[DISK_MAX_PARTS];
+	} *parts;
 	int next;
 	int limit;
 	bool access_beyond_eod;
 	char *pp_buf;
 };
 
+void free_partitions(struct parsed_partitions *state);
+
 struct parsed_partitions *
 check_partition(struct gendisk *, struct block_device *);
 
diff --git a/block/partitions/efi.c b/block/partitions/efi.c
index b62fb88b8711..ff5804e2f1d2 100644
--- a/block/partitions/efi.c
+++ b/block/partitions/efi.c
@@ -310,15 +310,23 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 		goto fail;
 	}
 
-	/* Check the GUID Partition Table header size */
+	/* Check the GUID Partition Table header size is too big */
 	if (le32_to_cpu((*gpt)->header_size) >
 			bdev_logical_block_size(state->bdev)) {
-		pr_debug("GUID Partition Table Header size is wrong: %u > %u\n",
+		pr_debug("GUID Partition Table Header size is too large: %u > %u\n",
 			le32_to_cpu((*gpt)->header_size),
 			bdev_logical_block_size(state->bdev));
 		goto fail;
 	}
 
+	/* Check the GUID Partition Table header size is too small */
+	if (le32_to_cpu((*gpt)->header_size) < sizeof(gpt_header)) {
+		pr_debug("GUID Partition Table Header size is too small: %u < %zu\n",
+			le32_to_cpu((*gpt)->header_size),
+			sizeof(gpt_header));
+		goto fail;
+	}
+
 	/* Check the GUID Partition Table CRC */
 	origcrc = le32_to_cpu((*gpt)->header_crc32);
 	(*gpt)->header_crc32 = 0;
diff --git a/block/partitions/mac.c b/block/partitions/mac.c
index 11f688bd76c5..76d8ba6379a9 100644
--- a/block/partitions/mac.c
+++ b/block/partitions/mac.c
@@ -63,6 +63,10 @@ int mac_partition(struct parsed_partitions *state)
 		put_dev_sector(sect);
 		return 0;
 	}
+
+	if (blocks_in_map >= state->limit)
+		blocks_in_map = state->limit - 1;
+
 	strlcat(state->pp_buf, " [mac]", PAGE_SIZE);
 	for (slot = 1; slot <= blocks_in_map; ++slot) {
 		int pos = slot * secsize;
diff --git a/block/partitions/msdos.c b/block/partitions/msdos.c
index 8752a5d26565..7681cd295ab8 100644
--- a/block/partitions/msdos.c
+++ b/block/partitions/msdos.c
@@ -455,14 +455,19 @@ int msdos_partition(struct parsed_partitions *state)
 	data = read_part_sector(state, 0, &sect);
 	if (!data)
 		return -1;
-	if (!msdos_magic_present(data + 510)) {
+
+	/*
+	 * Note order! (some AIX disks, e.g. unbootable kind,
+	 * have no MSDOS 55aa)
+	 */
+	if (aix_magic_present(state, data)) {
 		put_dev_sector(sect);
+		strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
 		return 0;
 	}
 
-	if (aix_magic_present(state, data)) {
+	if (!msdos_magic_present(data + 510)) {
 		put_dev_sector(sect);
-		strlcat(state->pp_buf, " [AIX]", PAGE_SIZE);
 		return 0;
 	}
 