author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-14 20:16:45 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-14 20:16:45 -0400
commit	f1cbd03f5eabb75ea8ace23b47d2209f10871c16 (patch)
tree	2ac398bd1a50ce135461fae5b5e91ba05831af84 /block
parent	ff398c45b03d9d64135d928c0146d8c38a70fd3b (diff)
parent	ff8c1474cc2f5e11414c71ec4d739c18e6e669c0 (diff)
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
Pull block fixes from Jens Axboe:
"Been sitting on this for a while, but lets get this out the door.
This fixes various important bugs for 3.3 final, along with a few more
trivial ones. Please pull!"
* 'for-linus' of git://git.kernel.dk/linux-block:
block: fix ioc leak in put_io_context
block, sx8: fix pointer math issue getting fw version
Block: use a freezable workqueue for disk-event polling
drivers/block/DAC960: fix -Wuninitialized warning
drivers/block/DAC960: fix DAC960_V2_IOCTL_Opcode_T -Wenum-compare warning
block: fix __blkdev_get and add_disk race condition
block: Fix setting bio flags in drivers (sd_dif/floppy)
block: Fix NULL pointer dereference in sd_revalidate_disk
block: exit_io_context() should call elevator_exit_icq_fn()
block: simplify ioc_release_fn()
block: replace icq->changed with icq->flags
Diffstat (limited to 'block')
-rw-r--r--	block/blk-ioc.c	135
-rw-r--r--	block/cfq-iosched.c	12
-rw-r--r--	block/genhd.c	42
-rw-r--r--	block/partition-generic.c	48
4 files changed, 158 insertions(+), 79 deletions(-)
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 8b782a63c297..fb95dd2f889a 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -36,11 +36,23 @@ static void icq_free_icq_rcu(struct rcu_head *head)
 	kmem_cache_free(icq->__rcu_icq_cache, icq);
 }
 
-/*
- * Exit and free an icq.  Called with both ioc and q locked.
- */
+/* Exit an icq.  Called with both ioc and q locked. */
 static void ioc_exit_icq(struct io_cq *icq)
 {
+	struct elevator_type *et = icq->q->elevator->type;
+
+	if (icq->flags & ICQ_EXITED)
+		return;
+
+	if (et->ops.elevator_exit_icq_fn)
+		et->ops.elevator_exit_icq_fn(icq);
+
+	icq->flags |= ICQ_EXITED;
+}
+
+/* Release an icq.  Called with both ioc and q locked. */
+static void ioc_destroy_icq(struct io_cq *icq)
+{
 	struct io_context *ioc = icq->ioc;
 	struct request_queue *q = icq->q;
 	struct elevator_type *et = q->elevator->type;
@@ -60,8 +72,7 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (rcu_dereference_raw(ioc->icq_hint) == icq)
 		rcu_assign_pointer(ioc->icq_hint, NULL);
 
-	if (et->ops.elevator_exit_icq_fn)
-		et->ops.elevator_exit_icq_fn(icq);
+	ioc_exit_icq(icq);
 
 	/*
 	 * @icq->q might have gone away by the time RCU callback runs
@@ -79,7 +90,6 @@ static void ioc_release_fn(struct work_struct *work)
 {
 	struct io_context *ioc = container_of(work, struct io_context,
 					      release_work);
-	struct request_queue *last_q = NULL;
 	unsigned long flags;
 
 	/*
@@ -93,44 +103,19 @@ static void ioc_release_fn(struct work_struct *work)
 	while (!hlist_empty(&ioc->icq_list)) {
 		struct io_cq *icq = hlist_entry(ioc->icq_list.first,
 						struct io_cq, ioc_node);
-		struct request_queue *this_q = icq->q;
+		struct request_queue *q = icq->q;
 
-		if (this_q != last_q) {
-			/*
-			 * Need to switch to @this_q.  Once we release
-			 * @ioc->lock, it can go away along with @cic.
-			 * Hold on to it.
-			 */
-			__blk_get_queue(this_q);
-
-			/*
-			 * blk_put_queue() might sleep thanks to kobject
-			 * idiocy.  Always release both locks, put and
-			 * restart.
-			 */
-			if (last_q) {
-				spin_unlock(last_q->queue_lock);
-				spin_unlock_irqrestore(&ioc->lock, flags);
-				blk_put_queue(last_q);
-			} else {
-				spin_unlock_irqrestore(&ioc->lock, flags);
-			}
-
-			last_q = this_q;
-			spin_lock_irqsave(this_q->queue_lock, flags);
-			spin_lock_nested(&ioc->lock, 1);
-			continue;
+		if (spin_trylock(q->queue_lock)) {
+			ioc_destroy_icq(icq);
+			spin_unlock(q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 		}
-		ioc_exit_icq(icq);
 	}
 
-	if (last_q) {
-		spin_unlock(last_q->queue_lock);
-		spin_unlock_irqrestore(&ioc->lock, flags);
-		blk_put_queue(last_q);
-	} else {
-		spin_unlock_irqrestore(&ioc->lock, flags);
-	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
 
 	kmem_cache_free(iocontext_cachep, ioc);
 }
@@ -145,6 +130,7 @@ static void ioc_release_fn(struct work_struct *work)
 void put_io_context(struct io_context *ioc)
 {
 	unsigned long flags;
+	bool free_ioc = false;
 
 	if (ioc == NULL)
 		return;
@@ -159,8 +145,13 @@ void put_io_context(struct io_context *ioc)
 		spin_lock_irqsave(&ioc->lock, flags);
 		if (!hlist_empty(&ioc->icq_list))
 			schedule_work(&ioc->release_work);
+		else
+			free_ioc = true;
 		spin_unlock_irqrestore(&ioc->lock, flags);
 	}
+
+	if (free_ioc)
+		kmem_cache_free(iocontext_cachep, ioc);
 }
 EXPORT_SYMBOL(put_io_context);
 
@@ -168,13 +159,41 @@ EXPORT_SYMBOL(put_io_context);
 void exit_io_context(struct task_struct *task)
 {
 	struct io_context *ioc;
+	struct io_cq *icq;
+	struct hlist_node *n;
+	unsigned long flags;
 
 	task_lock(task);
 	ioc = task->io_context;
 	task->io_context = NULL;
 	task_unlock(task);
 
-	atomic_dec(&ioc->nr_tasks);
+	if (!atomic_dec_and_test(&ioc->nr_tasks)) {
+		put_io_context(ioc);
+		return;
+	}
+
+	/*
+	 * Need ioc lock to walk icq_list and q lock to exit icq.  Perform
+	 * reverse double locking.  Read comment in ioc_release_fn() for
+	 * explanation on the nested locking annotation.
+	 */
+retry:
+	spin_lock_irqsave_nested(&ioc->lock, flags, 1);
+	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node) {
+		if (icq->flags & ICQ_EXITED)
+			continue;
+		if (spin_trylock(icq->q->queue_lock)) {
+			ioc_exit_icq(icq);
+			spin_unlock(icq->q->queue_lock);
+		} else {
+			spin_unlock_irqrestore(&ioc->lock, flags);
+			cpu_relax();
+			goto retry;
+		}
+	}
+	spin_unlock_irqrestore(&ioc->lock, flags);
+
 	put_io_context(ioc);
 }
 
@@ -194,7 +213,7 @@ void ioc_clear_queue(struct request_queue *q)
 		struct io_context *ioc = icq->ioc;
 
 		spin_lock(&ioc->lock);
-		ioc_exit_icq(icq);
+		ioc_destroy_icq(icq);
 		spin_unlock(&ioc->lock);
 	}
 }
@@ -363,13 +382,13 @@ struct io_cq *ioc_create_icq(struct request_queue *q, gfp_t gfp_mask)
 	return icq;
 }
 
-void ioc_set_changed(struct io_context *ioc, int which)
+void ioc_set_icq_flags(struct io_context *ioc, unsigned int flags)
 {
 	struct io_cq *icq;
 	struct hlist_node *n;
 
 	hlist_for_each_entry(icq, n, &ioc->icq_list, ioc_node)
-		set_bit(which, &icq->changed);
+		icq->flags |= flags;
 }
 
 /**
@@ -387,7 +406,7 @@ void ioc_ioprio_changed(struct io_context *ioc, int ioprio)
 
 	spin_lock_irqsave(&ioc->lock, flags);
 	ioc->ioprio = ioprio;
-	ioc_set_changed(ioc, ICQ_IOPRIO_CHANGED);
+	ioc_set_icq_flags(ioc, ICQ_IOPRIO_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 
@@ -404,11 +423,33 @@ void ioc_cgroup_changed(struct io_context *ioc)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ioc->lock, flags);
-	ioc_set_changed(ioc, ICQ_CGROUP_CHANGED);
+	ioc_set_icq_flags(ioc, ICQ_CGROUP_CHANGED);
 	spin_unlock_irqrestore(&ioc->lock, flags);
 }
 EXPORT_SYMBOL(ioc_cgroup_changed);
 
+/**
+ * icq_get_changed - fetch and clear icq changed mask
+ * @icq: icq of interest
+ *
+ * Fetch and clear ICQ_*_CHANGED bits from @icq.  Grabs and releases
+ * @icq->ioc->lock.
+ */
+unsigned icq_get_changed(struct io_cq *icq)
+{
+	unsigned int changed = 0;
+	unsigned long flags;
+
+	if (unlikely(icq->flags & ICQ_CHANGED_MASK)) {
+		spin_lock_irqsave(&icq->ioc->lock, flags);
+		changed = icq->flags & ICQ_CHANGED_MASK;
+		icq->flags &= ~ICQ_CHANGED_MASK;
+		spin_unlock_irqrestore(&icq->ioc->lock, flags);
+	}
+	return changed;
+}
+EXPORT_SYMBOL(icq_get_changed);
+
 static int __init blk_ioc_init(void)
 {
 	iocontext_cachep = kmem_cache_create("blkdev_ioc",
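
The ioc_release_fn() rewrite replaces the old "pin the queue, drop both locks, blk_put_queue(), restart" dance with reverse double locking: keep ioc->lock held, spin_trylock() the queue lock, and on contention back off completely before retrying; exit_io_context() adopts the same pattern, with ICQ_EXITED making a repeated pass idempotent. Below is a minimal userspace sketch of that pattern using pthreads; all names are hypothetical, and pthread_mutex_trylock() / sched_yield() stand in for spin_trylock() / cpu_relax().

/*
 * Userspace sketch of the reverse-double-locking pattern used by
 * ioc_release_fn() and exit_io_context(). All names are hypothetical.
 */
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct queue { pthread_mutex_t lock; };
struct icq   { struct queue *q; struct icq *next; };
struct ioc   { pthread_mutex_t lock; struct icq *icq_list; };

static void release_all_icqs(struct ioc *ioc)
{
	pthread_mutex_lock(&ioc->lock);
	while (ioc->icq_list) {
		struct icq *icq = ioc->icq_list;
		struct queue *q = icq->q;

		if (pthread_mutex_trylock(&q->lock) == 0) {
			/* Got both locks in the "wrong" order safely. */
			ioc->icq_list = icq->next;
			free(icq);
			pthread_mutex_unlock(&q->lock);
		} else {
			/*
			 * Contended: drop the outer lock so whoever holds
			 * q->lock (and may want ioc->lock) can make
			 * progress, then retry from the top.
			 */
			pthread_mutex_unlock(&ioc->lock);
			sched_yield();
			pthread_mutex_lock(&ioc->lock);
		}
	}
	pthread_mutex_unlock(&ioc->lock);
}

Never blocking on the inner lock while the outer one is held is what makes taking the locks in the opposite order of everyone else deadlock-free.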
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index d0ba50533668..457295253566 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3470,20 +3470,20 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	const int rw = rq_data_dir(rq);
 	const bool is_sync = rq_is_sync(rq);
 	struct cfq_queue *cfqq;
+	unsigned int changed;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
 	spin_lock_irq(q->queue_lock);
 
 	/* handle changed notifications */
-	if (unlikely(cic->icq.changed)) {
-		if (test_and_clear_bit(ICQ_IOPRIO_CHANGED, &cic->icq.changed))
-			changed_ioprio(cic);
+	changed = icq_get_changed(&cic->icq);
+	if (unlikely(changed & ICQ_IOPRIO_CHANGED))
+		changed_ioprio(cic);
 #ifdef CONFIG_CFQ_GROUP_IOSCHED
-		if (test_and_clear_bit(ICQ_CGROUP_CHANGED, &cic->icq.changed))
-			changed_cgroup(cic);
+	if (unlikely(changed & ICQ_CGROUP_CHANGED))
+		changed_cgroup(cic);
 #endif
-	}
 
 new_queue:
 	cfqq = cic_to_cfqq(cic, is_sync);
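
The cfq_set_request() hunk is the consumer side of the icq->flags conversion: instead of test_and_clear_bit() on each bit of the old atomically-updated icq->changed word, it takes a single snapshot via icq_get_changed(), which fetches and clears the whole ICQ_*_CHANGED mask under ioc->lock, then branches on the snapshot. A small userspace sketch of the locked fetch-and-clear idiom, with hypothetical names:

/* Userspace sketch of the locked fetch-and-clear idiom behind
 * icq_get_changed(); names are hypothetical. */
#include <pthread.h>

#define CHG_IOPRIO	(1U << 0)
#define CHG_CGROUP	(1U << 1)
#define CHG_MASK	(CHG_IOPRIO | CHG_CGROUP)

struct ctx { pthread_mutex_t lock; unsigned int flags; };

static unsigned int get_changed(struct ctx *c)
{
	unsigned int changed;

	pthread_mutex_lock(&c->lock);
	changed = c->flags & CHG_MASK;	/* snapshot the pending bits... */
	c->flags &= ~CHG_MASK;		/* ...and clear them in one step */
	pthread_mutex_unlock(&c->lock);
	return changed;
}

Folding the notification bits into icq->flags lets one ioc->lock-protected word carry both the changed bits and state bits such as ICQ_EXITED, retiring the separate icq->changed word.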
diff --git a/block/genhd.c b/block/genhd.c
index 23b4f7063322..df9816ede75b 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -35,6 +35,7 @@ static DEFINE_IDR(ext_devt_idr);
 
 static struct device_type disk_type;
 
+static void disk_alloc_events(struct gendisk *disk);
 static void disk_add_events(struct gendisk *disk);
 static void disk_del_events(struct gendisk *disk);
 static void disk_release_events(struct gendisk *disk);
@@ -601,6 +602,8 @@ void add_disk(struct gendisk *disk)
 	disk->major = MAJOR(devt);
 	disk->first_minor = MINOR(devt);
 
+	disk_alloc_events(disk);
+
 	/* Register BDI before referencing it from bdev */
 	bdi = &disk->queue->backing_dev_info;
 	bdi_register_dev(bdi, disk_devt(disk));
@@ -1475,9 +1478,9 @@ static void __disk_unblock_events(struct gendisk *disk, bool check_now)
 	intv = disk_events_poll_jiffies(disk);
 	set_timer_slack(&ev->dwork.timer, intv / 4);
 	if (check_now)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	else if (intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 out_unlock:
 	spin_unlock_irqrestore(&ev->lock, flags);
 }
@@ -1521,7 +1524,7 @@ void disk_flush_events(struct gendisk *disk, unsigned int mask)
 	ev->clearing |= mask;
 	if (!ev->block) {
 		cancel_delayed_work(&ev->dwork);
-		queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	}
 	spin_unlock_irq(&ev->lock);
 }
@@ -1558,7 +1561,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask)
 
 	/* uncondtionally schedule event check and wait for it to finish */
 	disk_block_events(disk);
-	queue_delayed_work(system_nrt_wq, &ev->dwork, 0);
+	queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, 0);
 	flush_delayed_work(&ev->dwork);
 	__disk_unblock_events(disk, false);
 
@@ -1595,7 +1598,7 @@ static void disk_events_workfn(struct work_struct *work)
 
 	intv = disk_events_poll_jiffies(disk);
 	if (!ev->block && intv)
-		queue_delayed_work(system_nrt_wq, &ev->dwork, intv);
+		queue_delayed_work(system_nrt_freezable_wq, &ev->dwork, intv);
 
 	spin_unlock_irq(&ev->lock);
 
@@ -1733,9 +1736,9 @@ module_param_cb(events_dfl_poll_msecs, &disk_events_dfl_poll_msecs_param_ops,
 		&disk_events_dfl_poll_msecs, 0644);
 
 /*
- * disk_{add|del|release}_events - initialize and destroy disk_events.
+ * disk_{alloc|add|del|release}_events - initialize and destroy disk_events.
  */
-static void disk_add_events(struct gendisk *disk)
+static void disk_alloc_events(struct gendisk *disk)
 {
 	struct disk_events *ev;
 
@@ -1748,16 +1751,6 @@ static void disk_add_events(struct gendisk *disk)
 		return;
 	}
 
-	if (sysfs_create_files(&disk_to_dev(disk)->kobj,
-			       disk_events_attrs) < 0) {
-		pr_warn("%s: failed to create sysfs files for events\n",
-			disk->disk_name);
-		kfree(ev);
-		return;
-	}
-
-	disk->ev = ev;
-
 	INIT_LIST_HEAD(&ev->node);
 	ev->disk = disk;
 	spin_lock_init(&ev->lock);
@@ -1766,8 +1759,21 @@ static void disk_add_events(struct gendisk *disk)
 	ev->poll_msecs = -1;
 	INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn);
 
+	disk->ev = ev;
+}
+
+static void disk_add_events(struct gendisk *disk)
+{
+	if (!disk->ev)
+		return;
+
+	/* FIXME: error handling */
+	if (sysfs_create_files(&disk_to_dev(disk)->kobj, disk_events_attrs) < 0)
+		pr_warn("%s: failed to create sysfs files for events\n",
+			disk->disk_name);
+
 	mutex_lock(&disk_events_mutex);
-	list_add_tail(&ev->node, &disk_events);
+	list_add_tail(&disk->ev->node, &disk_events);
 	mutex_unlock(&disk_events_mutex);
 
 	/*
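
The genhd.c changes close the __blkdev_get() vs add_disk() race by splitting event setup into two phases: disk_alloc_events() fully initializes disk->ev early in add_disk(), before the disk is reachable by openers, while disk_add_events() afterwards performs only externally visible registration (sysfs files, the global polling list). A hedged userspace sketch of the allocate-then-publish split, with hypothetical names:

/*
 * Userspace sketch of the two-phase init pattern that
 * disk_{alloc,add}_events now follows; all names are hypothetical.
 * Phase 1 builds the object completely before anything can observe
 * it; phase 2 only has side effects, so a concurrent opener never
 * sees a half-initialized events structure.
 */
#include <stdlib.h>
#include <pthread.h>

struct events { pthread_mutex_t lock; int poll_msecs; };
struct disk   { struct events *ev; };

/* Phase 1: called before the disk becomes visible to openers. */
static void alloc_events(struct disk *d)
{
	struct events *ev = calloc(1, sizeof(*ev));

	if (!ev)
		return;			/* events simply stay disabled */
	pthread_mutex_init(&ev->lock, NULL);
	ev->poll_msecs = -1;		/* "use the system default" */
	d->ev = ev;			/* publish a complete object */
}

/* Phase 2: called after publication; registration only, no init. */
static void add_events(struct disk *d)
{
	if (!d->ev)
		return;
	/* register sysfs attributes, link into the polling list, ... */
}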
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d06ec1c829c2..6df5d6928a44 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -389,17 +389,11 @@ static bool disk_unlock_native_capacity(struct gendisk *disk)
 	}
 }
 
-int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+static int drop_partitions(struct gendisk *disk, struct block_device *bdev)
 {
-	struct parsed_partitions *state = NULL;
 	struct disk_part_iter piter;
 	struct hd_struct *part;
-	int p, highest, res;
-rescan:
-	if (state && !IS_ERR(state)) {
-		kfree(state);
-		state = NULL;
-	}
+	int res;
 
 	if (bdev->bd_part_count)
 		return -EBUSY;
@@ -412,6 +406,24 @@ rescan:
 		delete_partition(disk, part->partno);
 	disk_part_iter_exit(&piter);
 
+	return 0;
+}
+
+int rescan_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+	struct parsed_partitions *state = NULL;
+	struct hd_struct *part;
+	int p, highest, res;
+rescan:
+	if (state && !IS_ERR(state)) {
+		kfree(state);
+		state = NULL;
+	}
+
+	res = drop_partitions(disk, bdev);
+	if (res)
+		return res;
+
 	if (disk->fops->revalidate_disk)
 		disk->fops->revalidate_disk(disk);
 	check_disk_size_change(disk, bdev);
@@ -515,6 +527,26 @@ rescan:
 	return 0;
 }
 
+int invalidate_partitions(struct gendisk *disk, struct block_device *bdev)
+{
+	int res;
+
+	if (!bdev->bd_invalidated)
+		return 0;
+
+	res = drop_partitions(disk, bdev);
+	if (res)
+		return res;
+
+	set_capacity(disk, 0);
+	check_disk_size_change(disk, bdev);
+	bdev->bd_invalidated = 0;
+	/* tell userspace that the media / partition table may have changed */
+	kobject_uevent(&disk_to_dev(disk)->kobj, KOBJ_CHANGE);
+
+	return 0;
+}
+
 unsigned char *read_dev_sector(struct block_device *bdev, sector_t n, Sector *p)
 {
 	struct address_space *mapping = bdev->bd_inode->i_mapping;
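
invalidate_partitions() is the helper behind the sd_revalidate_disk NULL-pointer fix: when the media is gone, an opener can drop the stale partition table and zero the capacity instead of trying to re-read a table that no longer exists. A hedged sketch of how a call site along the lines of __blkdev_get() might choose between the two entry points; simplified, and not the verbatim kernel code:

/*
 * Hedged sketch of the intended division of labor; simplified, not
 * verbatim kernel code. On a successful open the partition table is
 * re-read; if the media is missing, the stale table is dropped.
 */
static void sketch_revalidate(struct gendisk *disk,
			      struct block_device *bdev, int open_err)
{
	if (!bdev->bd_invalidated)
		return;

	if (!open_err)
		rescan_partitions(disk, bdev);
	else if (open_err == -ENOMEDIUM)
		invalidate_partitions(disk, bdev);
}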