path: root/block/ll_rw_blk.c
author	Al Viro <viro@zeniv.linux.org.uk>	2006-03-18 18:34:37 -0500
committer	Al Viro <viro@zeniv.linux.org.uk>	2006-03-18 18:34:37 -0500
commit	483f4afc421435b7cfe5e88f74eea0b73a476d75 (patch)
tree	9a3bbcf8ae83c5edb9ab1f9e98333d6f5673211b	/block/ll_rw_blk.c
parent	1312f40e11c57edb5c3250f1b782cef8e3efea82 (diff)
[PATCH] fix sysfs interaction and lifetime rules handling for queues
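The queue's lifetime is now tied to its embedded kobject: blk_cleanup_queue() only marks the queue dead, shuts down the elevator and drops a reference through the new blk_put_queue(); the queue itself is freed by blk_release_queue(), which the kobject core invokes once the last reference is gone. The sysfs show/store methods take q->sysfs_lock and return -ENOENT once QUEUE_FLAG_DEAD is set, so attribute accesses can no longer race with teardown. A minimal sketch of the resulting driver-side pattern (not part of this patch; the my_* names are illustrative):

#include <linux/blkdev.h>

static request_queue_t *my_queue;

static int my_setup(request_fn_proc *rfn, spinlock_t *lock)
{
	my_queue = blk_init_queue(rfn, lock);
	if (!my_queue)
		return -ENOMEM;

	/* take an extra reference; fails once QUEUE_FLAG_DEAD is set */
	if (blk_get_queue(my_queue)) {
		blk_cleanup_queue(my_queue);
		return -ENXIO;
	}
	return 0;
}

static void my_teardown(void)
{
	/* mark the queue dead, tear down the elevator, drop the initial ref */
	blk_cleanup_queue(my_queue);
	/* blk_release_queue() only runs after the last reference,
	   including this one, has been dropped */
	blk_put_queue(my_queue);
}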
Diffstat (limited to 'block/ll_rw_blk.c')
-rw-r--r--	block/ll_rw_blk.c	83
1 files changed, 58 insertions, 25 deletions
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index 6dc769182052..6c793b196aa9 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue);
  * Hopefully the low level driver will have finished any
  * outstanding requests first...
  **/
-void blk_cleanup_queue(request_queue_t * q)
+static void blk_release_queue(struct kobject *kobj)
 {
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
 	struct request_list *rl = &q->rq;
 
-	if (!atomic_dec_and_test(&q->refcnt))
-		return;
-
-	if (q->elevator)
-		elevator_exit(q->elevator);
-
 	blk_sync_queue(q);
 
 	if (rl->rq_pool)
@@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q)
 	kmem_cache_free(requestq_cachep, q);
 }
 
+void blk_put_queue(request_queue_t *q)
+{
+	kobject_put(&q->kobj);
+}
+EXPORT_SYMBOL(blk_put_queue);
+
+void blk_cleanup_queue(request_queue_t * q)
+{
+	mutex_lock(&q->sysfs_lock);
+	set_bit(QUEUE_FLAG_DEAD, &q->queue_flags);
+	mutex_unlock(&q->sysfs_lock);
+
+	if (q->elevator)
+		elevator_exit(q->elevator);
+
+	blk_put_queue(q);
+}
+
 EXPORT_SYMBOL(blk_cleanup_queue);
 
 static int blk_init_free_list(request_queue_t *q)
@@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask)
 }
 EXPORT_SYMBOL(blk_alloc_queue);
 
+static struct kobj_type queue_ktype;
+
 request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 {
 	request_queue_t *q;
@@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	memset(q, 0, sizeof(*q));
 	init_timer(&q->unplug_timer);
-	atomic_set(&q->refcnt, 1);
+
+	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
+	q->kobj.ktype = &queue_ktype;
+	kobject_init(&q->kobj);
 
 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 	q->backing_dev_info.unplug_io_data = q;
 
+	mutex_init(&q->sysfs_lock);
+
 	return q;
 }
 EXPORT_SYMBOL(blk_alloc_queue_node);
@@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node);
 int blk_get_queue(request_queue_t *q)
 {
 	if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		atomic_inc(&q->refcnt);
+		kobject_get(&q->kobj);
 		return 0;
 	}
 
@@ -3764,13 +3784,19 @@ static ssize_t
 queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q;
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+	ssize_t res;
 
-	q = container_of(kobj, struct request_queue, kobj);
 	if (!entry->show)
 		return -EIO;
-
-	return entry->show(q, page);
+	mutex_lock(&q->sysfs_lock);
+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->show(q, page);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
 }
 
 static ssize_t
@@ -3778,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
 		    const char *page, size_t length)
 {
 	struct queue_sysfs_entry *entry = to_queue(attr);
-	struct request_queue *q;
+	request_queue_t *q = container_of(kobj, struct request_queue, kobj);
+
+	ssize_t res;
 
-	q = container_of(kobj, struct request_queue, kobj);
 	if (!entry->store)
 		return -EIO;
-
-	return entry->store(q, page, length);
+	mutex_lock(&q->sysfs_lock);
+	if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) {
+		mutex_unlock(&q->sysfs_lock);
+		return -ENOENT;
+	}
+	res = entry->store(q, page, length);
+	mutex_unlock(&q->sysfs_lock);
+	return res;
 }
 
 static struct sysfs_ops queue_sysfs_ops = {
@@ -3795,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = {
 static struct kobj_type queue_ktype = {
 	.sysfs_ops	= &queue_sysfs_ops,
 	.default_attrs	= default_attrs,
+	.release	= blk_release_queue,
 };
 
 int blk_register_queue(struct gendisk *disk)
@@ -3807,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk)
 		return -ENXIO;
 
 	q->kobj.parent = kobject_get(&disk->kobj);
-	if (!q->kobj.parent)
-		return -EBUSY;
 
-	snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue");
-	q->kobj.ktype = &queue_ktype;
-
-	ret = kobject_register(&q->kobj);
+	ret = kobject_add(&q->kobj);
 	if (ret < 0)
 		return ret;
 
+	kobject_uevent(&q->kobj, KOBJ_ADD);
+
 	ret = elv_register_queue(q);
 	if (ret) {
-		kobject_unregister(&q->kobj);
+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+		kobject_del(&q->kobj);
 		return ret;
 	}
 
@@ -3833,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk)
 	if (q && q->request_fn) {
 		elv_unregister_queue(q);
 
-		kobject_unregister(&q->kobj);
+		kobject_uevent(&q->kobj, KOBJ_REMOVE);
+		kobject_del(&q->kobj);
 		kobject_put(&disk->kobj);
 	}
 }
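Note on the registration path: kobject_register()/kobject_unregister() give way to explicit kobject_add()/kobject_del() plus kobject_uevent() calls because the queue's kobject is now named and initialized once, in blk_alloc_queue_node(), which keeps reference counting and the release method available even for queues that are never registered in sysfs; registration therefore only needs kobject_add() and an explicit KOBJ_ADD uevent. For context, a rough sketch of what kobject_register() bundled together in this era of the kobject API (illustrative helper, not part of the patch):

/* approximately what the removed kobject_register(&q->kobj) used to do */
static int register_queue_kobj(struct request_queue *q)
{
	int err;

	kobject_init(&q->kobj);		/* now done once in blk_alloc_queue_node() */
	err = kobject_add(&q->kobj);	/* kept in blk_register_queue() */
	if (!err)
		kobject_uevent(&q->kobj, KOBJ_ADD);	/* now issued explicitly */
	return err;
}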