Diffstat:

 -rw-r--r--  block/as-iosched.c            144
 -rw-r--r--  block/cfq-iosched.c           354
 -rw-r--r--  block/deadline-iosched.c      116
 -rw-r--r--  block/elevator.c              171
 -rw-r--r--  block/ll_rw_blk.c             105
 -rw-r--r--  drivers/block/loop.c            4
 -rw-r--r--  drivers/block/pktcdvd.c         4
 -rw-r--r--  drivers/block/umem.c            2
 -rw-r--r--  drivers/md/dm.c                 4
 -rw-r--r--  drivers/md/md.c                 5
 -rw-r--r--  drivers/s390/block/dcssblk.c    8
 -rw-r--r--  include/linux/blkdev.h         10
 -rw-r--r--  include/linux/elevator.h       10
 -rw-r--r--  kernel/exit.c                   7
 14 files changed, 476 insertions(+), 468 deletions(-)
diff --git a/block/as-iosched.c b/block/as-iosched.c
index 8da3cf66894c..296708ceceb2 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -182,6 +182,9 @@ struct as_rq {
 
 static kmem_cache_t *arq_pool;
 
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 static void as_move_to_dispatch(struct as_data *ad, struct as_rq *arq);
 static void as_antic_stop(struct as_data *ad);
 
@@ -193,6 +196,15 @@ static void as_antic_stop(struct as_data *ad);
 static void free_as_io_context(struct as_io_context *aic)
 {
 	kfree(aic);
+	if (atomic_dec_and_test(&ioc_count) && ioc_gone)
+		complete(ioc_gone);
+}
+
+static void as_trim(struct io_context *ioc)
+{
+	if (ioc->aic)
+		free_as_io_context(ioc->aic);
+	ioc->aic = NULL;
 }
 
 /* Called when the task exits */
@@ -220,6 +232,7 @@ static struct as_io_context *alloc_as_io_context(void)
 		ret->seek_total = 0;
 		ret->seek_samples = 0;
 		ret->seek_mean = 0;
+		atomic_inc(&ioc_count);
 	}
 
 	return ret;
@@ -1696,11 +1709,6 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
 /*
  * sysfs parts below
  */
-struct as_fs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct as_data *, char *);
-	ssize_t (*store)(struct as_data *, const char *, size_t);
-};
 
 static ssize_t
 as_var_show(unsigned int var, char *page)
@@ -1717,8 +1725,9 @@ as_var_store(unsigned long *var, const char *page, size_t count)
 	return count;
 }
 
-static ssize_t as_est_show(struct as_data *ad, char *page)
+static ssize_t est_time_show(elevator_t *e, char *page)
 {
+	struct as_data *ad = e->elevator_data;
 	int pos = 0;
 
 	pos += sprintf(page+pos, "%lu %% exit probability\n",
@@ -1734,21 +1743,23 @@ static ssize_t as_est_show(struct as_data *ad, char *page)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR)				\
-static ssize_t __FUNC(struct as_data *ad, char *page)		\
+static ssize_t __FUNC(elevator_t *e, char *page)		\
 {								\
+	struct as_data *ad = e->elevator_data;			\
 	return as_var_show(jiffies_to_msecs((__VAR)), (page));	\
 }
-SHOW_FUNCTION(as_readexpire_show, ad->fifo_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_writeexpire_show, ad->fifo_expire[REQ_ASYNC]);
-SHOW_FUNCTION(as_anticexpire_show, ad->antic_expire);
-SHOW_FUNCTION(as_read_batchexpire_show, ad->batch_expire[REQ_SYNC]);
-SHOW_FUNCTION(as_write_batchexpire_show, ad->batch_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_read_expire_show, ad->fifo_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_expire_show, ad->fifo_expire[REQ_ASYNC]);
+SHOW_FUNCTION(as_antic_expire_show, ad->antic_expire);
+SHOW_FUNCTION(as_read_batch_expire_show, ad->batch_expire[REQ_SYNC]);
+SHOW_FUNCTION(as_write_batch_expire_show, ad->batch_expire[REQ_ASYNC]);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX)				\
-static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count)	\
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
 {									\
-	int ret = as_var_store(__PTR, (page), count);			\
+	struct as_data *ad = e->elevator_data;				\
+	int ret = as_var_store(__PTR, (page), count);			\
 	if (*(__PTR) < (MIN))						\
 		*(__PTR) = (MIN);					\
 	else if (*(__PTR) > (MAX))					\
@@ -1756,90 +1767,26 @@ static ssize_t __FUNC(struct as_data *ad, const char *page, size_t count) \
 	*(__PTR) = msecs_to_jiffies(*(__PTR));				\
 	return ret;							\
 }
-STORE_FUNCTION(as_readexpire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_writeexpire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
-STORE_FUNCTION(as_anticexpire_store, &ad->antic_expire, 0, INT_MAX);
-STORE_FUNCTION(as_read_batchexpire_store,
+STORE_FUNCTION(as_read_expire_store, &ad->fifo_expire[REQ_SYNC], 0, INT_MAX);
+STORE_FUNCTION(as_write_expire_store, &ad->fifo_expire[REQ_ASYNC], 0, INT_MAX);
+STORE_FUNCTION(as_antic_expire_store, &ad->antic_expire, 0, INT_MAX);
+STORE_FUNCTION(as_read_batch_expire_store,
 			&ad->batch_expire[REQ_SYNC], 0, INT_MAX);
-STORE_FUNCTION(as_write_batchexpire_store,
+STORE_FUNCTION(as_write_batch_expire_store,
 			&ad->batch_expire[REQ_ASYNC], 0, INT_MAX);
 #undef STORE_FUNCTION
 
-static struct as_fs_entry as_est_entry = {
-	.attr = {.name = "est_time", .mode = S_IRUGO },
-	.show = as_est_show,
-};
-static struct as_fs_entry as_readexpire_entry = {
-	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_readexpire_show,
-	.store = as_readexpire_store,
-};
-static struct as_fs_entry as_writeexpire_entry = {
-	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_writeexpire_show,
-	.store = as_writeexpire_store,
-};
-static struct as_fs_entry as_anticexpire_entry = {
-	.attr = {.name = "antic_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_anticexpire_show,
-	.store = as_anticexpire_store,
-};
-static struct as_fs_entry as_read_batchexpire_entry = {
-	.attr = {.name = "read_batch_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_read_batchexpire_show,
-	.store = as_read_batchexpire_store,
-};
-static struct as_fs_entry as_write_batchexpire_entry = {
-	.attr = {.name = "write_batch_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = as_write_batchexpire_show,
-	.store = as_write_batchexpire_store,
-};
-
-static struct attribute *default_attrs[] = {
-	&as_est_entry.attr,
-	&as_readexpire_entry.attr,
-	&as_writeexpire_entry.attr,
-	&as_anticexpire_entry.attr,
-	&as_read_batchexpire_entry.attr,
-	&as_write_batchexpire_entry.attr,
-	NULL,
-};
-
-#define to_as(atr) container_of((atr), struct as_fs_entry, attr)
-
-static ssize_t
-as_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct as_fs_entry *entry = to_as(attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-as_attr_store(struct kobject *kobj, struct attribute *attr,
-	      const char *page, size_t length)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct as_fs_entry *entry = to_as(attr);
-
-	if (!entry->store)
-		return -EIO;
-
-	return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops as_sysfs_ops = {
-	.show	= as_attr_show,
-	.store	= as_attr_store,
-};
-
-static struct kobj_type as_ktype = {
-	.sysfs_ops	= &as_sysfs_ops,
-	.default_attrs	= default_attrs,
-};
+#define AS_ATTR(name) \
+	__ATTR(name, S_IRUGO|S_IWUSR, as_##name##_show, as_##name##_store)
+
+static struct elv_fs_entry as_attrs[] = {
+	__ATTR_RO(est_time),
+	AS_ATTR(read_expire),
+	AS_ATTR(write_expire),
+	AS_ATTR(antic_expire),
+	AS_ATTR(read_batch_expire),
+	AS_ATTR(write_batch_expire),
+	__ATTR_NULL
+};
 
 static struct elevator_type iosched_as = {
@@ -1860,9 +1807,10 @@ static struct elevator_type iosched_as = {
 	.elevator_may_queue_fn =	as_may_queue,
 	.elevator_init_fn =		as_init_queue,
 	.elevator_exit_fn =		as_exit_queue,
+	.trim =				as_trim,
 	},
 
-	.elevator_ktype =	&as_ktype,
+	.elevator_attrs = as_attrs,
 	.elevator_name = "anticipatory",
 	.elevator_owner = THIS_MODULE,
 };
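Aside: the AS_ATTR() entries above are ordinary designated initializers once the preprocessor is done with them. Roughly, based on the generic __ATTR helpers in <linux/sysfs.h> of this era (a sketch, not text from the patch):

    /* AS_ATTR(read_expire) expands to approximately: */
    {
        .attr  = { .name = "read_expire", .mode = S_IRUGO | S_IWUSR },
        .show  = as_read_expire_show,
        .store = as_read_expire_store,
    },
    /* __ATTR_RO(est_time) yields a read-only entry wired to
     * est_time_show only, and __ATTR_NULL is an all-zero sentinel,
     * presumably so the registration loop in elevator.c can find
     * the end of the table. */

This table is what lets the elevator core walk one elv_fs_entry array per scheduler instead of each scheduler carrying its own kobj_type and show/store dispatchers.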
@@ -1893,7 +1841,13 @@ static int __init as_init(void)
 
 static void __exit as_exit(void)
 {
+	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_as);
+	ioc_gone = &all_gone;
+	barrier();
+	if (atomic_read(&ioc_count))
+		complete(ioc_gone);
+	synchronize_rcu();
 	kmem_cache_destroy(arq_pool);
 }
 
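The ioc_count/ioc_gone pair added above implements a small rmmod handshake: every live as_io_context holds a count, and the last free signals whoever is tearing the module down. A condensed sketch of the intended protocol (names shortened; not verbatim from the patch):

    static atomic_t ioc_count = ATOMIC_INIT(0);
    static struct completion *ioc_gone;    /* NULL until module exit */

    static void free_ctx(struct as_io_context *aic)
    {
        kfree(aic);
        /* last context gone and an exiter is registered: wake it */
        if (atomic_dec_and_test(&ioc_count) && ioc_gone)
            complete(ioc_gone);
    }

    static void __exit exit_path(void)
    {
        DECLARE_COMPLETION(all_gone);

        ioc_gone = &all_gone;    /* publish before sampling the count */
        smp_mb();
        if (atomic_read(&ioc_count))
            wait_for_completion(&all_gone);    /* block until drained */
    }

Note the exit side of the sketch waits. The hunk as committed calls complete() from as_exit() (and the same in cfq_exit() below) behind a plain compiler barrier(); mainline later corrected this to wait_for_completion() with a proper memory barrier, which is the behaviour the design actually needs.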
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index c8dbe38c81c8..c4a0d5d8d7f0 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -6,21 +6,13 @@
  *
  *  Copyright (C) 2003 Jens Axboe <axboe@suse.de>
  */
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/blkdev.h>
-#include <linux/elevator.h>
-#include <linux/bio.h>
 #include <linux/config.h>
 #include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/compiler.h>
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
 #include <linux/hash.h>
 #include <linux/rbtree.h>
-#include <linux/mempool.h>
 #include <linux/ioprio.h>
-#include <linux/writeback.h>
 
 /*
  * tunables
@@ -47,6 +39,8 @@ static int cfq_slice_idle = HZ / 100;
  */
 static const int cfq_max_depth = 2;
 
+static DEFINE_RWLOCK(cfq_exit_lock);
+
 /*
  * for the hash of cfqq inside the cfqd
  */
@@ -89,6 +83,9 @@ static kmem_cache_t *crq_pool;
 static kmem_cache_t *cfq_pool;
 static kmem_cache_t *cfq_ioc_pool;
 
+static atomic_t ioc_count = ATOMIC_INIT(0);
+static struct completion *ioc_gone;
+
 #define CFQ_PRIO_LISTS		IOPRIO_BE_NR
 #define cfq_class_idle(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
 #define cfq_class_be(cfqq)	((cfqq)->ioprio_class == IOPRIO_CLASS_BE)
@@ -109,7 +106,6 @@ static kmem_cache_t *cfq_ioc_pool;
  * Per block device queue structure
  */
 struct cfq_data {
-	atomic_t ref;
 	request_queue_t *queue;
 
 	/*
@@ -175,6 +171,8 @@ struct cfq_data {
 	unsigned int cfq_slice_async_rq;
 	unsigned int cfq_slice_idle;
 	unsigned int cfq_max_depth;
+
+	struct list_head cic_list;
 };
 
 /*
@@ -288,7 +286,7 @@ CFQ_CRQ_FNS(is_sync);
 
 static struct cfq_queue *cfq_find_cfq_hash(struct cfq_data *, unsigned int, unsigned short);
 static void cfq_dispatch_insert(request_queue_t *, struct cfq_rq *);
-static void cfq_put_cfqd(struct cfq_data *cfqd);
+static struct cfq_queue *cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk, gfp_t gfp_mask);
 
 #define process_sync(tsk)	((tsk)->flags & PF_SYNCWRITE)
 
@@ -1160,8 +1158,6 @@ static void cfq_put_queue(struct cfq_queue *cfqq)
 	if (unlikely(cfqd->active_queue == cfqq))
 		__cfq_slice_expired(cfqd, cfqq, 0);
 
-	cfq_put_cfqd(cfqq->cfqd);
-
 	/*
 	 * it's on the empty list and still hashed
 	 */
@@ -1179,7 +1175,7 @@ __cfq_find_cfq_hash(struct cfq_data *cfqd, unsigned int key, unsigned int prio,
 
 	hlist_for_each_safe(entry, next, hash_list) {
 		struct cfq_queue *__cfqq = list_entry_qhash(entry);
-		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->ioprio_class, __cfqq->ioprio);
+		const unsigned short __p = IOPRIO_PRIO_VALUE(__cfqq->org_ioprio_class, __cfqq->org_ioprio);
 
 		if (__cfqq->key == key && (__p == prio || prio == CFQ_KEY_ANY))
 			return __cfqq;
@@ -1198,13 +1194,24 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
 {
 	struct cfq_io_context *__cic;
 	struct list_head *entry, *next;
+	int freed = 1;
 
 	list_for_each_safe(entry, next, &cic->list) {
 		__cic = list_entry(entry, struct cfq_io_context, list);
 		kmem_cache_free(cfq_ioc_pool, __cic);
+		freed++;
 	}
 
 	kmem_cache_free(cfq_ioc_pool, cic);
+	if (atomic_sub_and_test(freed, &ioc_count) && ioc_gone)
+		complete(ioc_gone);
+}
+
+static void cfq_trim(struct io_context *ioc)
+{
+	ioc->set_ioprio = NULL;
+	if (ioc->cic)
+		cfq_free_io_context(ioc->cic);
 }
 
 /*
@@ -1212,25 +1219,37 @@ static void cfq_free_io_context(struct cfq_io_context *cic)
  */
 static void cfq_exit_single_io_context(struct cfq_io_context *cic)
 {
-	struct cfq_data *cfqd = cic->cfqq->cfqd;
-	request_queue_t *q = cfqd->queue;
+	struct cfq_data *cfqd = cic->key;
+	request_queue_t *q;
+
+	if (!cfqd)
+		return;
+
+	q = cfqd->queue;
 
 	WARN_ON(!irqs_disabled());
 
 	spin_lock(q->queue_lock);
 
-	if (unlikely(cic->cfqq == cfqd->active_queue))
-		__cfq_slice_expired(cfqd, cic->cfqq, 0);
+	if (cic->cfqq[ASYNC]) {
+		if (unlikely(cic->cfqq[ASYNC] == cfqd->active_queue))
+			__cfq_slice_expired(cfqd, cic->cfqq[ASYNC], 0);
+		cfq_put_queue(cic->cfqq[ASYNC]);
+		cic->cfqq[ASYNC] = NULL;
+	}
+
+	if (cic->cfqq[SYNC]) {
+		if (unlikely(cic->cfqq[SYNC] == cfqd->active_queue))
+			__cfq_slice_expired(cfqd, cic->cfqq[SYNC], 0);
+		cfq_put_queue(cic->cfqq[SYNC]);
+		cic->cfqq[SYNC] = NULL;
+	}
 
-	cfq_put_queue(cic->cfqq);
-	cic->cfqq = NULL;
+	cic->key = NULL;
+	list_del_init(&cic->queue_list);
 	spin_unlock(q->queue_lock);
 }
 
-/*
- * Another task may update the task cic list, if it is doing a queue lookup
- * on its behalf. cfq_cic_lock excludes such concurrent updates
- */
 static void cfq_exit_io_context(struct cfq_io_context *cic)
 {
 	struct cfq_io_context *__cic;
@@ -1242,12 +1261,14 @@ static void cfq_exit_io_context(struct cfq_io_context *cic)
 	/*
 	 * put the reference this task is holding to the various queues
 	 */
+	read_lock(&cfq_exit_lock);
 	list_for_each(entry, &cic->list) {
 		__cic = list_entry(entry, struct cfq_io_context, list);
 		cfq_exit_single_io_context(__cic);
 	}
 
 	cfq_exit_single_io_context(cic);
+	read_unlock(&cfq_exit_lock);
 	local_irq_restore(flags);
 }
 
@@ -1258,7 +1279,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 
 	if (cic) {
 		INIT_LIST_HEAD(&cic->list);
-		cic->cfqq = NULL;
+		cic->cfqq[ASYNC] = NULL;
+		cic->cfqq[SYNC] = NULL;
 		cic->key = NULL;
 		cic->last_end_request = jiffies;
 		cic->ttime_total = 0;
@@ -1266,6 +1288,8 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
 		cic->ttime_mean = 0;
 		cic->dtor = cfq_free_io_context;
 		cic->exit = cfq_exit_io_context;
+		INIT_LIST_HEAD(&cic->queue_list);
+		atomic_inc(&ioc_count);
 	}
 
 	return cic;
@@ -1318,14 +1342,27 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq)
 	cfq_clear_cfqq_prio_changed(cfqq);
 }
 
-static inline void changed_ioprio(struct cfq_queue *cfqq)
+static inline void changed_ioprio(struct cfq_io_context *cic)
 {
-	if (cfqq) {
-		struct cfq_data *cfqd = cfqq->cfqd;
-
+	struct cfq_data *cfqd = cic->key;
+	struct cfq_queue *cfqq;
+	if (cfqd) {
 		spin_lock(cfqd->queue->queue_lock);
-		cfq_mark_cfqq_prio_changed(cfqq);
-		cfq_init_prio_data(cfqq);
+		cfqq = cic->cfqq[ASYNC];
+		if (cfqq) {
+			struct cfq_queue *new_cfqq;
+			new_cfqq = cfq_get_queue(cfqd, CFQ_KEY_ASYNC,
+						 cic->ioc->task, GFP_ATOMIC);
+			if (new_cfqq) {
+				cic->cfqq[ASYNC] = new_cfqq;
+				cfq_put_queue(cfqq);
+			}
+		}
+		cfqq = cic->cfqq[SYNC];
+		if (cfqq) {
+			cfq_mark_cfqq_prio_changed(cfqq);
+			cfq_init_prio_data(cfqq);
+		}
 		spin_unlock(cfqd->queue->queue_lock);
 	}
 }
@@ -1335,24 +1372,32 @@ static inline void changed_ioprio(struct cfq_queue *cfqq)
  */
 static int cfq_ioc_set_ioprio(struct io_context *ioc, unsigned int ioprio)
 {
-	struct cfq_io_context *cic = ioc->cic;
+	struct cfq_io_context *cic;
+
+	write_lock(&cfq_exit_lock);
+
+	cic = ioc->cic;
 
-	changed_ioprio(cic->cfqq);
+	changed_ioprio(cic);
 
 	list_for_each_entry(cic, &cic->list, list)
-		changed_ioprio(cic->cfqq);
+		changed_ioprio(cic);
+
+	write_unlock(&cfq_exit_lock);
 
 	return 0;
 }
 
 static struct cfq_queue *
-cfq_get_queue(struct cfq_data *cfqd, unsigned int key, unsigned short ioprio,
+cfq_get_queue(struct cfq_data *cfqd, unsigned int key, struct task_struct *tsk,
 	      gfp_t gfp_mask)
 {
 	const int hashval = hash_long(key, CFQ_QHASH_SHIFT);
 	struct cfq_queue *cfqq, *new_cfqq = NULL;
+	unsigned short ioprio;
 
 retry:
+	ioprio = tsk->ioprio;
 	cfqq = __cfq_find_cfq_hash(cfqd, key, ioprio, hashval);
 
 	if (!cfqq) {
@@ -1381,7 +1426,6 @@ retry:
 		hlist_add_head(&cfqq->cfq_hash, &cfqd->cfq_hash[hashval]);
 		atomic_set(&cfqq->ref, 0);
 		cfqq->cfqd = cfqd;
-		atomic_inc(&cfqd->ref);
 		cfqq->service_last = 0;
 		/*
 		 * set ->slice_left to allow preemption for a new process
@@ -1419,6 +1463,7 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 	if (!ioc)
 		return NULL;
 
+restart:
 	if ((cic = ioc->cic) == NULL) {
 		cic = cfq_alloc_io_context(cfqd, gfp_mask);
 
@@ -1429,11 +1474,13 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 		 * manually increment generic io_context usage count, it
 		 * cannot go away since we are already holding one ref to it
 		 */
-		ioc->cic = cic;
-		ioc->set_ioprio = cfq_ioc_set_ioprio;
 		cic->ioc = ioc;
 		cic->key = cfqd;
-		atomic_inc(&cfqd->ref);
+		read_lock(&cfq_exit_lock);
+		ioc->set_ioprio = cfq_ioc_set_ioprio;
+		ioc->cic = cic;
+		list_add(&cic->queue_list, &cfqd->cic_list);
+		read_unlock(&cfq_exit_lock);
 	} else {
 		struct cfq_io_context *__cic;
 
@@ -1443,6 +1490,20 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 		if (cic->key == cfqd)
 			goto out;
 
+		if (unlikely(!cic->key)) {
+			read_lock(&cfq_exit_lock);
+			if (list_empty(&cic->list))
+				ioc->cic = NULL;
+			else
+				ioc->cic = list_entry(cic->list.next,
+						      struct cfq_io_context,
+						      list);
+			read_unlock(&cfq_exit_lock);
+			kmem_cache_free(cfq_ioc_pool, cic);
+			atomic_dec(&ioc_count);
+			goto restart;
+		}
+
 		/*
 		 * cic exists, check if we already are there. linear search
 		 * should be ok here, the list will usually not be more than
@@ -1457,6 +1518,14 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 				cic = __cic;
 				goto out;
 			}
+			if (unlikely(!__cic->key)) {
+				read_lock(&cfq_exit_lock);
+				list_del(&__cic->list);
+				read_unlock(&cfq_exit_lock);
+				kmem_cache_free(cfq_ioc_pool, __cic);
+				atomic_dec(&ioc_count);
+				goto restart;
+			}
 		}
 
 		/*
@@ -1469,8 +1538,10 @@ cfq_get_io_context(struct cfq_data *cfqd, pid_t pid, gfp_t gfp_mask)
 
 		__cic->ioc = ioc;
 		__cic->key = cfqd;
-		atomic_inc(&cfqd->ref);
+		read_lock(&cfq_exit_lock);
 		list_add(&__cic->list, &cic->list);
+		list_add(&__cic->queue_list, &cfqd->cic_list);
+		read_unlock(&cfq_exit_lock);
 		cic = __cic;
 	}
 
@@ -1890,6 +1961,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	struct cfq_queue *cfqq;
 	struct cfq_rq *crq;
 	unsigned long flags;
+	int is_sync = key != CFQ_KEY_ASYNC;
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
 
@@ -1900,14 +1972,14 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	if (!cic)
 		goto queue_fail;
 
-	if (!cic->cfqq) {
-		cfqq = cfq_get_queue(cfqd, key, tsk->ioprio, gfp_mask);
+	if (!cic->cfqq[is_sync]) {
+		cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
 		if (!cfqq)
 			goto queue_fail;
 
-		cic->cfqq = cfqq;
+		cic->cfqq[is_sync] = cfqq;
 	} else
-		cfqq = cic->cfqq;
+		cfqq = cic->cfqq[is_sync];
 
 	cfqq->allocated[rw]++;
 	cfq_clear_cfqq_must_alloc(cfqq);
@@ -1924,7 +1996,7 @@ cfq_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 	crq->cfq_queue = cfqq;
 	crq->io_context = cic;
 
-	if (rw == READ || process_sync(tsk))
+	if (is_sync)
 		cfq_mark_crq_is_sync(crq);
 	else
 		cfq_clear_crq_is_sync(crq);
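cfq_set_request() now keys everything off a single is_sync flag: it both selects which of the two per-context queues serves the request and replaces the old `rw == READ || process_sync(tsk)` test when marking the crq. Schematically (a sketch; ASYNC/SYNC are assumed to be 0 and 1, matching their use as array indices in this patch):

    /* one cached cfq_queue per direction class, per io_context */
    int is_sync = (key != CFQ_KEY_ASYNC);    /* 0 -> ASYNC, 1 -> SYNC */
    struct cfq_queue *cfqq = cic->cfqq[is_sync];

    if (!cfqq) {
        cfqq = cfq_get_queue(cfqd, key, tsk, gfp_mask);
        if (!cfqq)
            goto queue_fail;
        cic->cfqq[is_sync] = cfqq;    /* cache for later requests */
    }
    /* Reads and sync writes land in the SYNC queue; plain buffered
     * writes in the ASYNC one, which changed_ioprio() above can swap
     * out wholesale when the task's io priority changes. */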
@@ -2055,15 +2127,35 @@ static void cfq_shutdown_timer_wq(struct cfq_data *cfqd)
 	blk_sync_queue(cfqd->queue);
 }
 
-static void cfq_put_cfqd(struct cfq_data *cfqd)
+static void cfq_exit_queue(elevator_t *e)
 {
+	struct cfq_data *cfqd = e->elevator_data;
 	request_queue_t *q = cfqd->queue;
 
-	if (!atomic_dec_and_test(&cfqd->ref))
-		return;
+	cfq_shutdown_timer_wq(cfqd);
+	write_lock(&cfq_exit_lock);
+	spin_lock_irq(q->queue_lock);
+	if (cfqd->active_queue)
+		__cfq_slice_expired(cfqd, cfqd->active_queue, 0);
+	while(!list_empty(&cfqd->cic_list)) {
+		struct cfq_io_context *cic = list_entry(cfqd->cic_list.next,
+							struct cfq_io_context,
+							queue_list);
+		if (cic->cfqq[ASYNC]) {
+			cfq_put_queue(cic->cfqq[ASYNC]);
+			cic->cfqq[ASYNC] = NULL;
+		}
+		if (cic->cfqq[SYNC]) {
+			cfq_put_queue(cic->cfqq[SYNC]);
+			cic->cfqq[SYNC] = NULL;
+		}
+		cic->key = NULL;
+		list_del_init(&cic->queue_list);
+	}
+	spin_unlock_irq(q->queue_lock);
+	write_unlock(&cfq_exit_lock);
 
 	cfq_shutdown_timer_wq(cfqd);
-	blk_put_queue(q);
 
 	mempool_destroy(cfqd->crq_pool);
 	kfree(cfqd->crq_hash);
@@ -2071,14 +2163,6 @@ static void cfq_put_cfqd(struct cfq_data *cfqd)
 	kfree(cfqd);
 }
 
-static void cfq_exit_queue(elevator_t *e)
-{
-	struct cfq_data *cfqd = e->elevator_data;
-
-	cfq_shutdown_timer_wq(cfqd);
-	cfq_put_cfqd(cfqd);
-}
-
 static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 {
 	struct cfq_data *cfqd;
@@ -2097,6 +2181,7 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	INIT_LIST_HEAD(&cfqd->cur_rr);
 	INIT_LIST_HEAD(&cfqd->idle_rr);
 	INIT_LIST_HEAD(&cfqd->empty_list);
+	INIT_LIST_HEAD(&cfqd->cic_list);
 
 	cfqd->crq_hash = kmalloc(sizeof(struct hlist_head) * CFQ_MHASH_ENTRIES, GFP_KERNEL);
 	if (!cfqd->crq_hash)
@@ -2118,7 +2203,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 	e->elevator_data = cfqd;
 
 	cfqd->queue = q;
-	atomic_inc(&q->refcnt);
 
 	cfqd->max_queued = q->nr_requests / 4;
 	q->nr_batching = cfq_queued;
@@ -2133,8 +2217,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue, q);
 
-	atomic_set(&cfqd->ref, 1);
-
 	cfqd->cfq_queued = cfq_queued;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
@@ -2193,11 +2275,6 @@ fail:
 /*
  * sysfs parts below -->
  */
-struct cfq_fs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct cfq_data *, char *);
-	ssize_t (*store)(struct cfq_data *, const char *, size_t);
-};
 
 static ssize_t
 cfq_var_show(unsigned int var, char *page)
@@ -2215,8 +2292,9 @@ cfq_var_store(unsigned int *var, const char *page, size_t count)
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)				\
-static ssize_t __FUNC(struct cfq_data *cfqd, char *page)		\
+static ssize_t __FUNC(elevator_t *e, char *page)			\
 {									\
+	struct cfq_data *cfqd = e->elevator_data;			\
 	unsigned int __data = __VAR;					\
 	if (__CONV)							\
 		__data = jiffies_to_msecs(__data);			\
@@ -2226,8 +2304,8 @@ SHOW_FUNCTION(cfq_quantum_show, cfqd->cfq_quantum, 0);
 SHOW_FUNCTION(cfq_queued_show, cfqd->cfq_queued, 0);
 SHOW_FUNCTION(cfq_fifo_expire_sync_show, cfqd->cfq_fifo_expire[1], 1);
 SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
-SHOW_FUNCTION(cfq_back_max_show, cfqd->cfq_back_max, 0);
-SHOW_FUNCTION(cfq_back_penalty_show, cfqd->cfq_back_penalty, 0);
+SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
+SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
 SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
 SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
 SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
@@ -2236,8 +2314,9 @@ SHOW_FUNCTION(cfq_max_depth_show, cfqd->cfq_max_depth, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
-static ssize_t __FUNC(struct cfq_data *cfqd, const char *page, size_t count)	\
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
 {									\
+	struct cfq_data *cfqd = e->elevator_data;			\
 	unsigned int __data;						\
 	int ret = cfq_var_store(&__data, (page), count);		\
 	if (__data < (MIN))						\
@@ -2254,8 +2333,8 @@ STORE_FUNCTION(cfq_quantum_store, &cfqd->cfq_quantum, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_queued_store, &cfqd->cfq_queued, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_fifo_expire_sync_store, &cfqd->cfq_fifo_expire[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_fifo_expire_async_store, &cfqd->cfq_fifo_expire[0], 1, UINT_MAX, 1);
-STORE_FUNCTION(cfq_back_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
-STORE_FUNCTION(cfq_back_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
+STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1, UINT_MAX, 0);
 STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
 STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
@@ -2263,112 +2342,22 @@ STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1, UINT_MAX,
 STORE_FUNCTION(cfq_max_depth_store, &cfqd->cfq_max_depth, 1, UINT_MAX, 0);
 #undef STORE_FUNCTION
 
-static struct cfq_fs_entry cfq_quantum_entry = {
-	.attr = {.name = "quantum", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_quantum_show,
-	.store = cfq_quantum_store,
-};
-static struct cfq_fs_entry cfq_queued_entry = {
-	.attr = {.name = "queued", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_queued_show,
-	.store = cfq_queued_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_sync_entry = {
-	.attr = {.name = "fifo_expire_sync", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_fifo_expire_sync_show,
-	.store = cfq_fifo_expire_sync_store,
-};
-static struct cfq_fs_entry cfq_fifo_expire_async_entry = {
-	.attr = {.name = "fifo_expire_async", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_fifo_expire_async_show,
-	.store = cfq_fifo_expire_async_store,
-};
-static struct cfq_fs_entry cfq_back_max_entry = {
-	.attr = {.name = "back_seek_max", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_back_max_show,
-	.store = cfq_back_max_store,
-};
-static struct cfq_fs_entry cfq_back_penalty_entry = {
-	.attr = {.name = "back_seek_penalty", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_back_penalty_show,
-	.store = cfq_back_penalty_store,
-};
-static struct cfq_fs_entry cfq_slice_sync_entry = {
-	.attr = {.name = "slice_sync", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_sync_show,
-	.store = cfq_slice_sync_store,
-};
-static struct cfq_fs_entry cfq_slice_async_entry = {
-	.attr = {.name = "slice_async", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_async_show,
-	.store = cfq_slice_async_store,
-};
-static struct cfq_fs_entry cfq_slice_async_rq_entry = {
-	.attr = {.name = "slice_async_rq", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_async_rq_show,
-	.store = cfq_slice_async_rq_store,
-};
-static struct cfq_fs_entry cfq_slice_idle_entry = {
-	.attr = {.name = "slice_idle", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_slice_idle_show,
-	.store = cfq_slice_idle_store,
-};
-static struct cfq_fs_entry cfq_max_depth_entry = {
-	.attr = {.name = "max_depth", .mode = S_IRUGO | S_IWUSR },
-	.show = cfq_max_depth_show,
-	.store = cfq_max_depth_store,
-};
-
-static struct attribute *default_attrs[] = {
-	&cfq_quantum_entry.attr,
-	&cfq_queued_entry.attr,
-	&cfq_fifo_expire_sync_entry.attr,
-	&cfq_fifo_expire_async_entry.attr,
-	&cfq_back_max_entry.attr,
-	&cfq_back_penalty_entry.attr,
-	&cfq_slice_sync_entry.attr,
-	&cfq_slice_async_entry.attr,
-	&cfq_slice_async_rq_entry.attr,
-	&cfq_slice_idle_entry.attr,
-	&cfq_max_depth_entry.attr,
-	NULL,
-};
-
-#define to_cfq(atr) container_of((atr), struct cfq_fs_entry, attr)
-
-static ssize_t
-cfq_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct cfq_fs_entry *entry = to_cfq(attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-cfq_attr_store(struct kobject *kobj, struct attribute *attr,
-	       const char *page, size_t length)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct cfq_fs_entry *entry = to_cfq(attr);
-
-	if (!entry->store)
-		return -EIO;
-
-	return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops cfq_sysfs_ops = {
-	.show	= cfq_attr_show,
-	.store	= cfq_attr_store,
-};
-
-static struct kobj_type cfq_ktype = {
-	.sysfs_ops	= &cfq_sysfs_ops,
-	.default_attrs	= default_attrs,
-};
+#define CFQ_ATTR(name) \
+	__ATTR(name, S_IRUGO|S_IWUSR, cfq_##name##_show, cfq_##name##_store)
+
+static struct elv_fs_entry cfq_attrs[] = {
+	CFQ_ATTR(quantum),
+	CFQ_ATTR(queued),
+	CFQ_ATTR(fifo_expire_sync),
+	CFQ_ATTR(fifo_expire_async),
+	CFQ_ATTR(back_seek_max),
+	CFQ_ATTR(back_seek_penalty),
+	CFQ_ATTR(slice_sync),
+	CFQ_ATTR(slice_async),
+	CFQ_ATTR(slice_async_rq),
+	CFQ_ATTR(slice_idle),
+	CFQ_ATTR(max_depth),
+	__ATTR_NULL
+};
 
 static struct elevator_type iosched_cfq = {
@@ -2389,8 +2378,9 @@ static struct elevator_type iosched_cfq = {
 	.elevator_may_queue_fn =	cfq_may_queue,
 	.elevator_init_fn =		cfq_init_queue,
 	.elevator_exit_fn =		cfq_exit_queue,
+	.trim =				cfq_trim,
 	},
-	.elevator_ktype =	&cfq_ktype,
+	.elevator_attrs =	cfq_attrs,
 	.elevator_name =	"cfq",
 	.elevator_owner =	THIS_MODULE,
 };
@@ -2419,7 +2409,13 @@ static int __init cfq_init(void)
 
 static void __exit cfq_exit(void)
 {
+	DECLARE_COMPLETION(all_gone);
 	elv_unregister(&iosched_cfq);
+	ioc_gone = &all_gone;
+	barrier();
+	if (atomic_read(&ioc_count))
+		complete(ioc_gone);
+	synchronize_rcu();
 	cfq_slab_kill();
 }
 
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 27e494b1bf97..399fa1e60e1f 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -694,11 +694,6 @@ deadline_set_request(request_queue_t *q, struct request *rq, struct bio *bio,
 /*
  * sysfs parts below
  */
-struct deadline_fs_entry {
-	struct attribute attr;
-	ssize_t (*show)(struct deadline_data *, char *);
-	ssize_t (*store)(struct deadline_data *, const char *, size_t);
-};
 
 static ssize_t
 deadline_var_show(int var, char *page)
@@ -716,23 +711,25 @@ deadline_var_store(int *var, const char *page, size_t count)
 	return count;
 }
 
 #define SHOW_FUNCTION(__FUNC, __VAR, __CONV)			\
-static ssize_t __FUNC(struct deadline_data *dd, char *page)	\
+static ssize_t __FUNC(elevator_t *e, char *page)		\
 {								\
-	int __data = __VAR;					\
+	struct deadline_data *dd = e->elevator_data;		\
+	int __data = __VAR;					\
 	if (__CONV)						\
 		__data = jiffies_to_msecs(__data);		\
 	return deadline_var_show(__data, (page));		\
 }
-SHOW_FUNCTION(deadline_readexpire_show, dd->fifo_expire[READ], 1);
-SHOW_FUNCTION(deadline_writeexpire_show, dd->fifo_expire[WRITE], 1);
-SHOW_FUNCTION(deadline_writesstarved_show, dd->writes_starved, 0);
-SHOW_FUNCTION(deadline_frontmerges_show, dd->front_merges, 0);
-SHOW_FUNCTION(deadline_fifobatch_show, dd->fifo_batch, 0);
+SHOW_FUNCTION(deadline_read_expire_show, dd->fifo_expire[READ], 1);
+SHOW_FUNCTION(deadline_write_expire_show, dd->fifo_expire[WRITE], 1);
+SHOW_FUNCTION(deadline_writes_starved_show, dd->writes_starved, 0);
+SHOW_FUNCTION(deadline_front_merges_show, dd->front_merges, 0);
+SHOW_FUNCTION(deadline_fifo_batch_show, dd->fifo_batch, 0);
 #undef SHOW_FUNCTION
 
 #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV)			\
-static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)	\
+static ssize_t __FUNC(elevator_t *e, const char *page, size_t count)	\
 {									\
+	struct deadline_data *dd = e->elevator_data;			\
 	int __data;							\
 	int ret = deadline_var_store(&__data, (page), count);		\
 	if (__data < (MIN))						\
@@ -745,83 +742,24 @@ static ssize_t __FUNC(struct deadline_data *dd, const char *page, size_t count)
 	*(__PTR) = __data;						\
 	return ret;							\
 }
-STORE_FUNCTION(deadline_readexpire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writeexpire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
-STORE_FUNCTION(deadline_writesstarved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
-STORE_FUNCTION(deadline_frontmerges_store, &dd->front_merges, 0, 1, 0);
-STORE_FUNCTION(deadline_fifobatch_store, &dd->fifo_batch, 0, INT_MAX, 0);
+STORE_FUNCTION(deadline_read_expire_store, &dd->fifo_expire[READ], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_write_expire_store, &dd->fifo_expire[WRITE], 0, INT_MAX, 1);
+STORE_FUNCTION(deadline_writes_starved_store, &dd->writes_starved, INT_MIN, INT_MAX, 0);
+STORE_FUNCTION(deadline_front_merges_store, &dd->front_merges, 0, 1, 0);
+STORE_FUNCTION(deadline_fifo_batch_store, &dd->fifo_batch, 0, INT_MAX, 0);
 #undef STORE_FUNCTION
 
-static struct deadline_fs_entry deadline_readexpire_entry = {
-	.attr = {.name = "read_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_readexpire_show,
-	.store = deadline_readexpire_store,
-};
-static struct deadline_fs_entry deadline_writeexpire_entry = {
-	.attr = {.name = "write_expire", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_writeexpire_show,
-	.store = deadline_writeexpire_store,
-};
-static struct deadline_fs_entry deadline_writesstarved_entry = {
-	.attr = {.name = "writes_starved", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_writesstarved_show,
-	.store = deadline_writesstarved_store,
-};
-static struct deadline_fs_entry deadline_frontmerges_entry = {
-	.attr = {.name = "front_merges", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_frontmerges_show,
-	.store = deadline_frontmerges_store,
-};
-static struct deadline_fs_entry deadline_fifobatch_entry = {
-	.attr = {.name = "fifo_batch", .mode = S_IRUGO | S_IWUSR },
-	.show = deadline_fifobatch_show,
-	.store = deadline_fifobatch_store,
-};
-
-static struct attribute *default_attrs[] = {
-	&deadline_readexpire_entry.attr,
-	&deadline_writeexpire_entry.attr,
-	&deadline_writesstarved_entry.attr,
-	&deadline_frontmerges_entry.attr,
-	&deadline_fifobatch_entry.attr,
-	NULL,
-};
-
-#define to_deadline(atr) container_of((atr), struct deadline_fs_entry, attr)
-
-static ssize_t
-deadline_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct deadline_fs_entry *entry = to_deadline(attr);
-
-	if (!entry->show)
-		return -EIO;
-
-	return entry->show(e->elevator_data, page);
-}
-
-static ssize_t
-deadline_attr_store(struct kobject *kobj, struct attribute *attr,
-		    const char *page, size_t length)
-{
-	elevator_t *e = container_of(kobj, elevator_t, kobj);
-	struct deadline_fs_entry *entry = to_deadline(attr);
-
-	if (!entry->store)
-		return -EIO;
-
-	return entry->store(e->elevator_data, page, length);
-}
-
-static struct sysfs_ops deadline_sysfs_ops = {
-	.show	= deadline_attr_show,
-	.store	= deadline_attr_store,
-};
-
-static struct kobj_type deadline_ktype = {
-	.sysfs_ops	= &deadline_sysfs_ops,
-	.default_attrs	= default_attrs,
-};
+#define DD_ATTR(name) \
+	__ATTR(name, S_IRUGO|S_IWUSR, deadline_##name##_show, \
+				      deadline_##name##_store)
+
+static struct elv_fs_entry deadline_attrs[] = {
+	DD_ATTR(read_expire),
+	DD_ATTR(write_expire),
+	DD_ATTR(writes_starved),
+	DD_ATTR(front_merges),
+	DD_ATTR(fifo_batch),
+	__ATTR_NULL
+};
 
 static struct elevator_type iosched_deadline = {
@@ -840,7 +778,7 @@ static struct elevator_type iosched_deadline = {
 	.elevator_exit_fn =		deadline_exit_queue,
 	},
 
-	.elevator_ktype =	&deadline_ktype,
+	.elevator_attrs =	deadline_attrs,
 	.elevator_name =	"deadline",
 	.elevator_owner =	THIS_MODULE,
 };
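Each SHOW_FUNCTION/STORE_FUNCTION invocation stamps out a complete sysfs handler. After preprocessing, the read_expire pair looks roughly like this (a sketch with the __CONV == 1 branch and the [0, INT_MAX] clamp folded in):

    static ssize_t deadline_read_expire_show(elevator_t *e, char *page)
    {
        struct deadline_data *dd = e->elevator_data;
        int __data = jiffies_to_msecs(dd->fifo_expire[READ]);

        return deadline_var_show(__data, page);
    }

    static ssize_t deadline_read_expire_store(elevator_t *e,
                                              const char *page, size_t count)
    {
        struct deadline_data *dd = e->elevator_data;
        int __data;
        int ret = deadline_var_store(&__data, page, count);

        if (__data < 0)        /* clamp to MIN; MAX is INT_MAX here */
            __data = 0;
        dd->fifo_expire[READ] = msecs_to_jiffies(__data);
        return ret;
    }

The only change this patch makes to these handlers is the first argument: they now take the elevator_t itself and fetch their private data from e->elevator_data, which is what lets a single generic elv_attr_show()/elv_attr_store() in elevator.c serve all the schedulers.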
diff --git a/block/elevator.c b/block/elevator.c index 24b702d649a9..db3d0d8296a0 100644 --- a/block/elevator.c +++ b/block/elevator.c | |||
| @@ -120,15 +120,10 @@ static struct elevator_type *elevator_get(const char *name) | |||
| 120 | return e; | 120 | return e; |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | static int elevator_attach(request_queue_t *q, struct elevator_type *e, | 123 | static int elevator_attach(request_queue_t *q, struct elevator_queue *eq) |
| 124 | struct elevator_queue *eq) | ||
| 125 | { | 124 | { |
| 126 | int ret = 0; | 125 | int ret = 0; |
| 127 | 126 | ||
| 128 | memset(eq, 0, sizeof(*eq)); | ||
| 129 | eq->ops = &e->ops; | ||
| 130 | eq->elevator_type = e; | ||
| 131 | |||
| 132 | q->elevator = eq; | 127 | q->elevator = eq; |
| 133 | 128 | ||
| 134 | if (eq->ops->elevator_init_fn) | 129 | if (eq->ops->elevator_init_fn) |
| @@ -154,6 +149,32 @@ static int __init elevator_setup(char *str) | |||
| 154 | 149 | ||
| 155 | __setup("elevator=", elevator_setup); | 150 | __setup("elevator=", elevator_setup); |
| 156 | 151 | ||
| 152 | static struct kobj_type elv_ktype; | ||
| 153 | |||
| 154 | static elevator_t *elevator_alloc(struct elevator_type *e) | ||
| 155 | { | ||
| 156 | elevator_t *eq = kmalloc(sizeof(elevator_t), GFP_KERNEL); | ||
| 157 | if (eq) { | ||
| 158 | memset(eq, 0, sizeof(*eq)); | ||
| 159 | eq->ops = &e->ops; | ||
| 160 | eq->elevator_type = e; | ||
| 161 | kobject_init(&eq->kobj); | ||
| 162 | snprintf(eq->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); | ||
| 163 | eq->kobj.ktype = &elv_ktype; | ||
| 164 | mutex_init(&eq->sysfs_lock); | ||
| 165 | } else { | ||
| 166 | elevator_put(e); | ||
| 167 | } | ||
| 168 | return eq; | ||
| 169 | } | ||
| 170 | |||
| 171 | static void elevator_release(struct kobject *kobj) | ||
| 172 | { | ||
| 173 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
| 174 | elevator_put(e->elevator_type); | ||
| 175 | kfree(e); | ||
| 176 | } | ||
| 177 | |||
| 157 | int elevator_init(request_queue_t *q, char *name) | 178 | int elevator_init(request_queue_t *q, char *name) |
| 158 | { | 179 | { |
| 159 | struct elevator_type *e = NULL; | 180 | struct elevator_type *e = NULL; |
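elevator_alloc() and elevator_release() establish the standard embedded-kobject lifetime: initialize the kobject at allocation time, and free the containing object only from the ktype's release callback, so a kobject_put() from any path, whether an error unwind, elevator_exit(), or sysfs, is always safe and the last one wins. The same pattern in isolation, with a hypothetical struct foo standing in for elevator_t:

#include <linux/kobject.h>
#include <linux/slab.h>

struct foo {
        struct kobject kobj;    /* refcounted; never kfree() this directly */
        int payload;
};

static void foo_release(struct kobject *kobj)
{
        /* runs exactly once, when the final reference is dropped */
        kfree(container_of(kobj, struct foo, kobj));
}

static struct kobj_type foo_ktype = {
        .release = foo_release,
};

Note how elevator_release() also moves the elevator_put() of the type there, so the scheduler module stays pinned for as long as any reference to the elevator object exists.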
| @@ -176,29 +197,26 @@ int elevator_init(request_queue_t *q, char *name) | |||
| 176 | e = elevator_get("noop"); | 197 | e = elevator_get("noop"); |
| 177 | } | 198 | } |
| 178 | 199 | ||
| 179 | eq = kmalloc(sizeof(struct elevator_queue), GFP_KERNEL); | 200 | eq = elevator_alloc(e); |
| 180 | if (!eq) { | 201 | if (!eq) |
| 181 | elevator_put(e); | ||
| 182 | return -ENOMEM; | 202 | return -ENOMEM; |
| 183 | } | ||
| 184 | 203 | ||
| 185 | ret = elevator_attach(q, e, eq); | 204 | ret = elevator_attach(q, eq); |
| 186 | if (ret) { | 205 | if (ret) |
| 187 | kfree(eq); | 206 | kobject_put(&eq->kobj); |
| 188 | elevator_put(e); | ||
| 189 | } | ||
| 190 | 207 | ||
| 191 | return ret; | 208 | return ret; |
| 192 | } | 209 | } |
| 193 | 210 | ||
| 194 | void elevator_exit(elevator_t *e) | 211 | void elevator_exit(elevator_t *e) |
| 195 | { | 212 | { |
| 213 | mutex_lock(&e->sysfs_lock); | ||
| 196 | if (e->ops->elevator_exit_fn) | 214 | if (e->ops->elevator_exit_fn) |
| 197 | e->ops->elevator_exit_fn(e); | 215 | e->ops->elevator_exit_fn(e); |
| 216 | e->ops = NULL; | ||
| 217 | mutex_unlock(&e->sysfs_lock); | ||
| 198 | 218 | ||
| 199 | elevator_put(e->elevator_type); | 219 | kobject_put(&e->kobj); |
| 200 | e->elevator_type = NULL; | ||
| 201 | kfree(e); | ||
| 202 | } | 220 | } |
| 203 | 221 | ||
| 204 | /* | 222 | /* |
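elevator_exit() no longer frees anything itself: it shuts the scheduler down under sysfs_lock, leaves e->ops = NULL behind as a tombstone, and drops its reference with kobject_put(). The tombstone matters because sysfs keeps the elevator's kobject pinned while its directory is in use; an illustrative timeline (not code from the patch):

/*
 *  CPU A: read of an iosched attribute    CPU B: elevator_exit(e)
 *
 *    mutex_lock(&e->sysfs_lock);
 *    e->ops != NULL -> entry->show(e);      mutex_lock(...) blocks
 *    mutex_unlock(&e->sysfs_lock);
 *                                           elevator_exit_fn(e);
 *                                           e->ops = NULL;
 *                                           mutex_unlock(...);
 *                                           kobject_put(&e->kobj);
 *
 * A read issued after this sees e->ops == NULL and fails with
 * -ENOENT (see elv_attr_show() below); the kfree() in
 * elevator_release() waits until sysfs drops its reference too.
 */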
| @@ -627,26 +645,79 @@ void elv_completed_request(request_queue_t *q, struct request *rq) | |||
| 627 | } | 645 | } |
| 628 | } | 646 | } |
| 629 | 647 | ||
| 630 | int elv_register_queue(struct request_queue *q) | 648 | #define to_elv(atr) container_of((atr), struct elv_fs_entry, attr) |
| 649 | |||
| 650 | static ssize_t | ||
| 651 | elv_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | ||
| 631 | { | 652 | { |
| 632 | elevator_t *e = q->elevator; | 653 | elevator_t *e = container_of(kobj, elevator_t, kobj); |
| 654 | struct elv_fs_entry *entry = to_elv(attr); | ||
| 655 | ssize_t error; | ||
| 633 | 656 | ||
| 634 | e->kobj.parent = kobject_get(&q->kobj); | 657 | if (!entry->show) |
| 635 | if (!e->kobj.parent) | 658 | return -EIO; |
| 636 | return -EBUSY; | ||
| 637 | 659 | ||
| 638 | snprintf(e->kobj.name, KOBJ_NAME_LEN, "%s", "iosched"); | 660 | mutex_lock(&e->sysfs_lock); |
| 639 | e->kobj.ktype = e->elevator_type->elevator_ktype; | 661 | error = e->ops ? entry->show(e, page) : -ENOENT; |
| 662 | mutex_unlock(&e->sysfs_lock); | ||
| 663 | return error; | ||
| 664 | } | ||
| 665 | |||
| 666 | static ssize_t | ||
| 667 | elv_attr_store(struct kobject *kobj, struct attribute *attr, | ||
| 668 | const char *page, size_t length) | ||
| 669 | { | ||
| 670 | elevator_t *e = container_of(kobj, elevator_t, kobj); | ||
| 671 | struct elv_fs_entry *entry = to_elv(attr); | ||
| 672 | ssize_t error; | ||
| 673 | |||
| 674 | if (!entry->store) | ||
| 675 | return -EIO; | ||
| 676 | |||
| 677 | mutex_lock(&e->sysfs_lock); | ||
| 678 | error = e->ops ? entry->store(e, page, length) : -ENOENT; | ||
| 679 | mutex_unlock(&e->sysfs_lock); | ||
| 680 | return error; | ||
| 681 | } | ||
| 682 | |||
| 683 | static struct sysfs_ops elv_sysfs_ops = { | ||
| 684 | .show = elv_attr_show, | ||
| 685 | .store = elv_attr_store, | ||
| 686 | }; | ||
| 687 | |||
| 688 | static struct kobj_type elv_ktype = { | ||
| 689 | .sysfs_ops = &elv_sysfs_ops, | ||
| 690 | .release = elevator_release, | ||
| 691 | }; | ||
| 640 | 692 | ||
| 641 | return kobject_register(&e->kobj); | 693 | int elv_register_queue(struct request_queue *q) |
| 694 | { | ||
| 695 | elevator_t *e = q->elevator; | ||
| 696 | int error; | ||
| 697 | |||
| 698 | e->kobj.parent = &q->kobj; | ||
| 699 | |||
| 700 | error = kobject_add(&e->kobj); | ||
| 701 | if (!error) { | ||
| 702 | struct elv_fs_entry *attr = e->elevator_type->elevator_attrs; | ||
| 703 | if (attr) { | ||
| 704 | while (attr->attr.name) { | ||
| 705 | if (sysfs_create_file(&e->kobj, &attr->attr)) | ||
| 706 | break; | ||
| 707 | attr++; | ||
| 708 | } | ||
| 709 | } | ||
| 710 | kobject_uevent(&e->kobj, KOBJ_ADD); | ||
| 711 | } | ||
| 712 | return error; | ||
| 642 | } | 713 | } |
| 643 | 714 | ||
| 644 | void elv_unregister_queue(struct request_queue *q) | 715 | void elv_unregister_queue(struct request_queue *q) |
| 645 | { | 716 | { |
| 646 | if (q) { | 717 | if (q) { |
| 647 | elevator_t *e = q->elevator; | 718 | elevator_t *e = q->elevator; |
| 648 | kobject_unregister(&e->kobj); | 719 | kobject_uevent(&e->kobj, KOBJ_REMOVE); |
| 649 | kobject_put(&q->kobj); | 720 | kobject_del(&e->kobj); |
| 650 | } | 721 | } |
| 651 | } | 722 | } |
| 652 | 723 | ||
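elv_register_queue() now performs the kobject_add() plus KOBJ_ADD-uevent half of what kobject_register() used to do (the init half already happened in elevator_alloc()) and then populates the directory from the attribute table. Note the loop's semantics: it stops at the first sysfs_create_file() failure, earlier files stay in place, and registration still reports success, since error only reflects kobject_add(). The walk, factored into an equivalent sketch helper for clarity:

/* Hypothetical helper, equivalent to the inline loop above. */
static int elv_create_attrs(elevator_t *e)
{
        struct elv_fs_entry *attr = e->elevator_type->elevator_attrs;

        if (!attr)
                return 0;       /* a scheduler may have no tunables */
        while (attr->attr.name) {
                int error = sysfs_create_file(&e->kobj, &attr->attr);
                if (error)
                        return error;   /* the real loop just breaks */
                attr++;
        }
        return 0;
}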
| @@ -675,21 +746,15 @@ void elv_unregister(struct elevator_type *e) | |||
| 675 | /* | 746 | /* |
| 676 | * Iterate every thread in the process to remove the io contexts. | 747 | * Iterate every thread in the process to remove the io contexts. |
| 677 | */ | 748 | */ |
| 678 | read_lock(&tasklist_lock); | 749 | if (e->ops.trim) { |
| 679 | do_each_thread(g, p) { | 750 | read_lock(&tasklist_lock); |
| 680 | struct io_context *ioc = p->io_context; | 751 | do_each_thread(g, p) { |
| 681 | if (ioc && ioc->cic) { | 752 | task_lock(p); |
| 682 | ioc->cic->exit(ioc->cic); | 753 | e->ops.trim(p->io_context); |
| 683 | ioc->cic->dtor(ioc->cic); | 754 | task_unlock(p); |
| 684 | ioc->cic = NULL; | 755 | } while_each_thread(g, p); |
| 685 | } | 756 | read_unlock(&tasklist_lock); |
| 686 | if (ioc && ioc->aic) { | 757 | } |
| 687 | ioc->aic->exit(ioc->aic); | ||
| 688 | ioc->aic->dtor(ioc->aic); | ||
| 689 | ioc->aic = NULL; | ||
| 690 | } | ||
| 691 | } while_each_thread(g, p); | ||
| 692 | read_unlock(&tasklist_lock); | ||
| 693 | 758 | ||
| 694 | spin_lock_irq(&elv_list_lock); | 759 | spin_lock_irq(&elv_list_lock); |
| 695 | list_del_init(&e->list); | 760 | list_del_init(&e->list); |
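Before this hunk, elv_unregister() reached straight into every task's io_context and knew the cic/aic teardown details of other schedulers; now the scheduler that owns the state supplies a trim hook and the core only walks the tasks. What a scheduler-side implementation plausibly looks like (hypothetical scheduler; the signature comes from the elevator.h hunk further down). One caveat worth flagging: p->io_context can be NULL for tasks that never issued I/O, so either the hook or the caller must filter; as shown here the caller does not, an assumption to check against the final tree:

static void foo_trim(struct io_context *ioc)
{
        if (ioc && ioc->cic) {          /* tolerate NULL, see above */
                ioc->cic->exit(ioc->cic);
                ioc->cic->dtor(ioc->cic);
                ioc->cic = NULL;
        }
}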
| @@ -703,16 +768,16 @@ EXPORT_SYMBOL_GPL(elv_unregister); | |||
| 703 | * need for the new one. this way we have a chance of going back to the old | 768 | * need for the new one. this way we have a chance of going back to the old |
| 704 | * one, if the new one fails init for some reason. | 769 | * one, if the new one fails init for some reason. |
| 705 | */ | 770 | */ |
| 706 | static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | 771 | static int elevator_switch(request_queue_t *q, struct elevator_type *new_e) |
| 707 | { | 772 | { |
| 708 | elevator_t *old_elevator, *e; | 773 | elevator_t *old_elevator, *e; |
| 709 | 774 | ||
| 710 | /* | 775 | /* |
| 711 | * Allocate new elevator | 776 | * Allocate new elevator |
| 712 | */ | 777 | */ |
| 713 | e = kmalloc(sizeof(elevator_t), GFP_KERNEL); | 778 | e = elevator_alloc(new_e); |
| 714 | if (!e) | 779 | if (!e) |
| 715 | goto error; | 780 | return 0; |
| 716 | 781 | ||
| 717 | /* | 782 | /* |
| 718 | * Turn on BYPASS and drain all requests w/ elevator private data | 783 | * Turn on BYPASS and drain all requests w/ elevator private data |
| @@ -743,7 +808,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
| 743 | /* | 808 | /* |
| 744 | * attach and start new elevator | 809 | * attach and start new elevator |
| 745 | */ | 810 | */ |
| 746 | if (elevator_attach(q, new_e, e)) | 811 | if (elevator_attach(q, e)) |
| 747 | goto fail; | 812 | goto fail; |
| 748 | 813 | ||
| 749 | if (elv_register_queue(q)) | 814 | if (elv_register_queue(q)) |
| @@ -754,7 +819,7 @@ static void elevator_switch(request_queue_t *q, struct elevator_type *new_e) | |||
| 754 | */ | 819 | */ |
| 755 | elevator_exit(old_elevator); | 820 | elevator_exit(old_elevator); |
| 756 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 821 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
| 757 | return; | 822 | return 1; |
| 758 | 823 | ||
| 759 | fail_register: | 824 | fail_register: |
| 760 | /* | 825 | /* |
| @@ -767,10 +832,9 @@ fail: | |||
| 767 | q->elevator = old_elevator; | 832 | q->elevator = old_elevator; |
| 768 | elv_register_queue(q); | 833 | elv_register_queue(q); |
| 769 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 834 | clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
| 770 | kfree(e); | 835 | if (e) |
| 771 | error: | 836 | kobject_put(&e->kobj); |
| 772 | elevator_put(new_e); | 837 | return 0; |
| 773 | printk(KERN_ERR "elevator: switch to %s failed\n",new_e->elevator_name); | ||
| 774 | } | 838 | } |
| 775 | 839 | ||
| 776 | ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) | 840 | ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) |
| @@ -797,7 +861,8 @@ ssize_t elv_iosched_store(request_queue_t *q, const char *name, size_t count) | |||
| 797 | return count; | 861 | return count; |
| 798 | } | 862 | } |
| 799 | 863 | ||
| 800 | elevator_switch(q, e); | 864 | if (!elevator_switch(q, e)) |
| 865 | printk(KERN_ERR "elevator: switch to %s failed\n",elevator_name); | ||
| 801 | return count; | 866 | return count; |
| 802 | } | 867 | } |
| 803 | 868 | ||
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 0ef2971a9e82..6c793b196aa9 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
| @@ -1740,16 +1740,11 @@ EXPORT_SYMBOL(blk_run_queue); | |||
| 1740 | * Hopefully the low level driver will have finished any | 1740 | * Hopefully the low level driver will have finished any |
| 1741 | * outstanding requests first... | 1741 | * outstanding requests first... |
| 1742 | **/ | 1742 | **/ |
| 1743 | void blk_cleanup_queue(request_queue_t * q) | 1743 | static void blk_release_queue(struct kobject *kobj) |
| 1744 | { | 1744 | { |
| 1745 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); | ||
| 1745 | struct request_list *rl = &q->rq; | 1746 | struct request_list *rl = &q->rq; |
| 1746 | 1747 | ||
| 1747 | if (!atomic_dec_and_test(&q->refcnt)) | ||
| 1748 | return; | ||
| 1749 | |||
| 1750 | if (q->elevator) | ||
| 1751 | elevator_exit(q->elevator); | ||
| 1752 | |||
| 1753 | blk_sync_queue(q); | 1748 | blk_sync_queue(q); |
| 1754 | 1749 | ||
| 1755 | if (rl->rq_pool) | 1750 | if (rl->rq_pool) |
| @@ -1761,6 +1756,24 @@ void blk_cleanup_queue(request_queue_t * q) | |||
| 1761 | kmem_cache_free(requestq_cachep, q); | 1756 | kmem_cache_free(requestq_cachep, q); |
| 1762 | } | 1757 | } |
| 1763 | 1758 | ||
| 1759 | void blk_put_queue(request_queue_t *q) | ||
| 1760 | { | ||
| 1761 | kobject_put(&q->kobj); | ||
| 1762 | } | ||
| 1763 | EXPORT_SYMBOL(blk_put_queue); | ||
| 1764 | |||
| 1765 | void blk_cleanup_queue(request_queue_t * q) | ||
| 1766 | { | ||
| 1767 | mutex_lock(&q->sysfs_lock); | ||
| 1768 | set_bit(QUEUE_FLAG_DEAD, &q->queue_flags); | ||
| 1769 | mutex_unlock(&q->sysfs_lock); | ||
| 1770 | |||
| 1771 | if (q->elevator) | ||
| 1772 | elevator_exit(q->elevator); | ||
| 1773 | |||
| 1774 | blk_put_queue(q); | ||
| 1775 | } | ||
| 1776 | |||
| 1764 | EXPORT_SYMBOL(blk_cleanup_queue); | 1777 | EXPORT_SYMBOL(blk_cleanup_queue); |
| 1765 | 1778 | ||
| 1766 | static int blk_init_free_list(request_queue_t *q) | 1779 | static int blk_init_free_list(request_queue_t *q) |
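With the refcount folded into the queue's kobject, teardown splits into two distinct duties: blk_cleanup_queue() is for the queue's creator (mark it dead, shut down the elevator, drop the creation reference), while blk_put_queue() merely balances a blk_get_queue(). From a consumer's point of view (use_queue() is a hypothetical placeholder):

static int consumer(request_queue_t *q)
{
        if (blk_get_queue(q))   /* non-zero: queue already marked DEAD */
                return -ENXIO;
        use_queue(q);           /* hypothetical work against the queue */
        blk_put_queue(q);       /* if this was the last reference,
                                 * blk_release_queue() frees everything */
        return 0;
}

This is why the driver hunks further down swap blk_put_queue() for blk_cleanup_queue(): those callers created their queues, and under the old #define in blkdev.h the two names were the same call anyway.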
| @@ -1788,6 +1801,8 @@ request_queue_t *blk_alloc_queue(gfp_t gfp_mask) | |||
| 1788 | } | 1801 | } |
| 1789 | EXPORT_SYMBOL(blk_alloc_queue); | 1802 | EXPORT_SYMBOL(blk_alloc_queue); |
| 1790 | 1803 | ||
| 1804 | static struct kobj_type queue_ktype; | ||
| 1805 | |||
| 1791 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | 1806 | request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) |
| 1792 | { | 1807 | { |
| 1793 | request_queue_t *q; | 1808 | request_queue_t *q; |
| @@ -1798,11 +1813,16 @@ request_queue_t *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
| 1798 | 1813 | ||
| 1799 | memset(q, 0, sizeof(*q)); | 1814 | memset(q, 0, sizeof(*q)); |
| 1800 | init_timer(&q->unplug_timer); | 1815 | init_timer(&q->unplug_timer); |
| 1801 | atomic_set(&q->refcnt, 1); | 1816 | |
| 1817 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | ||
| 1818 | q->kobj.ktype = &queue_ktype; | ||
| 1819 | kobject_init(&q->kobj); | ||
| 1802 | 1820 | ||
| 1803 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; | 1821 | q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug; |
| 1804 | q->backing_dev_info.unplug_io_data = q; | 1822 | q->backing_dev_info.unplug_io_data = q; |
| 1805 | 1823 | ||
| 1824 | mutex_init(&q->sysfs_lock); | ||
| 1825 | |||
| 1806 | return q; | 1826 | return q; |
| 1807 | } | 1827 | } |
| 1808 | EXPORT_SYMBOL(blk_alloc_queue_node); | 1828 | EXPORT_SYMBOL(blk_alloc_queue_node); |
| @@ -1854,8 +1874,10 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
| 1854 | return NULL; | 1874 | return NULL; |
| 1855 | 1875 | ||
| 1856 | q->node = node_id; | 1876 | q->node = node_id; |
| 1857 | if (blk_init_free_list(q)) | 1877 | if (blk_init_free_list(q)) { |
| 1858 | goto out_init; | 1878 | kmem_cache_free(requestq_cachep, q); |
| 1879 | return NULL; | ||
| 1880 | } | ||
| 1859 | 1881 | ||
| 1860 | /* | 1882 | /* |
| 1861 | * if caller didn't supply a lock, they get per-queue locking with | 1883 | * if caller didn't supply a lock, they get per-queue locking with |
| @@ -1891,9 +1913,7 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id) | |||
| 1891 | return q; | 1913 | return q; |
| 1892 | } | 1914 | } |
| 1893 | 1915 | ||
| 1894 | blk_cleanup_queue(q); | 1916 | blk_put_queue(q); |
| 1895 | out_init: | ||
| 1896 | kmem_cache_free(requestq_cachep, q); | ||
| 1897 | return NULL; | 1917 | return NULL; |
| 1898 | } | 1918 | } |
| 1899 | EXPORT_SYMBOL(blk_init_queue_node); | 1919 | EXPORT_SYMBOL(blk_init_queue_node); |
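Because blk_alloc_queue_node() now names and initializes the queue's kobject up front, later failure paths can unwind with a plain reference drop and let the release callback do the freeing; only the very first failure, blk_init_free_list() before anyone else can see the queue, still frees the cache object directly. The resulting shape (sketch; setup_step() stands in for the lock and elevator initialization):

static request_queue_t *init_sketch(int node_id)
{
        request_queue_t *q = blk_alloc_queue_node(GFP_KERNEL, node_id);

        if (!q)
                return NULL;            /* nothing to undo */
        if (setup_step(q)) {            /* any later failure */
                blk_put_queue(q);       /* release callback frees q */
                return NULL;
        }
        return q;
}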
| @@ -1901,7 +1921,7 @@ EXPORT_SYMBOL(blk_init_queue_node); | |||
| 1901 | int blk_get_queue(request_queue_t *q) | 1921 | int blk_get_queue(request_queue_t *q) |
| 1902 | { | 1922 | { |
| 1903 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { | 1923 | if (likely(!test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) { |
| 1904 | atomic_inc(&q->refcnt); | 1924 | kobject_get(&q->kobj); |
| 1905 | return 0; | 1925 | return 0; |
| 1906 | } | 1926 | } |
| 1907 | 1927 | ||
| @@ -3477,10 +3497,12 @@ void put_io_context(struct io_context *ioc) | |||
| 3477 | BUG_ON(atomic_read(&ioc->refcount) == 0); | 3497 | BUG_ON(atomic_read(&ioc->refcount) == 0); |
| 3478 | 3498 | ||
| 3479 | if (atomic_dec_and_test(&ioc->refcount)) { | 3499 | if (atomic_dec_and_test(&ioc->refcount)) { |
| 3500 | rcu_read_lock(); | ||
| 3480 | if (ioc->aic && ioc->aic->dtor) | 3501 | if (ioc->aic && ioc->aic->dtor) |
| 3481 | ioc->aic->dtor(ioc->aic); | 3502 | ioc->aic->dtor(ioc->aic); |
| 3482 | if (ioc->cic && ioc->cic->dtor) | 3503 | if (ioc->cic && ioc->cic->dtor) |
| 3483 | ioc->cic->dtor(ioc->cic); | 3504 | ioc->cic->dtor(ioc->cic); |
| 3505 | rcu_read_unlock(); | ||
| 3484 | 3506 | ||
| 3485 | kmem_cache_free(iocontext_cachep, ioc); | 3507 | kmem_cache_free(iocontext_cachep, ioc); |
| 3486 | } | 3508 | } |
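The rcu_read_lock() around the dtor calls only buys something if whoever tears these structures down unpublishes the pointer and defers the actual free past a grace period; presumably the cfq-iosched.c changes counted in the diffstat do exactly that. The generic pairing, with illustrative names (cic_cachep and the locals are not from this patch):

/* Reader side, as in put_io_context() above: */
rcu_read_lock();
if (ioc->cic && ioc->cic->dtor)
        ioc->cic->dtor(ioc->cic);
rcu_read_unlock();

/* Writer side (sketch): unpublish, wait out the readers, free. */
ioc->cic = NULL;
synchronize_rcu();              /* every reader above has finished */
kmem_cache_free(cic_cachep, cic);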
| @@ -3614,10 +3636,13 @@ static ssize_t | |||
| 3614 | queue_requests_store(struct request_queue *q, const char *page, size_t count) | 3636 | queue_requests_store(struct request_queue *q, const char *page, size_t count) |
| 3615 | { | 3637 | { |
| 3616 | struct request_list *rl = &q->rq; | 3638 | struct request_list *rl = &q->rq; |
| 3639 | unsigned long nr; | ||
| 3640 | int ret = queue_var_store(&nr, page, count); | ||
| 3641 | if (nr < BLKDEV_MIN_RQ) | ||
| 3642 | nr = BLKDEV_MIN_RQ; | ||
| 3617 | 3643 | ||
| 3618 | int ret = queue_var_store(&q->nr_requests, page, count); | 3644 | spin_lock_irq(q->queue_lock); |
| 3619 | if (q->nr_requests < BLKDEV_MIN_RQ) | 3645 | q->nr_requests = nr; |
| 3620 | q->nr_requests = BLKDEV_MIN_RQ; | ||
| 3621 | blk_queue_congestion_threshold(q); | 3646 | blk_queue_congestion_threshold(q); |
| 3622 | 3647 | ||
| 3623 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) | 3648 | if (rl->count[READ] >= queue_congestion_on_threshold(q)) |
| @@ -3643,6 +3668,7 @@ queue_requests_store(struct request_queue *q, const char *page, size_t count) | |||
| 3643 | blk_clear_queue_full(q, WRITE); | 3668 | blk_clear_queue_full(q, WRITE); |
| 3644 | wake_up(&rl->wait[WRITE]); | 3669 | wake_up(&rl->wait[WRITE]); |
| 3645 | } | 3670 | } |
| 3671 | spin_unlock_irq(q->queue_lock); | ||
| 3646 | return ret; | 3672 | return ret; |
| 3647 | } | 3673 | } |
| 3648 | 3674 | ||
| @@ -3758,13 +3784,19 @@ static ssize_t | |||
| 3758 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) | 3784 | queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page) |
| 3759 | { | 3785 | { |
| 3760 | struct queue_sysfs_entry *entry = to_queue(attr); | 3786 | struct queue_sysfs_entry *entry = to_queue(attr); |
| 3761 | struct request_queue *q; | 3787 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); |
| 3788 | ssize_t res; | ||
| 3762 | 3789 | ||
| 3763 | q = container_of(kobj, struct request_queue, kobj); | ||
| 3764 | if (!entry->show) | 3790 | if (!entry->show) |
| 3765 | return -EIO; | 3791 | return -EIO; |
| 3766 | 3792 | mutex_lock(&q->sysfs_lock); | |
| 3767 | return entry->show(q, page); | 3793 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { |
| 3794 | mutex_unlock(&q->sysfs_lock); | ||
| 3795 | return -ENOENT; | ||
| 3796 | } | ||
| 3797 | res = entry->show(q, page); | ||
| 3798 | mutex_unlock(&q->sysfs_lock); | ||
| 3799 | return res; | ||
| 3768 | } | 3800 | } |
| 3769 | 3801 | ||
| 3770 | static ssize_t | 3802 | static ssize_t |
| @@ -3772,13 +3804,20 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr, | |||
| 3772 | const char *page, size_t length) | 3804 | const char *page, size_t length) |
| 3773 | { | 3805 | { |
| 3774 | struct queue_sysfs_entry *entry = to_queue(attr); | 3806 | struct queue_sysfs_entry *entry = to_queue(attr); |
| 3775 | struct request_queue *q; | 3807 | request_queue_t *q = container_of(kobj, struct request_queue, kobj); |
| 3808 | |||
| 3809 | ssize_t res; | ||
| 3776 | 3810 | ||
| 3777 | q = container_of(kobj, struct request_queue, kobj); | ||
| 3778 | if (!entry->store) | 3811 | if (!entry->store) |
| 3779 | return -EIO; | 3812 | return -EIO; |
| 3780 | 3813 | mutex_lock(&q->sysfs_lock); | |
| 3781 | return entry->store(q, page, length); | 3814 | if (test_bit(QUEUE_FLAG_DEAD, &q->queue_flags)) { |
| 3815 | mutex_unlock(&q->sysfs_lock); | ||
| 3816 | return -ENOENT; | ||
| 3817 | } | ||
| 3818 | res = entry->store(q, page, length); | ||
| 3819 | mutex_unlock(&q->sysfs_lock); | ||
| 3820 | return res; | ||
| 3782 | } | 3821 | } |
| 3783 | 3822 | ||
| 3784 | static struct sysfs_ops queue_sysfs_ops = { | 3823 | static struct sysfs_ops queue_sysfs_ops = { |
| @@ -3789,6 +3828,7 @@ static struct sysfs_ops queue_sysfs_ops = { | |||
| 3789 | static struct kobj_type queue_ktype = { | 3828 | static struct kobj_type queue_ktype = { |
| 3790 | .sysfs_ops = &queue_sysfs_ops, | 3829 | .sysfs_ops = &queue_sysfs_ops, |
| 3791 | .default_attrs = default_attrs, | 3830 | .default_attrs = default_attrs, |
| 3831 | .release = blk_release_queue, | ||
| 3792 | }; | 3832 | }; |
| 3793 | 3833 | ||
| 3794 | int blk_register_queue(struct gendisk *disk) | 3834 | int blk_register_queue(struct gendisk *disk) |
| @@ -3801,19 +3841,17 @@ int blk_register_queue(struct gendisk *disk) | |||
| 3801 | return -ENXIO; | 3841 | return -ENXIO; |
| 3802 | 3842 | ||
| 3803 | q->kobj.parent = kobject_get(&disk->kobj); | 3843 | q->kobj.parent = kobject_get(&disk->kobj); |
| 3804 | if (!q->kobj.parent) | ||
| 3805 | return -EBUSY; | ||
| 3806 | 3844 | ||
| 3807 | snprintf(q->kobj.name, KOBJ_NAME_LEN, "%s", "queue"); | 3845 | ret = kobject_add(&q->kobj); |
| 3808 | q->kobj.ktype = &queue_ktype; | ||
| 3809 | |||
| 3810 | ret = kobject_register(&q->kobj); | ||
| 3811 | if (ret < 0) | 3846 | if (ret < 0) |
| 3812 | return ret; | 3847 | return ret; |
| 3813 | 3848 | ||
| 3849 | kobject_uevent(&q->kobj, KOBJ_ADD); | ||
| 3850 | |||
| 3814 | ret = elv_register_queue(q); | 3851 | ret = elv_register_queue(q); |
| 3815 | if (ret) { | 3852 | if (ret) { |
| 3816 | kobject_unregister(&q->kobj); | 3853 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
| 3854 | kobject_del(&q->kobj); | ||
| 3817 | return ret; | 3855 | return ret; |
| 3818 | } | 3856 | } |
| 3819 | 3857 | ||
| @@ -3827,7 +3865,8 @@ void blk_unregister_queue(struct gendisk *disk) | |||
| 3827 | if (q && q->request_fn) { | 3865 | if (q && q->request_fn) { |
| 3828 | elv_unregister_queue(q); | 3866 | elv_unregister_queue(q); |
| 3829 | 3867 | ||
| 3830 | kobject_unregister(&q->kobj); | 3868 | kobject_uevent(&q->kobj, KOBJ_REMOVE); |
| 3869 | kobject_del(&q->kobj); | ||
| 3831 | kobject_put(&disk->kobj); | 3870 | kobject_put(&disk->kobj); |
| 3832 | } | 3871 | } |
| 3833 | } | 3872 | } |
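Throughout this file the kobject_register()/kobject_unregister() pairs become explicit add/del calls with uevents. What matters is the call that is not open-coded: kobject_unregister() would also drop a reference, and that final put now belongs to blk_cleanup_queue(), so unregistering a queue removes it from sysfs without freeing it. Sketch of the equivalence, per the kobject API of this era:

/* kobject_unregister(k) is roughly: */
kobject_uevent(k, KOBJ_REMOVE);
kobject_del(k);         /* unlinked from sysfs; object still allocated */
kobject_put(k);         /* deliberately NOT done in blk_unregister_queue();
                         * the queue's last reference is dropped by
                         * blk_cleanup_queue()/blk_put_queue() instead */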
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 5f6d1a5cce11..0010704739e3 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
| @@ -1307,7 +1307,7 @@ static int __init loop_init(void) | |||
| 1307 | 1307 | ||
| 1308 | out_mem4: | 1308 | out_mem4: |
| 1309 | while (i--) | 1309 | while (i--) |
| 1310 | blk_put_queue(loop_dev[i].lo_queue); | 1310 | blk_cleanup_queue(loop_dev[i].lo_queue); |
| 1311 | devfs_remove("loop"); | 1311 | devfs_remove("loop"); |
| 1312 | i = max_loop; | 1312 | i = max_loop; |
| 1313 | out_mem3: | 1313 | out_mem3: |
| @@ -1328,7 +1328,7 @@ static void loop_exit(void) | |||
| 1328 | 1328 | ||
| 1329 | for (i = 0; i < max_loop; i++) { | 1329 | for (i = 0; i < max_loop; i++) { |
| 1330 | del_gendisk(disks[i]); | 1330 | del_gendisk(disks[i]); |
| 1331 | blk_put_queue(loop_dev[i].lo_queue); | 1331 | blk_cleanup_queue(loop_dev[i].lo_queue); |
| 1332 | put_disk(disks[i]); | 1332 | put_disk(disks[i]); |
| 1333 | } | 1333 | } |
| 1334 | devfs_remove("loop"); | 1334 | devfs_remove("loop"); |
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c index bc9b2bcd7dba..476a5b553f34 100644 --- a/drivers/block/pktcdvd.c +++ b/drivers/block/pktcdvd.c | |||
| @@ -2514,7 +2514,7 @@ static int pkt_setup_dev(struct pkt_ctrl_command *ctrl_cmd) | |||
| 2514 | return 0; | 2514 | return 0; |
| 2515 | 2515 | ||
| 2516 | out_new_dev: | 2516 | out_new_dev: |
| 2517 | blk_put_queue(disk->queue); | 2517 | blk_cleanup_queue(disk->queue); |
| 2518 | out_mem2: | 2518 | out_mem2: |
| 2519 | put_disk(disk); | 2519 | put_disk(disk); |
| 2520 | out_mem: | 2520 | out_mem: |
| @@ -2555,7 +2555,7 @@ static int pkt_remove_dev(struct pkt_ctrl_command *ctrl_cmd) | |||
| 2555 | DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); | 2555 | DPRINTK("pktcdvd: writer %s unmapped\n", pd->name); |
| 2556 | 2556 | ||
| 2557 | del_gendisk(pd->disk); | 2557 | del_gendisk(pd->disk); |
| 2558 | blk_put_queue(pd->disk->queue); | 2558 | blk_cleanup_queue(pd->disk->queue); |
| 2559 | put_disk(pd->disk); | 2559 | put_disk(pd->disk); |
| 2560 | 2560 | ||
| 2561 | pkt_devs[idx] = NULL; | 2561 | pkt_devs[idx] = NULL; |
diff --git a/drivers/block/umem.c b/drivers/block/umem.c index 4ada1268b40d..c16e66b9c7a7 100644 --- a/drivers/block/umem.c +++ b/drivers/block/umem.c | |||
| @@ -1131,7 +1131,7 @@ static void mm_pci_remove(struct pci_dev *dev) | |||
| 1131 | pci_free_consistent(card->dev, PAGE_SIZE*2, | 1131 | pci_free_consistent(card->dev, PAGE_SIZE*2, |
| 1132 | card->mm_pages[1].desc, | 1132 | card->mm_pages[1].desc, |
| 1133 | card->mm_pages[1].page_dma); | 1133 | card->mm_pages[1].page_dma); |
| 1134 | blk_put_queue(card->queue); | 1134 | blk_cleanup_queue(card->queue); |
| 1135 | } | 1135 | } |
| 1136 | 1136 | ||
| 1137 | static const struct pci_device_id mm_pci_ids[] = { { | 1137 | static const struct pci_device_id mm_pci_ids[] = { { |
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 745ca1f67b14..88d60202b9db 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
| @@ -840,7 +840,7 @@ static struct mapped_device *alloc_dev(unsigned int minor, int persistent) | |||
| 840 | bad3: | 840 | bad3: |
| 841 | mempool_destroy(md->io_pool); | 841 | mempool_destroy(md->io_pool); |
| 842 | bad2: | 842 | bad2: |
| 843 | blk_put_queue(md->queue); | 843 | blk_cleanup_queue(md->queue); |
| 844 | free_minor(minor); | 844 | free_minor(minor); |
| 845 | bad1: | 845 | bad1: |
| 846 | kfree(md); | 846 | kfree(md); |
| @@ -860,7 +860,7 @@ static void free_dev(struct mapped_device *md) | |||
| 860 | del_gendisk(md->disk); | 860 | del_gendisk(md->disk); |
| 861 | free_minor(minor); | 861 | free_minor(minor); |
| 862 | put_disk(md->disk); | 862 | put_disk(md->disk); |
| 863 | blk_put_queue(md->queue); | 863 | blk_cleanup_queue(md->queue); |
| 864 | kfree(md); | 864 | kfree(md); |
| 865 | } | 865 | } |
| 866 | 866 | ||
diff --git a/drivers/md/md.c b/drivers/md/md.c index d05e3125d298..5ed2228745cb 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
| @@ -213,8 +213,11 @@ static void mddev_put(mddev_t *mddev) | |||
| 213 | return; | 213 | return; |
| 214 | if (!mddev->raid_disks && list_empty(&mddev->disks)) { | 214 | if (!mddev->raid_disks && list_empty(&mddev->disks)) { |
| 215 | list_del(&mddev->all_mddevs); | 215 | list_del(&mddev->all_mddevs); |
| 216 | blk_put_queue(mddev->queue); | 216 | /* that blocks */ |
| 217 | blk_cleanup_queue(mddev->queue); | ||
| 218 | /* that also blocks */ | ||
| 217 | kobject_unregister(&mddev->kobj); | 219 | kobject_unregister(&mddev->kobj); |
| 220 | /* result blows... */ | ||
| 218 | } | 221 | } |
| 219 | spin_unlock(&all_mddevs_lock); | 222 | spin_unlock(&all_mddevs_lock); |
| 220 | } | 223 | } |
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index 2e727f49ad19..44133250da2e 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
| @@ -273,7 +273,7 @@ removeseg: | |||
| 273 | list_del(&dev_info->lh); | 273 | list_del(&dev_info->lh); |
| 274 | 274 | ||
| 275 | del_gendisk(dev_info->gd); | 275 | del_gendisk(dev_info->gd); |
| 276 | blk_put_queue(dev_info->dcssblk_queue); | 276 | blk_cleanup_queue(dev_info->dcssblk_queue); |
| 277 | dev_info->gd->queue = NULL; | 277 | dev_info->gd->queue = NULL; |
| 278 | put_disk(dev_info->gd); | 278 | put_disk(dev_info->gd); |
| 279 | device_unregister(dev); | 279 | device_unregister(dev); |
| @@ -491,7 +491,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char | |||
| 491 | unregister_dev: | 491 | unregister_dev: |
| 492 | PRINT_ERR("device_create_file() failed!\n"); | 492 | PRINT_ERR("device_create_file() failed!\n"); |
| 493 | list_del(&dev_info->lh); | 493 | list_del(&dev_info->lh); |
| 494 | blk_put_queue(dev_info->dcssblk_queue); | 494 | blk_cleanup_queue(dev_info->dcssblk_queue); |
| 495 | dev_info->gd->queue = NULL; | 495 | dev_info->gd->queue = NULL; |
| 496 | put_disk(dev_info->gd); | 496 | put_disk(dev_info->gd); |
| 497 | device_unregister(&dev_info->dev); | 497 | device_unregister(&dev_info->dev); |
| @@ -505,7 +505,7 @@ list_del: | |||
| 505 | unload_seg: | 505 | unload_seg: |
| 506 | segment_unload(local_buf); | 506 | segment_unload(local_buf); |
| 507 | dealloc_gendisk: | 507 | dealloc_gendisk: |
| 508 | blk_put_queue(dev_info->dcssblk_queue); | 508 | blk_cleanup_queue(dev_info->dcssblk_queue); |
| 509 | dev_info->gd->queue = NULL; | 509 | dev_info->gd->queue = NULL; |
| 510 | put_disk(dev_info->gd); | 510 | put_disk(dev_info->gd); |
| 511 | free_dev_info: | 511 | free_dev_info: |
| @@ -562,7 +562,7 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch | |||
| 562 | list_del(&dev_info->lh); | 562 | list_del(&dev_info->lh); |
| 563 | 563 | ||
| 564 | del_gendisk(dev_info->gd); | 564 | del_gendisk(dev_info->gd); |
| 565 | blk_put_queue(dev_info->dcssblk_queue); | 565 | blk_cleanup_queue(dev_info->dcssblk_queue); |
| 566 | dev_info->gd->queue = NULL; | 566 | dev_info->gd->queue = NULL; |
| 567 | put_disk(dev_info->gd); | 567 | put_disk(dev_info->gd); |
| 568 | device_unregister(&dev_info->dev); | 568 | device_unregister(&dev_info->dev); |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 860e7a485a5f..56bb6a4e15f3 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -58,7 +58,7 @@ struct cfq_io_context { | |||
| 58 | * circular list of cfq_io_contexts belonging to a process io context | 58 | * circular list of cfq_io_contexts belonging to a process io context |
| 59 | */ | 59 | */ |
| 60 | struct list_head list; | 60 | struct list_head list; |
| 61 | struct cfq_queue *cfqq; | 61 | struct cfq_queue *cfqq[2]; |
| 62 | void *key; | 62 | void *key; |
| 63 | 63 | ||
| 64 | struct io_context *ioc; | 64 | struct io_context *ioc; |
| @@ -69,6 +69,8 @@ struct cfq_io_context { | |||
| 69 | unsigned long ttime_samples; | 69 | unsigned long ttime_samples; |
| 70 | unsigned long ttime_mean; | 70 | unsigned long ttime_mean; |
| 71 | 71 | ||
| 72 | struct list_head queue_list; | ||
| 73 | |||
| 72 | void (*dtor)(struct cfq_io_context *); | 74 | void (*dtor)(struct cfq_io_context *); |
| 73 | void (*exit)(struct cfq_io_context *); | 75 | void (*exit)(struct cfq_io_context *); |
| 74 | }; | 76 | }; |
| @@ -404,8 +406,6 @@ struct request_queue | |||
| 404 | 406 | ||
| 405 | struct blk_queue_tag *queue_tags; | 407 | struct blk_queue_tag *queue_tags; |
| 406 | 408 | ||
| 407 | atomic_t refcnt; | ||
| 408 | |||
| 409 | unsigned int nr_sorted; | 409 | unsigned int nr_sorted; |
| 410 | unsigned int in_flight; | 410 | unsigned int in_flight; |
| 411 | 411 | ||
| @@ -424,6 +424,8 @@ struct request_queue | |||
| 424 | struct request pre_flush_rq, bar_rq, post_flush_rq; | 424 | struct request pre_flush_rq, bar_rq, post_flush_rq; |
| 425 | struct request *orig_bar_rq; | 425 | struct request *orig_bar_rq; |
| 426 | unsigned int bi_size; | 426 | unsigned int bi_size; |
| 427 | |||
| 428 | struct mutex sysfs_lock; | ||
| 427 | }; | 429 | }; |
| 428 | 430 | ||
| 429 | #define RQ_INACTIVE (-1) | 431 | #define RQ_INACTIVE (-1) |
| @@ -725,7 +727,7 @@ extern long nr_blockdev_pages(void); | |||
| 725 | int blk_get_queue(request_queue_t *); | 727 | int blk_get_queue(request_queue_t *); |
| 726 | request_queue_t *blk_alloc_queue(gfp_t); | 728 | request_queue_t *blk_alloc_queue(gfp_t); |
| 727 | request_queue_t *blk_alloc_queue_node(gfp_t, int); | 729 | request_queue_t *blk_alloc_queue_node(gfp_t, int); |
| 728 | #define blk_put_queue(q) blk_cleanup_queue((q)) | 730 | extern void blk_put_queue(request_queue_t *); |
| 729 | 731 | ||
| 730 | /* | 732 | /* |
| 731 | * tag stuff | 733 | * tag stuff |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 18cf1f3e1184..ad133fcfb239 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -48,10 +48,17 @@ struct elevator_ops | |||
| 48 | 48 | ||
| 49 | elevator_init_fn *elevator_init_fn; | 49 | elevator_init_fn *elevator_init_fn; |
| 50 | elevator_exit_fn *elevator_exit_fn; | 50 | elevator_exit_fn *elevator_exit_fn; |
| 51 | void (*trim)(struct io_context *); | ||
| 51 | }; | 52 | }; |
| 52 | 53 | ||
| 53 | #define ELV_NAME_MAX (16) | 54 | #define ELV_NAME_MAX (16) |
| 54 | 55 | ||
| 56 | struct elv_fs_entry { | ||
| 57 | struct attribute attr; | ||
| 58 | ssize_t (*show)(elevator_t *, char *); | ||
| 59 | ssize_t (*store)(elevator_t *, const char *, size_t); | ||
| 60 | }; | ||
| 61 | |||
| 55 | /* | 62 | /* |
| 56 | * identifies an elevator type, such as AS or deadline | 63 | * identifies an elevator type, such as AS or deadline |
| 57 | */ | 64 | */ |
| @@ -60,7 +67,7 @@ struct elevator_type | |||
| 60 | struct list_head list; | 67 | struct list_head list; |
| 61 | struct elevator_ops ops; | 68 | struct elevator_ops ops; |
| 62 | struct elevator_type *elevator_type; | 69 | struct elevator_type *elevator_type; |
| 63 | struct kobj_type *elevator_ktype; | 70 | struct elv_fs_entry *elevator_attrs; |
| 64 | char elevator_name[ELV_NAME_MAX]; | 71 | char elevator_name[ELV_NAME_MAX]; |
| 65 | struct module *elevator_owner; | 72 | struct module *elevator_owner; |
| 66 | }; | 73 | }; |
| @@ -74,6 +81,7 @@ struct elevator_queue | |||
| 74 | void *elevator_data; | 81 | void *elevator_data; |
| 75 | struct kobject kobj; | 82 | struct kobject kobj; |
| 76 | struct elevator_type *elevator_type; | 83 | struct elevator_type *elevator_type; |
| 84 | struct mutex sysfs_lock; | ||
| 77 | }; | 85 | }; |
| 78 | 86 | ||
| 79 | /* | 87 | /* |
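The elv_fs_entry handlers take the elevator_t itself rather than a scheduler-private pointer, which is what allows one generic ktype in elevator.c to serve every scheduler; each handler digs its own state out of elevator_data. Shape of a handler under the new signature (struct example_data and the tunable are stand-ins):

static ssize_t example_show(elevator_t *e, char *page)
{
        struct example_data *ed = e->elevator_data;     /* scheduler private */

        return sprintf(page, "%u\n", ed->some_tunable);
}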
diff --git a/kernel/exit.c b/kernel/exit.c index 531aadca5530..d1e8d500a7e1 100644 --- a/kernel/exit.c +++ b/kernel/exit.c | |||
| @@ -807,8 +807,6 @@ fastcall NORET_TYPE void do_exit(long code) | |||
| 807 | panic("Attempted to kill the idle task!"); | 807 | panic("Attempted to kill the idle task!"); |
| 808 | if (unlikely(tsk->pid == 1)) | 808 | if (unlikely(tsk->pid == 1)) |
| 809 | panic("Attempted to kill init!"); | 809 | panic("Attempted to kill init!"); |
| 810 | if (tsk->io_context) | ||
| 811 | exit_io_context(); | ||
| 812 | 810 | ||
| 813 | if (unlikely(current->ptrace & PT_TRACE_EXIT)) { | 811 | if (unlikely(current->ptrace & PT_TRACE_EXIT)) { |
| 814 | current->ptrace_message = code; | 812 | current->ptrace_message = code; |
| @@ -822,6 +820,8 @@ fastcall NORET_TYPE void do_exit(long code) | |||
| 822 | if (unlikely(tsk->flags & PF_EXITING)) { | 820 | if (unlikely(tsk->flags & PF_EXITING)) { |
| 823 | printk(KERN_ALERT | 821 | printk(KERN_ALERT |
| 824 | "Fixing recursive fault but reboot is needed!\n"); | 822 | "Fixing recursive fault but reboot is needed!\n"); |
| 823 | if (tsk->io_context) | ||
| 824 | exit_io_context(); | ||
| 825 | set_current_state(TASK_UNINTERRUPTIBLE); | 825 | set_current_state(TASK_UNINTERRUPTIBLE); |
| 826 | schedule(); | 826 | schedule(); |
| 827 | } | 827 | } |
| @@ -881,6 +881,9 @@ fastcall NORET_TYPE void do_exit(long code) | |||
| 881 | */ | 881 | */ |
| 882 | mutex_debug_check_no_locks_held(tsk); | 882 | mutex_debug_check_no_locks_held(tsk); |
| 883 | 883 | ||
| 884 | if (tsk->io_context) | ||
| 885 | exit_io_context(); | ||
| 886 | |||
| 884 | /* PF_DEAD causes final put_task_struct after we schedule. */ | 887 | /* PF_DEAD causes final put_task_struct after we schedule. */ |
| 885 | preempt_disable(); | 888 | preempt_disable(); |
| 886 | BUG_ON(tsk->flags & PF_DEAD); | 889 | BUG_ON(tsk->flags & PF_DEAD); |
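Moving exit_io_context() from the top of do_exit() to just before the final schedule keeps the task's io_context alive across exit_mm() and the other teardown steps that can still submit or wait on I/O, which is presumably what the io-context rework in this series depends on; the recursive-fault path gets its own call because a task stuck there never reaches the bottom of do_exit(). The resulting ordering, heavily elided:

/*
 *      exit_mm(tsk);                   // can still block on I/O
 *      ...                             // files/fs/namespace teardown,
 *                                      // which may also write back data
 *      mutex_debug_check_no_locks_held(tsk);
 *      if (tsk->io_context)
 *              exit_io_context();      // only now: no more I/O follows
 *      preempt_disable();              // final schedule, PF_DEAD
 */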
