| author | Tejun Heo <tj@kernel.org> | 2009-07-03 18:13:18 -0400 |
|---|---|---|
| committer | Tejun Heo <tj@kernel.org> | 2009-07-03 18:13:18 -0400 |
| commit | c43768cbb7655ea5ff782ae250f6e2ef4297cf98 (patch) | |
| tree | 3982e41dde3eecaa3739a5d1a8ed18d04bd74f01 /block/cfq-iosched.c | |
| parent | 1a8dd307cc0a2119be4e578c517795464e6dabba (diff) | |
| parent | 746a99a5af60ee676afa2ba469ccd1373493c7e7 (diff) | |
Merge branch 'master' into for-next
Pull linus#master to merge the PER_CPU_DEF_ATTRIBUTES and alpha build fix
changes. Since alpha in the percpu tree uses the 'weak' attribute instead
of inline assembly, the __used attribute is no longer needed.
Conflicts:
	arch/alpha/include/asm/percpu.h
	arch/mn10300/kernel/vmlinux.lds.S
	include/linux/percpu-defs.h
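For context on the attribute note above: a symbol referenced only from assembly looks dead to the C compiler and needs __attribute__((used)) to survive optimization, whereas a weak definition is an ordinary C-level symbol the toolchain tracks (and that a strong definition elsewhere overrides), so it needs no such annotation. A minimal user-space sketch of the distinction; the pcpu_* names are hypothetical, not taken from the percpu tree:

```c
#include <stdio.h>

/* kernel-style shorthands for the two attributes */
#define __used __attribute__((used))
#define __weak __attribute__((weak))

/* Imagine this is read only from a hand-written .S file: without
 * __used the compiler could discard it as an unreferenced static. */
static unsigned long pcpu_unit_size __used = 4096;

/* Generic weak fallback; an architecture may link in a strong
 * override. No __used needed -- callers reference it in plain C. */
__weak int pcpu_arch_setup(void)
{
	return 0;
}

int main(void)
{
	printf("pcpu_arch_setup() = %d\n", pcpu_arch_setup());
	return 0;
}
```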
Diffstat (limited to 'block/cfq-iosched.c')

-rw-r--r--	block/cfq-iosched.c	176

1 file changed, 93 insertions(+), 83 deletions(-)
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 0f1cc7d3855e..85208dd1d05b 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -71,6 +71,51 @@ struct cfq_rb_root {
 #define CFQ_RB_ROOT	(struct cfq_rb_root) { RB_ROOT, NULL, }
 
 /*
+ * Per process-grouping structure
+ */
+struct cfq_queue {
+	/* reference count */
+	atomic_t ref;
+	/* various state flags, see below */
+	unsigned int flags;
+	/* parent cfq_data */
+	struct cfq_data *cfqd;
+	/* service_tree member */
+	struct rb_node rb_node;
+	/* service_tree key */
+	unsigned long rb_key;
+	/* prio tree member */
+	struct rb_node p_node;
+	/* prio tree root we belong to, if any */
+	struct rb_root *p_root;
+	/* sorted list of pending requests */
+	struct rb_root sort_list;
+	/* if fifo isn't expired, next request to serve */
+	struct request *next_rq;
+	/* requests queued in sort_list */
+	int queued[2];
+	/* currently allocated requests */
+	int allocated[2];
+	/* fifo list of requests in sort_list */
+	struct list_head fifo;
+
+	unsigned long slice_end;
+	long slice_resid;
+	unsigned int slice_dispatch;
+
+	/* pending metadata requests */
+	int meta_pending;
+	/* number of requests that are on the dispatch list or inside driver */
+	int dispatched;
+
+	/* io prio of this group */
+	unsigned short ioprio, org_ioprio;
+	unsigned short ioprio_class, org_ioprio_class;
+
+	pid_t pid;
+};
+
+/*
  * Per block device queue structure
  */
 struct cfq_data {
@@ -135,51 +180,11 @@ struct cfq_data {
 	unsigned int cfq_slice_idle;
 
 	struct list_head cic_list;
-};
-
-/*
- * Per process-grouping structure
- */
-struct cfq_queue {
-	/* reference count */
-	atomic_t ref;
-	/* various state flags, see below */
-	unsigned int flags;
-	/* parent cfq_data */
-	struct cfq_data *cfqd;
-	/* service_tree member */
-	struct rb_node rb_node;
-	/* service_tree key */
-	unsigned long rb_key;
-	/* prio tree member */
-	struct rb_node p_node;
-	/* prio tree root we belong to, if any */
-	struct rb_root *p_root;
-	/* sorted list of pending requests */
-	struct rb_root sort_list;
-	/* if fifo isn't expired, next request to serve */
-	struct request *next_rq;
-	/* requests queued in sort_list */
-	int queued[2];
-	/* currently allocated requests */
-	int allocated[2];
-	/* fifo list of requests in sort_list */
-	struct list_head fifo;
 
-	unsigned long slice_end;
-	long slice_resid;
-	unsigned int slice_dispatch;
-
-	/* pending metadata requests */
-	int meta_pending;
-	/* number of requests that are on the dispatch list or inside driver */
-	int dispatched;
-
-	/* io prio of this group */
-	unsigned short ioprio, org_ioprio;
-	unsigned short ioprio_class, org_ioprio_class;
-
-	pid_t pid;
+	/*
+	 * Fallback dummy cfqq for extreme OOM conditions
+	 */
+	struct cfq_queue oom_cfqq;
 };
 
 enum cfqq_state_flags {
@@ -1641,6 +1646,26 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc)
 	ioc->ioprio_changed = 0;
 }
 
+static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
+			  pid_t pid, int is_sync)
+{
+	RB_CLEAR_NODE(&cfqq->rb_node);
+	RB_CLEAR_NODE(&cfqq->p_node);
+	INIT_LIST_HEAD(&cfqq->fifo);
+
+	atomic_set(&cfqq->ref, 0);
+	cfqq->cfqd = cfqd;
+
+	cfq_mark_cfqq_prio_changed(cfqq);
+
+	if (is_sync) {
+		if (!cfq_class_idle(cfqq))
+			cfq_mark_cfqq_idle_window(cfqq);
+		cfq_mark_cfqq_sync(cfqq);
+	}
+	cfqq->pid = pid;
+}
+
 static struct cfq_queue *
 cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync,
 		     struct io_context *ioc, gfp_t gfp_mask)
@@ -1653,56 +1678,40 @@ retry:
 	/* cic always exists here */
 	cfqq = cic_to_cfqq(cic, is_sync);
 
-	if (!cfqq) {
+	/*
+	 * Always try a new alloc if we fell back to the OOM cfqq
+	 * originally, since it should just be a temporary situation.
+	 */
+	if (!cfqq || cfqq == &cfqd->oom_cfqq) {
+		cfqq = NULL;
 		if (new_cfqq) {
 			cfqq = new_cfqq;
 			new_cfqq = NULL;
 		} else if (gfp_mask & __GFP_WAIT) {
-			/*
-			 * Inform the allocator of the fact that we will
-			 * just repeat this allocation if it fails, to allow
-			 * the allocator to do whatever it needs to attempt to
-			 * free memory.
-			 */
 			spin_unlock_irq(cfqd->queue->queue_lock);
 			new_cfqq = kmem_cache_alloc_node(cfq_pool,
-					gfp_mask | __GFP_NOFAIL | __GFP_ZERO,
+					gfp_mask | __GFP_ZERO,
 					cfqd->queue->node);
 			spin_lock_irq(cfqd->queue->queue_lock);
-			goto retry;
+			if (new_cfqq)
+				goto retry;
 		} else {
 			cfqq = kmem_cache_alloc_node(cfq_pool,
 					gfp_mask | __GFP_ZERO,
 					cfqd->queue->node);
-			if (!cfqq)
-				goto out;
 		}
 
-		RB_CLEAR_NODE(&cfqq->rb_node);
-		RB_CLEAR_NODE(&cfqq->p_node);
-		INIT_LIST_HEAD(&cfqq->fifo);
-
-		atomic_set(&cfqq->ref, 0);
-		cfqq->cfqd = cfqd;
-
-		cfq_mark_cfqq_prio_changed(cfqq);
-
-		cfq_init_prio_data(cfqq, ioc);
-
-		if (is_sync) {
-			if (!cfq_class_idle(cfqq))
-				cfq_mark_cfqq_idle_window(cfqq);
-			cfq_mark_cfqq_sync(cfqq);
-		}
-		cfqq->pid = current->pid;
-		cfq_log_cfqq(cfqd, cfqq, "alloced");
+		if (cfqq) {
+			cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
+			cfq_init_prio_data(cfqq, ioc);
+			cfq_log_cfqq(cfqd, cfqq, "alloced");
+		} else
+			cfqq = &cfqd->oom_cfqq;
 	}
 
 	if (new_cfqq)
 		kmem_cache_free(cfq_pool, new_cfqq);
 
-out:
-	WARN_ON((gfp_mask & __GFP_WAIT) && !cfqq);
 	return cfqq;
 }
 
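The reworked hunk above keeps a long-standing shape: drop the queue lock around a blocking allocation, retake it, and jump back to re-check state that may have changed while the lock was not held. What changes is that __GFP_NOFAIL is gone, so a failed allocation now falls through to the fallback instead of looping forever. A minimal user-space sketch of that drop-lock, allocate, retry shape, assuming a pthread mutex in place of queue_lock (names hypothetical):

```c
#include <pthread.h>
#include <stdlib.h>

struct item { int value; };

struct queue {
	pthread_mutex_t lock;
	struct item *cached;	/* may be filled by others while unlocked */
};

/*
 * Called with q->lock held; returns with it held. Mirrors the shape of
 * cfq_find_alloc_queue(): the lock is dropped around a potentially
 * blocking allocation, then the lookup is retried because another
 * thread may have installed an item while we slept.
 */
static struct item *find_alloc_item(struct queue *q)
{
	struct item *new_item = NULL;
	struct item *item;

retry:
	item = q->cached;
	if (!item) {
		if (new_item) {
			/* use the spare allocated on a previous pass */
			item = new_item;
			new_item = NULL;
			q->cached = item;
		} else {
			pthread_mutex_unlock(&q->lock);
			new_item = malloc(sizeof(*new_item));
			pthread_mutex_lock(&q->lock);
			if (new_item)
				goto retry;	/* re-check under the lock */
			/* allocation failed: caller must cope with NULL */
		}
	}
	if (new_item)
		free(new_item);	/* raced: someone else installed one */
	return item;
}
```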
@@ -1735,11 +1744,8 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc,
 		cfqq = *async_cfqq;
 	}
 
-	if (!cfqq) {
+	if (!cfqq)
 		cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask);
-		if (!cfqq)
-			return NULL;
-	}
 
 	/*
 	 * pin the queue now that it's allocated, scheduler exit will prune it
@@ -2307,10 +2313,6 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask)
 	cfqq = cic_to_cfqq(cic, is_sync);
 	if (!cfqq) {
 		cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask);
-
-		if (!cfqq)
-			goto queue_fail;
-
 		cic_set_cfqq(cic, cfqq, is_sync);
 	}
 
@@ -2465,6 +2467,14 @@ static void *cfq_init_queue(struct request_queue *q)
 	for (i = 0; i < CFQ_PRIO_LISTS; i++)
 		cfqd->prio_trees[i] = RB_ROOT;
 
+	/*
+	 * Our fallback cfqq if cfq_find_alloc_queue() runs into OOM issues.
+	 * Grab a permanent reference to it, so that the normal code flow
+	 * will not attempt to free it.
+	 */
+	cfq_init_cfqq(cfqd, &cfqd->oom_cfqq, 1, 0);
+	atomic_inc(&cfqd->oom_cfqq.ref);
+
 	INIT_LIST_HEAD(&cfqd->cic_list);
 
 	cfqd->queue = q;
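Taken together, the change swaps "propagate NULL on allocation failure" for "hand out a statically embedded dummy that holds a permanent reference". A minimal sketch of that fallback-object pattern, with hypothetical names and the refcount reduced to a plain counter (the kernel uses atomic_t):

```c
#include <stdlib.h>

struct conn { int refcnt; };

struct server {
	struct conn oom_conn;	/* embedded fallback, never freed */
};

static void conn_init(struct conn *c)
{
	c->refcnt = 0;
}

static void server_init(struct server *s)
{
	conn_init(&s->oom_conn);
	/*
	 * Permanent reference: the count can never drop back to zero,
	 * so the normal put path below will not free the embedded object.
	 */
	s->oom_conn.refcnt++;
}

static struct conn *conn_get(struct server *s)
{
	struct conn *c = malloc(sizeof(*c));

	if (c)
		conn_init(c);
	else
		c = &s->oom_conn;	/* degrade instead of failing */
	c->refcnt++;
	return c;
}

static void conn_put(struct conn *c)
{
	if (--c->refcnt == 0)
		free(c);	/* never reached for the embedded oom_conn */
}

int main(void)
{
	struct server srv;

	server_init(&srv);
	conn_put(conn_get(&srv));	/* real alloc, or the dummy under OOM */
	return 0;
}
```

As the comment added to cfq_find_alloc_queue() notes, a later lookup that finds the dummy always retries a real allocation, so the degraded state is only temporary.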