Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c   |  8 ++++----
-rw-r--r--  block/blk-timeout.c  | 12 +++++-------
-rw-r--r--  block/cfq-iosched.c  |  2 ++
3 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5fe03def34b2..2cc682b860ea 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -286,16 +286,16 @@ done:
 static struct cgroup_subsys_state *
 blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 {
-	struct blkio_cgroup *blkcg, *parent_blkcg;
+	struct blkio_cgroup *blkcg;
+	struct cgroup *parent = cgroup->parent;
 
-	if (!cgroup->parent) {
+	if (!parent) {
 		blkcg = &blkio_root_cgroup;
 		goto done;
 	}
 
 	/* Currently we do not support hierarchy deeper than two level (0,1) */
-	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
-	if (css_depth(&parent_blkcg->css) > 0)
+	if (parent != cgroup->top_cgroup)
 		return ERR_PTR(-EINVAL);
 
 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
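
The blk-cgroup.c hunk drops the css_depth() lookup on the parent's css and
instead rejects any group whose parent is not the hierarchy root, enforcing
the same "no deeper than two levels (0,1)" rule with a single pointer
comparison. A minimal userspace sketch of that check; the struct here is a
stand-in carrying only the fields the comparison reads, not the kernel
definition:

#include <stdio.h>

/* Stand-in for the kernel struct: only the fields the check needs. */
struct cgroup {
	struct cgroup *parent;
	struct cgroup *top_cgroup;	/* root of this hierarchy */
};

/* Allow only the root (depth 0) and its direct children (depth 1). */
static int create_allowed(const struct cgroup *cgroup)
{
	if (!cgroup->parent)		/* the root group itself */
		return 1;
	return cgroup->parent == cgroup->top_cgroup;
}

int main(void)
{
	struct cgroup root = { .parent = NULL };
	struct cgroup child, grandchild;

	root.top_cgroup = &root;
	child = (struct cgroup){ .parent = &root, .top_cgroup = &root };
	grandchild = (struct cgroup){ .parent = &child, .top_cgroup = &root };

	/* prints: root 1, child 1, grandchild 0 */
	printf("root %d, child %d, grandchild %d\n",
	       create_allowed(&root), create_allowed(&child),
	       create_allowed(&grandchild));
	return 0;
}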
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ba7e0aca878..4f0c06c7a338 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 	struct request_queue *q = (struct request_queue *) data;
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
+	int next_set = 0;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
 			if (blk_mark_rq_complete(rq))
 				continue;
 			blk_rq_timed_out(rq);
-		} else if (!next || time_after(next, rq->deadline))
+		} else if (!next_set || time_after(next, rq->deadline)) {
 			next = rq->deadline;
+			next_set = 1;
+		}
 	}
 
-	/*
-	 * next can never be 0 here with the list non-empty, since we always
-	 * bump ->deadline to 1 so we can detect if the timer was ever added
-	 * or not. See comment in blk_add_timer()
-	 */
-	if (next)
+	if (next_set)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
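
The blk-timeout.c hunks replace the "next == 0 means unset" convention with
an explicit next_set flag: 0 is a valid jiffies deadline once the counter
wraps, so the old code had to rely on blk_add_timer() bumping deadlines away
from 0, the workaround the now-deleted comment documented. A userspace
sketch of the flag pattern, with the wrap-safe time_after() comparison
written out; dl and earliest_deadline are illustrative names, not kernel
interfaces:

#include <stdio.h>

/* Wrap-safe "a is later than b", same idea as the kernel macro. */
#define time_after(a, b)	((long)((b) - (a)) < 0)

/* Return 1 and store the earliest deadline in *next, or 0 if none. */
static int earliest_deadline(const unsigned long *dl, int n,
			     unsigned long *next)
{
	int next_set = 0;

	for (int i = 0; i < n; i++) {
		if (!next_set || time_after(*next, dl[i])) {
			*next = dl[i];
			next_set = 1;
		}
	}
	return next_set;	/* caller re-arms the timer only if set */
}

int main(void)
{
	unsigned long dl[] = { 5, 0, 12 };	/* 0 is a legitimate deadline */
	unsigned long next;

	if (earliest_deadline(dl, 3, &next))
		printf("arm timer for %lu\n", next);	/* prints 0 */
	return 0;
}

With the sentinel convention, the deadline of 0 above would have been
mistaken for "no deadline pending" and the timer left unarmed.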
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 838834be115b..5f127cfb2e92 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3694,8 +3694,10 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
 	atomic_set(&cfqg->ref, 1);
+	rcu_read_lock();
 	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
 					0);
+	rcu_read_unlock();
 #endif
 	/*
 	 * Not strictly needed (since RB_ROOT just clears the node and we
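
The cfq-iosched.c hunk brackets the blkiocg_add_blkio_group() call with
rcu_read_lock()/rcu_read_unlock(), putting it inside an RCU read-side
critical section, presumably because the callee touches RCU-protected
cgroup state. A userspace sketch of the same read-side rule using liburcu;
this is an assumption (build with gcc -o sketch sketch.c -lurcu), and the
config struct and names are illustrative:

#include <stdio.h>
#include <urcu.h>		/* userspace RCU, classic flat API */

struct config {
	int weight;
};

static struct config *current_config;	/* RCU-protected pointer */

static void reader(void)
{
	struct config *c;

	rcu_read_lock();	/* enter read-side critical section */
	c = rcu_dereference(current_config);
	if (c)
		printf("weight = %d\n", c->weight);
	rcu_read_unlock();	/* after this, updaters may free *c */
}

int main(void)
{
	static struct config initial = { .weight = 500 };

	rcu_register_thread();	/* liburcu requires reader registration */
	rcu_assign_pointer(current_config, &initial);
	reader();
	rcu_unregister_thread();
	return 0;
}

The rule mirrored from the hunk above: every access to the protected data
happens between the lock/unlock pair, which is what the added
rcu_read_lock()/rcu_read_unlock() guarantee around the
blkiocg_add_blkio_group() call.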