author	Vivek Goyal <vgoyal@redhat.com>	2010-10-01 08:49:49 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2010-10-01 08:49:49 -0400
commit	fe0714377ee2ca161bf2afb7773e22f15f1786d4 (patch)
tree	09f5e8686d741d012333c92251b8cc66793ef916 /block/blk-cgroup.c
parent	02977e4af7ed3b478c505e50491ffdf3e1314cf4 (diff)
blkio: Recalculate the throttled bio dispatch time upon throttle limit change
o Currently any cgroup throttle limit changes are processed asynchronously and
  the change does not take effect till a new bio is dispatched from the same
  group.

o It might happen that a user sets a ridiculously low limit on throttling.
  Say 1 byte per second on reads. In such cases simple operations like
  mounting a disk can wait for a very long time.

o Once a bio is throttled, there is no easy way to come out of that wait even
  if the user increases the read limit later.

o This patch fixes it. Now if a user changes the cgroup limits, we recalculate
  the bio dispatch time according to the new limits.

o We can't take the queue lock under blkcg_lock, hence after the change I wake
  up the dispatch thread again, which recalculates the time. So there are some
  variables being synchronized across two threads without a lock and I had to
  make use of barriers. Hoping I have used barriers correctly. Any review of
  the memory barrier code especially will help.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
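The last point is the subtle one, so here is a minimal sketch of the barrier
pattern being described. It is illustrative only, not the actual blk-throttle
code: the struct layout and the helper names (throtl_schedule_delayed_work(),
tg_update_disptime()) are assumptions. The updater publishes the new limit,
issues a write barrier, sets a flag, and wakes the dispatch thread; the
dispatch thread pairs that with a read barrier before recomputing the
dispatch time.

#include <linux/blkdev.h>
#include <linux/smp.h>

struct throtl_grp {
	u64 bps[2];		/* READ/WRITE throttle limits */
	bool limits_changed;	/* set by updater, cleared by dispatcher */
};

/* Assumed helpers, named for illustration only. */
void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
void tg_update_disptime(struct throtl_grp *tg);

/* Update path: runs under blkcg_lock, so it must not take the queue lock. */
static void tg_set_read_bps(struct request_queue *q, struct throtl_grp *tg,
			    u64 bps)
{
	tg->bps[READ] = bps;
	smp_wmb();			/* publish the limit before the flag */
	tg->limits_changed = true;
	throtl_schedule_delayed_work(q, 0);	/* kick the dispatch thread */
}

/* Dispatch path: runs with the queue lock held. */
static void tg_recalc_if_needed(struct throtl_grp *tg)
{
	if (!tg->limits_changed)
		return;
	smp_rmb();			/* pairs with smp_wmb() above */
	tg->limits_changed = false;
	tg_update_disptime(tg);		/* recompute using new tg->bps[] */
}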
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b06ca70354e3..52c12130a5de 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -124,7 +124,8 @@ blkio_update_group_weight(struct blkio_group *blkg, unsigned int weight)
 		if (blkiop->plid != blkg->plid)
 			continue;
 		if (blkiop->ops.blkio_update_group_weight_fn)
-			blkiop->ops.blkio_update_group_weight_fn(blkg, weight);
+			blkiop->ops.blkio_update_group_weight_fn(blkg->key,
+							blkg, weight);
 	}
 }
 
@@ -141,11 +142,13 @@ static inline void blkio_update_group_bps(struct blkio_group *blkg, u64 bps,
 
 		if (fileid == BLKIO_THROTL_read_bps_device
 		    && blkiop->ops.blkio_update_group_read_bps_fn)
-			blkiop->ops.blkio_update_group_read_bps_fn(blkg, bps);
+			blkiop->ops.blkio_update_group_read_bps_fn(blkg->key,
+								blkg, bps);
 
 		if (fileid == BLKIO_THROTL_write_bps_device
 		    && blkiop->ops.blkio_update_group_write_bps_fn)
-			blkiop->ops.blkio_update_group_write_bps_fn(blkg, bps);
+			blkiop->ops.blkio_update_group_write_bps_fn(blkg->key,
+								blkg, bps);
 	}
 }
 
@@ -162,11 +165,13 @@ static inline void blkio_update_group_iops(struct blkio_group *blkg,
 
 		if (fileid == BLKIO_THROTL_read_iops_device
 		    && blkiop->ops.blkio_update_group_read_iops_fn)
-			blkiop->ops.blkio_update_group_read_iops_fn(blkg, iops);
+			blkiop->ops.blkio_update_group_read_iops_fn(blkg->key,
+								blkg, iops);
 
 		if (fileid == BLKIO_THROTL_write_iops_device
 		    && blkiop->ops.blkio_update_group_write_iops_fn)
-			blkiop->ops.blkio_update_group_write_iops_fn(blkg,iops);
+			blkiop->ops.blkio_update_group_write_iops_fn(blkg->key,
+								blkg,iops);
 	}
 }
 
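All three hunks make the same mechanical change: each policy update callback
now receives blkg->key, the opaque per-queue key the group was registered
with, ahead of the group pointer, which is what lets the throttling code
locate its per-queue data and reschedule dispatch. The callback typedefs
implied by these call sites would look roughly like the sketch below; this is
inferred from the hunks, not copied verbatim from block/blk-cgroup.h.

/* Shapes inferred from the call sites above; an assumption, not the
 * verbatim header. 'key' is blkg->key, the per-queue cookie. */
struct blkio_group;

typedef void (blkio_update_group_weight_fn)(void *key,
		struct blkio_group *blkg, unsigned int weight);
typedef void (blkio_update_group_read_bps_fn)(void *key,
		struct blkio_group *blkg, u64 read_bps);
typedef void (blkio_update_group_write_bps_fn)(void *key,
		struct blkio_group *blkg, u64 write_bps);
typedef void (blkio_update_group_read_iops_fn)(void *key,
		struct blkio_group *blkg, u64 read_iops);
typedef void (blkio_update_group_write_iops_fn)(void *key,
		struct blkio_group *blkg, u64 write_iops);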