author	Omar Sandoval <osandov@fb.com>	2018-09-27 18:55:53 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-09-27 19:34:56 -0400
commit	fa2a1f609e6491383ab63ff6329e0aaa2db2b9f7 (patch)
tree	a3bf07ac5b8ec7c46705c0efdfddfe3d8a8b70bc /block/kyber-iosched.c
parent	f8232f29ca268b0ba9e98638c9ed71e337e7f0a4 (diff)
kyber: don't make domain token sbitmap larger than necessary
The domain token sbitmaps are currently initialized to the device queue
depth or 256, whichever is larger, and immediately resized to the
maximum depth for that domain (256, 128, or 64 for read, write, and
other, respectively). The sbitmap is never resized larger than that, so
it's unnecessary to allocate a bitmap larger than the maximum depth.
Let's just allocate it to the maximum depth to begin with. This will
use marginally less memory, and, more importantly, give us a more
appropriate number of bits per sbitmap word.

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
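The "bits per sbitmap word" point comes from sbitmap's depth-dependent
word sizing: when the caller passes shift == -1, sbitmap starts at
ilog2(BITS_PER_LONG) and shrinks the per-word bit count until the map
spans several words, and a later resize does not recompute that shift.
Below is a minimal userspace sketch of that heuristic (a rough
approximation for illustration, with hypothetical depths; not a copy of
the kernel source):

#include <stdio.h>

/*
 * Rough model of sbitmap's default shift selection: start at
 * ilog2(BITS_PER_LONG) and shrink bits-per-word until the bitmap
 * spreads over at least ~4 words (and thus several cachelines).
 */
static unsigned int sb_shift(unsigned int depth)
{
	unsigned int shift = 6;	/* ilog2(64) on a 64-bit kernel */

	if (depth >= 4)
		while ((4U << shift) > depth)
			shift--;
	return shift;
}

int main(void)
{
	/*
	 * Before the patch: initializing at, say, a queue depth of 1024
	 * keeps 64 bits per word, so resizing down to 64 afterwards
	 * packs all 64 "other" tokens into a single word.
	 */
	printf("init depth 1024: %u bits/word\n", 1U << sb_shift(1024));
	/*
	 * After the patch: initializing at the domain maximum of 64
	 * picks 16 bits per word, spreading the tokens across 4 words.
	 */
	printf("init depth 64:   %u bits/word\n", 1U << sb_shift(64));
	return 0;
}

Fewer bits per word at the same depth means the tokens span more
cachelines, which reduces contention when many CPUs allocate and free
tokens concurrently.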
Diffstat (limited to 'block/kyber-iosched.c')
-rw-r--r--	block/kyber-iosched.c	15
1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 95d062c07c61..08eb5295c18d 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -40,8 +40,6 @@ enum {
 };
 
 enum {
-	KYBER_MIN_DEPTH = 256,
-
 	/*
 	 * In order to prevent starvation of synchronous requests by a flood of
 	 * asynchronous requests, we reserve 25% of requests for synchronous
@@ -305,7 +303,6 @@ static int kyber_bucket_fn(const struct request *rq)
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
 	struct kyber_queue_data *kqd;
-	unsigned int max_tokens;
 	unsigned int shift;
 	int ret = -ENOMEM;
 	int i;
@@ -320,25 +317,17 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 	if (!kqd->cb)
 		goto err_kqd;
 
-	/*
-	 * The maximum number of tokens for any scheduling domain is at least
-	 * the queue depth of a single hardware queue. If the hardware doesn't
-	 * have many tags, still provide a reasonable number.
-	 */
-	max_tokens = max_t(unsigned int, q->tag_set->queue_depth,
-			   KYBER_MIN_DEPTH);
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 		WARN_ON(!kyber_depth[i]);
 		WARN_ON(!kyber_batch_size[i]);
 		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
-					      max_tokens, -1, false, GFP_KERNEL,
-					      q->node);
+					      kyber_depth[i], -1, false,
+					      GFP_KERNEL, q->node);
 		if (ret) {
 			while (--i >= 0)
 				sbitmap_queue_free(&kqd->domain_tokens[i]);
 			goto err_cb;
 		}
-		sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
 	}
 
 	shift = kyber_sched_tags_shift(kqd);
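For reference, with the hunks above applied the token allocation loop
in kyber_queue_data_alloc() reads as follows (reconstructed from the
new side of the diff; nothing here beyond what the hunks show):

	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_cb;
		}
	}

Each domain's bitmap is now sized to kyber_depth[i] up front, and the
sbitmap_queue_resize() call that previously shrank an over-sized map is
gone.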