Diffstat (limited to 'block/kyber-iosched.c')
 -rw-r--r--  block/kyber-iosched.c | 15
 1 file changed, 2 insertions(+), 13 deletions(-)
diff --git a/block/kyber-iosched.c b/block/kyber-iosched.c
index 95d062c07c61..08eb5295c18d 100644
--- a/block/kyber-iosched.c
+++ b/block/kyber-iosched.c
@@ -40,8 +40,6 @@ enum {
 };
 
 enum {
-	KYBER_MIN_DEPTH = 256,
-
 	/*
 	 * In order to prevent starvation of synchronous requests by a flood of
 	 * asynchronous requests, we reserve 25% of requests for synchronous
@@ -305,7 +303,6 @@ static int kyber_bucket_fn(const struct request *rq)
 static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 {
 	struct kyber_queue_data *kqd;
-	unsigned int max_tokens;
 	unsigned int shift;
 	int ret = -ENOMEM;
 	int i;
@@ -320,25 +317,17 @@ static struct kyber_queue_data *kyber_queue_data_alloc(struct request_queue *q)
 	if (!kqd->cb)
 		goto err_kqd;
 
-	/*
-	 * The maximum number of tokens for any scheduling domain is at least
-	 * the queue depth of a single hardware queue. If the hardware doesn't
-	 * have many tags, still provide a reasonable number.
-	 */
-	max_tokens = max_t(unsigned int, q->tag_set->queue_depth,
-			   KYBER_MIN_DEPTH);
 	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
 		WARN_ON(!kyber_depth[i]);
 		WARN_ON(!kyber_batch_size[i]);
 		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
-					      max_tokens, -1, false, GFP_KERNEL,
-					      q->node);
+					      kyber_depth[i], -1, false,
+					      GFP_KERNEL, q->node);
 		if (ret) {
 			while (--i >= 0)
 				sbitmap_queue_free(&kqd->domain_tokens[i]);
 			goto err_cb;
 		}
-		sbitmap_queue_resize(&kqd->domain_tokens[i], kyber_depth[i]);
 	}
 
 	shift = kyber_sched_tags_shift(kqd);
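
For reference, this is roughly what the token-allocation loop in kyber_queue_data_alloc() reduces to after the change: each scheduling domain's sbitmap is created at its final kyber_depth[] size, so the oversized max_tokens allocation (based on KYBER_MIN_DEPTH and the hardware queue depth) and the follow-up sbitmap_queue_resize() call are no longer needed. The snippet below is a sketch of the resulting code, not a standalone program; the identifiers and the sbitmap_queue_init_node() signature are assumed to match the kernel tree this diff applies to.

	/* Allocate each scheduling domain's token pool directly at its
	 * intended depth; no oversized allocation followed by a resize.
	 */
	for (i = 0; i < KYBER_NUM_DOMAINS; i++) {
		WARN_ON(!kyber_depth[i]);
		WARN_ON(!kyber_batch_size[i]);
		/* depth = kyber_depth[i], shift = -1 (let sbitmap choose),
		 * round_robin = false, allocated on the queue's NUMA node.
		 */
		ret = sbitmap_queue_init_node(&kqd->domain_tokens[i],
					      kyber_depth[i], -1, false,
					      GFP_KERNEL, q->node);
		if (ret) {
			/* Tear down the domains initialized so far. */
			while (--i >= 0)
				sbitmap_queue_free(&kqd->domain_tokens[i]);
			goto err_cb;
		}
	}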