diff options
author | Jens Axboe <jens.axboe@oracle.com> | 2008-01-24 02:52:45 -0500 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-01-28 04:50:29 -0500 |
commit | fd0928df98b9578be8a786ac0cb78a47a5e17a20 (patch) | |
tree | 70a34cf207bea1bec28e59cf0dba7d20e7f8b0f1 /block | |
parent | 91525300baf162e83e923b09ca286f9205e21522 (diff) |
ioprio: move io priority from task_struct to io_context
This is where it belongs, and it no longer takes up space in the
task_struct of processes that don't do any IO.
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/cfq-iosched.c | 34 | ||||
-rw-r--r-- | block/ll_rw_blk.c | 30 |
2 files changed, 37 insertions, 27 deletions
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 13553e015d72..533af75329e6 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
@@ -199,7 +199,7 @@ CFQ_CFQQ_FNS(sync); | |||
199 | 199 | ||
200 | static void cfq_dispatch_insert(struct request_queue *, struct request *); | 200 | static void cfq_dispatch_insert(struct request_queue *, struct request *); |
201 | static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, | 201 | static struct cfq_queue *cfq_get_queue(struct cfq_data *, int, |
202 | struct task_struct *, gfp_t); | 202 | struct io_context *, gfp_t); |
203 | static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *, | 203 | static struct cfq_io_context *cfq_cic_rb_lookup(struct cfq_data *, |
204 | struct io_context *); | 204 | struct io_context *); |
205 | 205 | ||
@@ -1273,7 +1273,7 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask) | |||
1273 | return cic; | 1273 | return cic; |
1274 | } | 1274 | } |
1275 | 1275 | ||
1276 | static void cfq_init_prio_data(struct cfq_queue *cfqq) | 1276 | static void cfq_init_prio_data(struct cfq_queue *cfqq, struct io_context *ioc) |
1277 | { | 1277 | { |
1278 | struct task_struct *tsk = current; | 1278 | struct task_struct *tsk = current; |
1279 | int ioprio_class; | 1279 | int ioprio_class; |
@@ -1281,7 +1281,7 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq) | |||
1281 | if (!cfq_cfqq_prio_changed(cfqq)) | 1281 | if (!cfq_cfqq_prio_changed(cfqq)) |
1282 | return; | 1282 | return; |
1283 | 1283 | ||
1284 | ioprio_class = IOPRIO_PRIO_CLASS(tsk->ioprio); | 1284 | ioprio_class = IOPRIO_PRIO_CLASS(ioc->ioprio); |
1285 | switch (ioprio_class) { | 1285 | switch (ioprio_class) { |
1286 | default: | 1286 | default: |
1287 | printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); | 1287 | printk(KERN_ERR "cfq: bad prio %x\n", ioprio_class); |
@@ -1293,11 +1293,11 @@ static void cfq_init_prio_data(struct cfq_queue *cfqq) | |||
1293 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | 1293 | cfqq->ioprio_class = IOPRIO_CLASS_BE; |
1294 | break; | 1294 | break; |
1295 | case IOPRIO_CLASS_RT: | 1295 | case IOPRIO_CLASS_RT: |
1296 | cfqq->ioprio = task_ioprio(tsk); | 1296 | cfqq->ioprio = task_ioprio(ioc); |
1297 | cfqq->ioprio_class = IOPRIO_CLASS_RT; | 1297 | cfqq->ioprio_class = IOPRIO_CLASS_RT; |
1298 | break; | 1298 | break; |
1299 | case IOPRIO_CLASS_BE: | 1299 | case IOPRIO_CLASS_BE: |
1300 | cfqq->ioprio = task_ioprio(tsk); | 1300 | cfqq->ioprio = task_ioprio(ioc); |
1301 | cfqq->ioprio_class = IOPRIO_CLASS_BE; | 1301 | cfqq->ioprio_class = IOPRIO_CLASS_BE; |
1302 | break; | 1302 | break; |
1303 | case IOPRIO_CLASS_IDLE: | 1303 | case IOPRIO_CLASS_IDLE: |
@@ -1330,8 +1330,7 @@ static inline void changed_ioprio(struct cfq_io_context *cic) | |||
1330 | cfqq = cic->cfqq[ASYNC]; | 1330 | cfqq = cic->cfqq[ASYNC]; |
1331 | if (cfqq) { | 1331 | if (cfqq) { |
1332 | struct cfq_queue *new_cfqq; | 1332 | struct cfq_queue *new_cfqq; |
1333 | new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc->task, | 1333 | new_cfqq = cfq_get_queue(cfqd, ASYNC, cic->ioc, GFP_ATOMIC); |
1334 | GFP_ATOMIC); | ||
1335 | if (new_cfqq) { | 1334 | if (new_cfqq) { |
1336 | cic->cfqq[ASYNC] = new_cfqq; | 1335 | cic->cfqq[ASYNC] = new_cfqq; |
1337 | cfq_put_queue(cfqq); | 1336 | cfq_put_queue(cfqq); |
@@ -1363,13 +1362,13 @@ static void cfq_ioc_set_ioprio(struct io_context *ioc) | |||
1363 | 1362 | ||
1364 | static struct cfq_queue * | 1363 | static struct cfq_queue * |
1365 | cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, | 1364 | cfq_find_alloc_queue(struct cfq_data *cfqd, int is_sync, |
1366 | struct task_struct *tsk, gfp_t gfp_mask) | 1365 | struct io_context *ioc, gfp_t gfp_mask) |
1367 | { | 1366 | { |
1368 | struct cfq_queue *cfqq, *new_cfqq = NULL; | 1367 | struct cfq_queue *cfqq, *new_cfqq = NULL; |
1369 | struct cfq_io_context *cic; | 1368 | struct cfq_io_context *cic; |
1370 | 1369 | ||
1371 | retry: | 1370 | retry: |
1372 | cic = cfq_cic_rb_lookup(cfqd, tsk->io_context); | 1371 | cic = cfq_cic_rb_lookup(cfqd, ioc); |
1373 | /* cic always exists here */ | 1372 | /* cic always exists here */ |
1374 | cfqq = cic_to_cfqq(cic, is_sync); | 1373 | cfqq = cic_to_cfqq(cic, is_sync); |
1375 | 1374 | ||
@@ -1412,7 +1411,7 @@ retry: | |||
1412 | cfq_mark_cfqq_prio_changed(cfqq); | 1411 | cfq_mark_cfqq_prio_changed(cfqq); |
1413 | cfq_mark_cfqq_queue_new(cfqq); | 1412 | cfq_mark_cfqq_queue_new(cfqq); |
1414 | 1413 | ||
1415 | cfq_init_prio_data(cfqq); | 1414 | cfq_init_prio_data(cfqq, ioc); |
1416 | } | 1415 | } |
1417 | 1416 | ||
1418 | if (new_cfqq) | 1417 | if (new_cfqq) |
@@ -1439,11 +1438,11 @@ cfq_async_queue_prio(struct cfq_data *cfqd, int ioprio_class, int ioprio) | |||
1439 | } | 1438 | } |
1440 | 1439 | ||
1441 | static struct cfq_queue * | 1440 | static struct cfq_queue * |
1442 | cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, | 1441 | cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct io_context *ioc, |
1443 | gfp_t gfp_mask) | 1442 | gfp_t gfp_mask) |
1444 | { | 1443 | { |
1445 | const int ioprio = task_ioprio(tsk); | 1444 | const int ioprio = task_ioprio(ioc); |
1446 | const int ioprio_class = task_ioprio_class(tsk); | 1445 | const int ioprio_class = task_ioprio_class(ioc); |
1447 | struct cfq_queue **async_cfqq = NULL; | 1446 | struct cfq_queue **async_cfqq = NULL; |
1448 | struct cfq_queue *cfqq = NULL; | 1447 | struct cfq_queue *cfqq = NULL; |
1449 | 1448 | ||
@@ -1453,7 +1452,7 @@ cfq_get_queue(struct cfq_data *cfqd, int is_sync, struct task_struct *tsk, | |||
1453 | } | 1452 | } |
1454 | 1453 | ||
1455 | if (!cfqq) { | 1454 | if (!cfqq) { |
1456 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, tsk, gfp_mask); | 1455 | cfqq = cfq_find_alloc_queue(cfqd, is_sync, ioc, gfp_mask); |
1457 | if (!cfqq) | 1456 | if (!cfqq) |
1458 | return NULL; | 1457 | return NULL; |
1459 | } | 1458 | } |
@@ -1793,7 +1792,7 @@ static void cfq_insert_request(struct request_queue *q, struct request *rq) | |||
1793 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1792 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1794 | struct cfq_queue *cfqq = RQ_CFQQ(rq); | 1793 | struct cfq_queue *cfqq = RQ_CFQQ(rq); |
1795 | 1794 | ||
1796 | cfq_init_prio_data(cfqq); | 1795 | cfq_init_prio_data(cfqq, RQ_CIC(rq)->ioc); |
1797 | 1796 | ||
1798 | cfq_add_rq_rb(rq); | 1797 | cfq_add_rq_rb(rq); |
1799 | 1798 | ||
@@ -1900,7 +1899,7 @@ static int cfq_may_queue(struct request_queue *q, int rw) | |||
1900 | 1899 | ||
1901 | cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); | 1900 | cfqq = cic_to_cfqq(cic, rw & REQ_RW_SYNC); |
1902 | if (cfqq) { | 1901 | if (cfqq) { |
1903 | cfq_init_prio_data(cfqq); | 1902 | cfq_init_prio_data(cfqq, cic->ioc); |
1904 | cfq_prio_boost(cfqq); | 1903 | cfq_prio_boost(cfqq); |
1905 | 1904 | ||
1906 | return __cfq_may_queue(cfqq); | 1905 | return __cfq_may_queue(cfqq); |
@@ -1938,7 +1937,6 @@ static int | |||
1938 | cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | 1937 | cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) |
1939 | { | 1938 | { |
1940 | struct cfq_data *cfqd = q->elevator->elevator_data; | 1939 | struct cfq_data *cfqd = q->elevator->elevator_data; |
1941 | struct task_struct *tsk = current; | ||
1942 | struct cfq_io_context *cic; | 1940 | struct cfq_io_context *cic; |
1943 | const int rw = rq_data_dir(rq); | 1941 | const int rw = rq_data_dir(rq); |
1944 | const int is_sync = rq_is_sync(rq); | 1942 | const int is_sync = rq_is_sync(rq); |
@@ -1956,7 +1954,7 @@ cfq_set_request(struct request_queue *q, struct request *rq, gfp_t gfp_mask) | |||
1956 | 1954 | ||
1957 | cfqq = cic_to_cfqq(cic, is_sync); | 1955 | cfqq = cic_to_cfqq(cic, is_sync); |
1958 | if (!cfqq) { | 1956 | if (!cfqq) { |
1959 | cfqq = cfq_get_queue(cfqd, is_sync, tsk, gfp_mask); | 1957 | cfqq = cfq_get_queue(cfqd, is_sync, cic->ioc, gfp_mask); |
1960 | 1958 | ||
1961 | if (!cfqq) | 1959 | if (!cfqq) |
1962 | goto queue_fail; | 1960 | goto queue_fail; |
diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c index 3d0422f48453..b9bb02e845cd 100644 --- a/block/ll_rw_blk.c +++ b/block/ll_rw_blk.c | |||
@@ -3904,6 +3904,26 @@ void exit_io_context(void) | |||
3904 | put_io_context(ioc); | 3904 | put_io_context(ioc); |
3905 | } | 3905 | } |
3906 | 3906 | ||
3907 | struct io_context *alloc_io_context(gfp_t gfp_flags, int node) | ||
3908 | { | ||
3909 | struct io_context *ret; | ||
3910 | |||
3911 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | ||
3912 | if (ret) { | ||
3913 | atomic_set(&ret->refcount, 1); | ||
3914 | ret->task = current; | ||
3915 | ret->ioprio_changed = 0; | ||
3916 | ret->ioprio = 0; | ||
3917 | ret->last_waited = jiffies; /* doesn't matter... */ | ||
3918 | ret->nr_batch_requests = 0; /* because this is 0 */ | ||
3919 | ret->aic = NULL; | ||
3920 | ret->cic_root.rb_node = NULL; | ||
3921 | ret->ioc_data = NULL; | ||
3922 | } | ||
3923 | |||
3924 | return ret; | ||
3925 | } | ||
3926 | |||
3907 | /* | 3927 | /* |
3908 | * If the current task has no IO context then create one and initialise it. | 3928 | * If the current task has no IO context then create one and initialise it. |
3909 | * Otherwise, return its existing IO context. | 3929 | * Otherwise, return its existing IO context. |
@@ -3921,16 +3941,8 @@ static struct io_context *current_io_context(gfp_t gfp_flags, int node) | |||
3921 | if (likely(ret)) | 3941 | if (likely(ret)) |
3922 | return ret; | 3942 | return ret; |
3923 | 3943 | ||
3924 | ret = kmem_cache_alloc_node(iocontext_cachep, gfp_flags, node); | 3944 | ret = alloc_io_context(gfp_flags, node); |
3925 | if (ret) { | 3945 | if (ret) { |
3926 | atomic_set(&ret->refcount, 1); | ||
3927 | ret->task = current; | ||
3928 | ret->ioprio_changed = 0; | ||
3929 | ret->last_waited = jiffies; /* doesn't matter... */ | ||
3930 | ret->nr_batch_requests = 0; /* because this is 0 */ | ||
3931 | ret->aic = NULL; | ||
3932 | ret->cic_root.rb_node = NULL; | ||
3933 | ret->ioc_data = NULL; | ||
3934 | /* make sure set_task_ioprio() sees the settings above */ | 3946 | /* make sure set_task_ioprio() sees the settings above */ |
3935 | smp_wmb(); | 3947 | smp_wmb(); |
3936 | tsk->io_context = ret; | 3948 | tsk->io_context = ret; |