diff options
author | Artem Bityutskiy <Artem.Bityutskiy@nokia.com> | 2010-07-25 07:29:11 -0400 |
---|---|---|
committer | Jens Axboe <jaxboe@fusionio.com> | 2010-08-07 12:53:16 -0400 |
commit | 6f904ff0e39ea88f81eb77e8dfb4e1238492f0a8 (patch) | |
tree | 8a22ff5ffe31e221554915ac9135da15d3adad8d /mm | |
parent | 4aeefdc69f7b6f3f287e6fd8d4b213953b9e92d8 (diff) |
writeback: harmonize writeback threads naming
The write-back code mixes words "thread" and "task" for the same things. This
is not a big deal, but still an inconsistency.
hch: a convention I tend to use and I've seen in various places
is to always use _task for the storage of the task_struct pointer,
and thread everywhere else. This especially helps with having
foo_thread for the actual thread and foo_task for a global
variable keeping the task_struct pointer
This patch renames:
* 'bdi_add_default_flusher_task()' -> 'bdi_add_default_flusher_thread()'
* 'bdi_forker_task()' -> 'bdi_forker_thread()'
because bdi threads are 'bdi_writeback_thread()', so these names are more
consistent.
This patch also amends comments and makes them refer to the forker and bdi
threads as "thread", not "task".
Also, while at it, make the 'bdi_add_default_flusher_thread()' declaration use
'static void' instead of 'void static' and make checkpatch.pl happy.
Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/backing-dev.c | 26 |
1 files changed, 13 insertions, 13 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c index ac78a3336181..4e9ed2a8521f 100644 --- a/mm/backing-dev.c +++ b/mm/backing-dev.c | |||
@@ -50,7 +50,7 @@ static struct timer_list sync_supers_timer; | |||
50 | static int bdi_sync_supers(void *); | 50 | static int bdi_sync_supers(void *); |
51 | static void sync_supers_timer_fn(unsigned long); | 51 | static void sync_supers_timer_fn(unsigned long); |
52 | 52 | ||
53 | static void bdi_add_default_flusher_task(struct backing_dev_info *bdi); | 53 | static void bdi_add_default_flusher_thread(struct backing_dev_info *bdi); |
54 | 54 | ||
55 | #ifdef CONFIG_DEBUG_FS | 55 | #ifdef CONFIG_DEBUG_FS |
56 | #include <linux/debugfs.h> | 56 | #include <linux/debugfs.h> |
@@ -279,10 +279,10 @@ static void bdi_flush_io(struct backing_dev_info *bdi) | |||
279 | } | 279 | } |
280 | 280 | ||
281 | /* | 281 | /* |
282 | * kupdated() used to do this. We cannot do it from the bdi_forker_task() | 282 | * kupdated() used to do this. We cannot do it from the bdi_forker_thread() |
283 | * or we risk deadlocking on ->s_umount. The longer term solution would be | 283 | * or we risk deadlocking on ->s_umount. The longer term solution would be |
284 | * to implement sync_supers_bdi() or similar and simply do it from the | 284 | * to implement sync_supers_bdi() or similar and simply do it from the |
285 | * bdi writeback tasks individually. | 285 | * bdi writeback thread individually. |
286 | */ | 286 | */ |
287 | static int bdi_sync_supers(void *unused) | 287 | static int bdi_sync_supers(void *unused) |
288 | { | 288 | { |
@@ -318,7 +318,7 @@ static void sync_supers_timer_fn(unsigned long unused) | |||
318 | bdi_arm_supers_timer(); | 318 | bdi_arm_supers_timer(); |
319 | } | 319 | } |
320 | 320 | ||
321 | static int bdi_forker_task(void *ptr) | 321 | static int bdi_forker_thread(void *ptr) |
322 | { | 322 | { |
323 | struct bdi_writeback *me = ptr; | 323 | struct bdi_writeback *me = ptr; |
324 | 324 | ||
@@ -354,7 +354,7 @@ static int bdi_forker_task(void *ptr) | |||
354 | !bdi_has_dirty_io(bdi)) | 354 | !bdi_has_dirty_io(bdi)) |
355 | continue; | 355 | continue; |
356 | 356 | ||
357 | bdi_add_default_flusher_task(bdi); | 357 | bdi_add_default_flusher_thread(bdi); |
358 | } | 358 | } |
359 | 359 | ||
360 | set_current_state(TASK_INTERRUPTIBLE); | 360 | set_current_state(TASK_INTERRUPTIBLE); |
@@ -376,7 +376,7 @@ static int bdi_forker_task(void *ptr) | |||
376 | 376 | ||
377 | /* | 377 | /* |
378 | * This is our real job - check for pending entries in | 378 | * This is our real job - check for pending entries in |
379 | * bdi_pending_list, and create the tasks that got added | 379 | * bdi_pending_list, and create the threads that got added |
380 | */ | 380 | */ |
381 | bdi = list_entry(bdi_pending_list.next, struct backing_dev_info, | 381 | bdi = list_entry(bdi_pending_list.next, struct backing_dev_info, |
382 | bdi_list); | 382 | bdi_list); |
@@ -387,7 +387,7 @@ static int bdi_forker_task(void *ptr) | |||
387 | wb->task = kthread_run(bdi_writeback_thread, wb, "flush-%s", | 387 | wb->task = kthread_run(bdi_writeback_thread, wb, "flush-%s", |
388 | dev_name(bdi->dev)); | 388 | dev_name(bdi->dev)); |
389 | /* | 389 | /* |
390 | * If task creation fails, then readd the bdi to | 390 | * If thread creation fails, then readd the bdi to |
391 | * the pending list and force writeout of the bdi | 391 | * the pending list and force writeout of the bdi |
392 | * from this forker thread. That will free some memory | 392 | * from this forker thread. That will free some memory |
393 | * and we can try again. | 393 | * and we can try again. |
@@ -430,10 +430,10 @@ static void bdi_add_to_pending(struct rcu_head *head) | |||
430 | } | 430 | } |
431 | 431 | ||
432 | /* | 432 | /* |
433 | * Add the default flusher task that gets created for any bdi | 433 | * Add the default flusher thread that gets created for any bdi |
434 | * that has dirty data pending writeout | 434 | * that has dirty data pending writeout |
435 | */ | 435 | */ |
436 | void static bdi_add_default_flusher_task(struct backing_dev_info *bdi) | 436 | static void bdi_add_default_flusher_thread(struct backing_dev_info *bdi) |
437 | { | 437 | { |
438 | if (!bdi_cap_writeback_dirty(bdi)) | 438 | if (!bdi_cap_writeback_dirty(bdi)) |
439 | return; | 439 | return; |
@@ -445,10 +445,10 @@ void static bdi_add_default_flusher_task(struct backing_dev_info *bdi) | |||
445 | } | 445 | } |
446 | 446 | ||
447 | /* | 447 | /* |
448 | * Check with the helper whether to proceed adding a task. Will only | 448 | * Check with the helper whether to proceed adding a thread. Will only |
449 | * abort if we two or more simultanous calls to | 449 | * abort if we two or more simultanous calls to |
450 | * bdi_add_default_flusher_task() occured, further additions will block | 450 | * bdi_add_default_flusher_thread() occured, further additions will |
451 | * waiting for previous additions to finish. | 451 | * block waiting for previous additions to finish. |
452 | */ | 452 | */ |
453 | if (!test_and_set_bit(BDI_pending, &bdi->state)) { | 453 | if (!test_and_set_bit(BDI_pending, &bdi->state)) { |
454 | list_del_rcu(&bdi->bdi_list); | 454 | list_del_rcu(&bdi->bdi_list); |
@@ -506,7 +506,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent, | |||
506 | if (bdi_cap_flush_forker(bdi)) { | 506 | if (bdi_cap_flush_forker(bdi)) { |
507 | struct bdi_writeback *wb = &bdi->wb; | 507 | struct bdi_writeback *wb = &bdi->wb; |
508 | 508 | ||
509 | wb->task = kthread_run(bdi_forker_task, wb, "bdi-%s", | 509 | wb->task = kthread_run(bdi_forker_thread, wb, "bdi-%s", |
510 | dev_name(dev)); | 510 | dev_name(dev)); |
511 | if (IS_ERR(wb->task)) { | 511 | if (IS_ERR(wb->task)) { |
512 | wb->task = NULL; | 512 | wb->task = NULL; |