author    Adam Buchbinder <adam.buchbinder@gmail.com>  2009-12-11 16:35:40 -0500
committer Jiri Kosina <jkosina@suse.cz>                2010-02-04 05:55:45 -0500
commit    2a61aa401638529cd4231f6106980d307fba98fa (patch)
tree      a3d7565570c5996d0b3ae5fdf0126e065e750431
parent    c41b20e721ea4f6f20f66a66e7f0c3c97a2ca9c2 (diff)
Fix misspellings of "invocation" in comments.
Some comments misspell "invocation"; this fixes them. No code changes.

Signed-off-by: Adam Buchbinder <adam.buchbinder@gmail.com>
Signed-off-by: Jiri Kosina <jkosina@suse.cz>
 fs/buffer.c            | 2 +-
 fs/mpage.c             | 2 +-
 include/linux/mmzone.h | 2 +-
 kernel/sched_cpupri.c  | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/fs/buffer.c b/fs/buffer.c
index 6fa530256bf..1d920bab5e7 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2893,7 +2893,7 @@ int block_write_full_page_endio(struct page *page, get_block_t *get_block,
 
 	/*
 	 * The page straddles i_size. It must be zeroed out on each and every
-	 * writepage invokation because it may be mmapped. "A file is mapped
+	 * writepage invocation because it may be mmapped. "A file is mapped
 	 * in multiples of the page size. For a file that is not a multiple of
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
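
Context for the comment being corrected here (the same sentence appears again
in fs/mpage.c below): when the last page of a file extends past i_size, each
writepage call must zero the bytes beyond EOF before writing, because a shared
mmap may have dirtied them. A minimal sketch of that zeroing, assuming the
zero_user_segment() helper and PAGE_CACHE_SIZE constant of this kernel era;
an illustration, not the exact block_write_full_page_endio() code:

	/* Zero the tail of the last page that lies beyond EOF, so stores
	 * made through an mmap past i_size never reach the disk. */
	unsigned offset = i_size_read(inode) & (PAGE_CACHE_SIZE - 1);

	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_CACHE_SIZE);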
diff --git a/fs/mpage.c b/fs/mpage.c
index 42381bd6543..598d54e200e 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -561,7 +561,7 @@ page_is_mapped:
 	if (page->index >= end_index) {
 		/*
 		 * The page straddles i_size. It must be zeroed out on each
-		 * and every writepage invokation because it may be mmapped.
+		 * and every writepage invocation because it may be mmapped.
 		 * "A file is mapped in multiples of the page size. For a file
 		 * that is not a multiple of the page size, the remaining memory
 		 * is zeroed when mapped, and writes to that region are not
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 30fe668c254..e60a340fe89 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -349,7 +349,7 @@ struct zone {
 	 * prev_priority holds the scanning priority for this zone. It is
 	 * defined as the scanning priority at which we achieved our reclaim
 	 * target at the previous try_to_free_pages() or balance_pgdat()
-	 * invokation.
+	 * invocation.
 	 *
 	 * We use prev_priority as a measure of how much stress page reclaim is
 	 * under - it drives the swappiness decision: whether to unmap mapped
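
The comment being touched here describes a reclaim heuristic: prev_priority
records how hard the last reclaim pass had to scan, and the gap between it and
the current priority feeds the decision to unmap mapped pages. A simplified
sketch in the style of the older calc_reclaim_mapped() logic; the weights and
variable names are illustrative assumptions, not the code of this tree:

	/* Lower prev_priority means reclaim was under more stress last
	 * time; translate that into a greater willingness to unmap. */
	int distress = 100 >> min(zone->prev_priority, priority);
	int swap_tendency = mapped_ratio / 2 + distress + swappiness;

	if (swap_tendency >= 100)
		reclaim_mapped = 1;	/* also reclaim mapped pages */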
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index 597b33099df..3db4b1a0e92 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -58,7 +58,7 @@ static int convert_prio(int prio)
  * @lowest_mask: A mask to fill in with selected CPUs (or NULL)
  *
  * Note: This function returns the recommended CPUs as calculated during the
- * current invokation. By the time the call returns, the CPUs may have in
+ * current invocation. By the time the call returns, the CPUs may have in
  * fact changed priorities any number of times. While not ideal, it is not
  * an issue of correctness since the normal rebalancer logic will correct
  * any discrepancies created by racing against the uncertainty of the current
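
The kernel-doc above documents cpupri_find(), whose result is advisory: the
mask reflects the lowest-priority CPUs at lookup time and may be stale before
the caller acts on it. A hedged sketch of a caller in the style of
find_lowest_rq(), assuming this kernel's signature
int cpupri_find(struct cpupri *cp, struct task_struct *p,
struct cpumask *lowest_mask); variable names are illustrative:

	/* Treat the mask as a hint; priorities may already have changed,
	 * and the rebalancer corrects any resulting misplacement. */
	if (cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) {
		int cpu = cpumask_first(lowest_mask);
		/* ... push the task toward 'cpu'; staleness is tolerated ... */
	}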