author    Lucas De Marchi <lucas.demarchi@profusion.mobi>    2011-03-30 21:57:33 -0400
committer Lucas De Marchi <lucas.demarchi@profusion.mobi>    2011-03-31 10:26:23 -0400
commit    25985edcedea6396277003854657b5f3cb31a628 (patch)
tree      f026e810210a2ee7290caeb737c23cb6472b7c38 /arch/powerpc/mm
parent    6aba74f2791287ec407e0f92487a725a25908067 (diff)
Fix common misspellings
Fixes generated by 'codespell' and manually reviewed.
Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
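
(The exact codespell invocation is not recorded in this commit; a typical run over this subtree, assuming codespell's -w/--write-changes option to apply fixes in place, might look something like the line below, followed by manual review of the resulting diff.)

    codespell -w arch/powerpc/mm/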
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--  arch/powerpc/mm/hash_low_64.S    24
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c   2
-rw-r--r--  arch/powerpc/mm/mem.c             2
-rw-r--r--  arch/powerpc/mm/numa.c           10
-rw-r--r--  arch/powerpc/mm/tlb_low_64e.S     4
5 files changed, 21 insertions, 21 deletions
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 3079f6b44cf5..5b7dd4ea02b5 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -192,8 +192,8 @@ htab_insert_pte:
  rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */

  /* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
  li r7,0 /* !bolted, !secondary */
  li r8,MMU_PAGE_4K /* page size */
  ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -215,8 +215,8 @@ _GLOBAL(htab_call_hpte_insert1)
  rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */

  /* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
  li r7,HPTE_V_SECONDARY /* !bolted, secondary */
  li r8,MMU_PAGE_4K /* page size */
  ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -495,8 +495,8 @@ htab_special_pfn:
  rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */

  /* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
  li r7,0 /* !bolted, !secondary */
  li r8,MMU_PAGE_4K /* page size */
  ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -522,8 +522,8 @@ _GLOBAL(htab_call_hpte_insert1)
  rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */

  /* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
  li r7,HPTE_V_SECONDARY /* !bolted, secondary */
  li r8,MMU_PAGE_4K /* page size */
  ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -813,8 +813,8 @@ ht64_insert_pte:
  rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */

  /* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
  li r7,0 /* !bolted, !secondary */
  li r8,MMU_PAGE_64K
  ld r9,STK_PARM(r9)(r1) /* segment size */
@@ -836,8 +836,8 @@ _GLOBAL(ht64_call_hpte_insert1)
  rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */

  /* Call ppc_md.hpte_insert */
- ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */
- mr r4,r29 /* Retreive va */
+ ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */
+ mr r4,r29 /* Retrieve va */
  li r7,HPTE_V_SECONDARY /* !bolted, secondary */
  li r8,MMU_PAGE_64K
  ld r9,STK_PARM(r9)(r1) /* segment size */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index a5991facddce..58a022d0f463 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -753,7 +753,7 @@ void __cpuinit early_init_mmu_secondary(void)
  mtspr(SPRN_SDR1, _SDR1);

  /* Initialize STAB/SLB. We use a virtual address as it works
-  * in real mode on pSeries and we want a virutal address on
+  * in real mode on pSeries and we want a virtual address on
   * iSeries anyway
   */
  if (cpu_has_feature(CPU_FTR_SLB))
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a66499650909..57e545b84bf1 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -424,7 +424,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
  clear_page(page);

  /*
-  * We shouldnt have to do this, but some versions of glibc
+  * We shouldn't have to do this, but some versions of glibc
   * require it (ld.so assumes zero filled pages are icache clean)
   * - Anton
   */
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0dc95c0aa3be..5ec1dad2a19d 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -440,11 +440,11 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
 }

 /*
- * Retreive and validate the ibm,dynamic-memory property of the device tree.
+ * Retrieve and validate the ibm,dynamic-memory property of the device tree.
  *
  * The layout of the ibm,dynamic-memory property is a number N of memblock
  * list entries followed by N memblock list entries. Each memblock list entry
- * contains information as layed out in the of_drconf_cell struct above.
+ * contains information as laid out in the of_drconf_cell struct above.
  */
 static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 {
@@ -468,7 +468,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
 }

 /*
- * Retreive and validate the ibm,lmb-size property for drconf memory
+ * Retrieve and validate the ibm,lmb-size property for drconf memory
  * from the device tree.
  */
 static u64 of_get_lmb_size(struct device_node *memory)
@@ -490,7 +490,7 @@ struct assoc_arrays {
 };

 /*
- * Retreive and validate the list of associativity arrays for drconf
+ * Retrieve and validate the list of associativity arrays for drconf
  * memory from the ibm,associativity-lookup-arrays property of the
  * device tree..
  *
@@ -604,7 +604,7 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
  * Returns the size the region should have to enforce the memory limit.
  * This will either be the original value of size, a truncated value,
  * or zero. If the returned value of size is 0 the region should be
- * discarded as it lies wholy above the memory limit.
+ * discarded as it lies wholly above the memory limit.
  */
 static unsigned long __init numa_enforce_memory_limit(unsigned long start,
                                                       unsigned long size)
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S
index 8526bd9d2aa3..33cf704e4e1b 100644
--- a/arch/powerpc/mm/tlb_low_64e.S
+++ b/arch/powerpc/mm/tlb_low_64e.S
@@ -192,7 +192,7 @@ normal_tlb_miss:
  or r10,r15,r14

 BEGIN_MMU_FTR_SECTION
- /* Set the TLB reservation and seach for existing entry. Then load
+ /* Set the TLB reservation and search for existing entry. Then load
   * the entry.
   */
  PPC_TLBSRX_DOT(0,r16)
@@ -425,7 +425,7 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV)

 virt_page_table_tlb_miss_fault:
  /* If we fault here, things are a little bit tricky. We need to call
-  * either data or instruction store fault, and we need to retreive
+  * either data or instruction store fault, and we need to retrieve
   * the original fault address and ESR (for data).
   *
   * The thing is, we know that in normal circumstances, this is