author    Simon Arlott <simon@fire.lp0.eu>    2007-10-19 19:25:36 -0400
committer Adrian Bunk <bunk@kernel.org>       2007-10-19 19:25:36 -0400
commit    676b1855de0a18100b3c340084eb8ef72bde4fb1 (patch)
tree      cbcbe6dec24a23f97f93ec7753ab74d34a92473a
parent    5b20311eeae7c5e7d9484cd0878ac756a20a78e4 (diff)
spelling fixes: arch/x86_64/
Spelling fixes in arch/x86_64/.

Signed-off-by: Simon Arlott <simon@fire.lp0.eu>
Signed-off-by: Adrian Bunk <bunk@kernel.org>
-rw-r--r--   arch/x86/boot/compressed/misc_64.c   4
-rw-r--r--   arch/x86/kernel/io_apic_64.c         4
-rw-r--r--   arch/x86/kernel/mce_64.c             4
-rw-r--r--   arch/x86/kernel/signal_64.c          2
-rw-r--r--   arch/x86/kernel/smpboot_64.c         2
-rw-r--r--   arch/x86/kernel/traps_64.c           2
-rw-r--r--   arch/x86/kernel/vsyscall_64.c        2
-rw-r--r--   arch/x86/mm/fault_64.c               4
-rw-r--r--   arch/x86/mm/srat_64.c                2
9 files changed, 13 insertions, 13 deletions
diff --git a/arch/x86/boot/compressed/misc_64.c b/arch/x86/boot/compressed/misc_64.c
index f932b0e89096..6ea015aa65e4 100644
--- a/arch/x86/boot/compressed/misc_64.c
+++ b/arch/x86/boot/compressed/misc_64.c
@@ -25,7 +25,7 @@
 
 /*
  * Getting to provable safe in place decompression is hard.
- * Worst case behaviours need to be analized.
+ * Worst case behaviours need to be analyzed.
  * Background information:
  *
  * The file layout is:
@@ -94,7 +94,7 @@
  * Adding 32768 instead of 32767 just makes for round numbers.
  * Adding the decompressor_size is necessary as it musht live after all
  * of the data as well. Last I measured the decompressor is about 14K.
- * 10K of actuall data and 4K of bss.
+ * 10K of actual data and 4K of bss.
  *
  */
 
diff --git a/arch/x86/kernel/io_apic_64.c b/arch/x86/kernel/io_apic_64.c
index 1c2c7bf6a9d3..b3c2d268d708 100644
--- a/arch/x86/kernel/io_apic_64.c
+++ b/arch/x86/kernel/io_apic_64.c
@@ -1770,7 +1770,7 @@ __setup("no_timer_check", notimercheck);
 
 /*
  *
- * IRQ's that are handled by the PIC in the MPS IOAPIC case.
+ * IRQs that are handled by the PIC in the MPS IOAPIC case.
  * - IRQ2 is the cascade IRQ, and cannot be a io-apic IRQ.
  *   Linux doesn't really care, as it's not actually used
  *   for any interrupt handling anyway.
@@ -1921,7 +1921,7 @@ void destroy_irq(unsigned int irq)
 }
 
 /*
- * MSI mesage composition
+ * MSI message composition
  */
 #ifdef CONFIG_PCI_MSI
 static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 66e6b797b2cb..82c85bdff3a1 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -320,7 +320,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 #ifdef CONFIG_X86_MCE_INTEL
 /***
  * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog
- * @cpu: The CPU on which the event occured.
+ * @cpu: The CPU on which the event occurred.
  * @status: Event status information
  *
  * This function should be called by the thermal interrupt after the
@@ -688,7 +688,7 @@ static int __init mcheck_disable(char *str)
 	return 1;
 }
 
-/* mce=off disables machine check. Note you can reenable it later
+/* mce=off disables machine check. Note you can re-enable it later
    using sysfs.
    mce=TOLERANCELEVEL (number, see above)
    mce=bootlog Log MCEs from before booting. Disabled by default on AMD.
diff --git a/arch/x86/kernel/signal_64.c b/arch/x86/kernel/signal_64.c
index 683802bec419..ab086b0357fc 100644
--- a/arch/x86/kernel/signal_64.c
+++ b/arch/x86/kernel/signal_64.c
@@ -410,7 +410,7 @@ static void do_signal(struct pt_regs *regs)
 
 	signr = get_signal_to_deliver(&info, &ka, regs, NULL);
 	if (signr > 0) {
-		/* Reenable any watchpoints before delivering the
+		/* Re-enable any watchpoints before delivering the
 		 * signal to user space. The processor register will
 		 * have been cleared if the watchpoint triggered
 		 * inside the kernel.
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e351ac4ab5b1..d4c33aba3ff2 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -350,7 +350,7 @@ void __cpuinit start_secondary(void)
 	/*
 	 * We need to hold call_lock, so there is no inconsistency
 	 * between the time smp_call_function() determines number of
-	 * IPI receipients, and the time when the determination is made
+	 * IPI recipients, and the time when the determination is made
 	 * for which cpus receive the IPI in genapic_flat.c. Holding this
 	 * lock helps us to not include this cpu in a currently in progress
 	 * smp_call_function().
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b4a9b3db1994..b4c887341a26 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -201,7 +201,7 @@ static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
 #define MSG(txt) ops->warning(data, txt)
 
 /*
- * x86-64 can have upto three kernel stacks:
+ * x86-64 can have up to three kernel stacks:
  * process stack
  * interrupt stack
  * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 585541ca1a7e..e14cb3f53862 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -53,7 +53,7 @@
 /*
  * vsyscall_gtod_data contains data that is :
  * - readonly from vsyscalls
- * - writen by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
+ * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
  * Try to keep this structure as small as possible to avoid cache line ping pongs
  */
 int __vgetcpu_mode __section_vgetcpu_mode;
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 5149ac136a5d..7c560843ded1 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -378,7 +378,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 again:
 	/* When running in the kernel we expect faults to occur only to
 	 * addresses in user space. All other faults represent errors in the
-	 * kernel and should generate an OOPS. Unfortunatly, in the case of an
+	 * kernel and should generate an OOPS. Unfortunately, in the case of an
 	 * erroneous fault occurring in a code path which already holds mmap_sem
 	 * we will deadlock attempting to validate the fault against the
 	 * address space. Luckily the kernel only validly references user
@@ -386,7 +386,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * exceptions table.
 	 *
 	 * As the vast majority of faults will be valid we will only perform
-	 * the source reference check when there is a possibilty of a deadlock.
+	 * the source reference check when there is a possibility of a deadlock.
 	 * Attempt to lock the address space, if we cannot we then validate the
 	 * source. If this is invalid we can skip the address space check,
 	 * thus avoiding the deadlock.
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 56089ccc3949..ea85172fc0cc 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -218,7 +218,7 @@ static inline int save_add_info(void) {return 0;}
 /*
  * Update nodes_add and decide if to include add are in the zone.
  * Both SPARSE and RESERVE need nodes_add infomation.
- * This code supports one contigious hot add area per node.
+ * This code supports one contiguous hot add area per node.
  */
 static int reserve_hotadd(int node, unsigned long start, unsigned long end)
 {