about | summary | refs | log | tree | commit | diff | stats
path: root/arch
diff options
context:
space:
mode:
author    Ingo Molnar <mingo@kernel.org>  2018-12-03 04:47:34 -0500
committer Ingo Molnar <mingo@kernel.org>  2018-12-03 04:49:13 -0500
commit    a97673a1c43d005a3ae215f4ca8b4bbb5691aea1 (patch)
tree      d32f6d27212f5cfdb072c3300f42b744a0bd8e25 /arch
parent    df60673198ae678f68af54873b8904ba93fe13a0 (diff)
x86: Fix various typos in comments
Go over arch/x86/ and fix common typos in comments,
and a typo in an actual function argument name.

No change in functionality intended.

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/crypto/cast5_avx_glue.c                                 2
-rw-r--r--  arch/x86/crypto/cast6_avx_glue.c                                 2
-rw-r--r--  arch/x86/entry/common.c                                          2
-rw-r--r--  arch/x86/entry/vdso/vma.c                                        2
-rw-r--r--  arch/x86/events/intel/bts.c                                      2
-rw-r--r--  arch/x86/events/intel/core.c                                     2
-rw-r--r--  arch/x86/events/intel/ds.c                                       2
-rw-r--r--  arch/x86/events/intel/p4.c                                       2
-rw-r--r--  arch/x86/include/asm/alternative.h                               2
-rw-r--r--  arch/x86/include/asm/cmpxchg.h                                   2
-rw-r--r--  arch/x86/include/asm/efi.h                                       2
-rw-r--r--  arch/x86/kernel/acpi/boot.c                                      2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c                                 2
-rw-r--r--  arch/x86/kernel/crash_dump_64.c                                  2
-rw-r--r--  arch/x86/kernel/process_64.c                                     2
-rw-r--r--  arch/x86/kvm/vmx.c                                               4
-rw-r--r--  arch/x86/kvm/x86.c                                               2
-rw-r--r--  arch/x86/mm/pageattr.c                                           2
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c                                4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c       2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c  2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c     2
-rw-r--r--  arch/x86/platform/intel/iosf_mbi.c                               2
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1-sci.c                            2
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c                                    2
-rw-r--r--  arch/x86/xen/setup.c                                             2
26 files changed, 28 insertions, 28 deletions
diff --git a/arch/x86/crypto/cast5_avx_glue.c b/arch/x86/crypto/cast5_avx_glue.c
index 41034745d6a2..d1ce49119da8 100644
--- a/arch/x86/crypto/cast5_avx_glue.c
+++ b/arch/x86/crypto/cast5_avx_glue.c
@@ -1,5 +1,5 @@
 /*
- * Glue Code for the AVX assembler implemention of the Cast5 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast5 Cipher
  *
  * Copyright (C) 2012 Johannes Goetzfried
  *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
diff --git a/arch/x86/crypto/cast6_avx_glue.c b/arch/x86/crypto/cast6_avx_glue.c
index 9fb66b5e94b2..18965c39305e 100644
--- a/arch/x86/crypto/cast6_avx_glue.c
+++ b/arch/x86/crypto/cast6_avx_glue.c
@@ -1,5 +1,5 @@
 /*
- * Glue Code for the AVX assembler implemention of the Cast6 Cipher
+ * Glue Code for the AVX assembler implementation of the Cast6 Cipher
  *
  * Copyright (C) 2012 Johannes Goetzfried
  *     <Johannes.Goetzfried@informatik.stud.uni-erlangen.de>
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 3b2490b81918..7bc105f47d21 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -140,7 +140,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
 	/*
 	 * In order to return to user mode, we need to have IRQs off with
 	 * none of EXIT_TO_USERMODE_LOOP_FLAGS set. Several of these flags
-	 * can be set at any time on preemptable kernels if we have IRQs on,
+	 * can be set at any time on preemptible kernels if we have IRQs on,
 	 * so we need to loop. Disabling preemption wouldn't help: doing the
 	 * work to clear some of the flags can sleep.
 	 */
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
index 7eb878561910..babc4e7a519c 100644
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -261,7 +261,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	 * abusing from userspace install_speciall_mapping, which may
 	 * not do accounting and rlimit right.
 	 * We could search vma near context.vdso, but it's a slowpath,
-	 * so let's explicitely check all VMAs to be completely sure.
+	 * so let's explicitly check all VMAs to be completely sure.
 	 */
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
diff --git a/arch/x86/events/intel/bts.c b/arch/x86/events/intel/bts.c
index 24ffa1e88cf9..a01ef1b0f883 100644
--- a/arch/x86/events/intel/bts.c
+++ b/arch/x86/events/intel/bts.c
@@ -589,7 +589,7 @@ static __init int bts_init(void)
 	 * the AUX buffer.
 	 *
 	 * However, since this driver supports per-CPU and per-task inherit
-	 * we cannot use the user mapping since it will not be availble
+	 * we cannot use the user mapping since it will not be available
 	 * if we're not running the owning process.
 	 *
 	 * With PTI we can't use the kernal map either, because its not
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index ecc3e34ca955..40e12cfc87f6 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1930,7 +1930,7 @@ static void intel_pmu_enable_all(int added)
  * in sequence on the same PMC or on different PMCs.
  *
  * In practise it appears some of these events do in fact count, and
- * we need to programm all 4 events.
+ * we need to program all 4 events.
  */
 static void intel_pmu_nhm_workaround(void)
 {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index b7b01d762d32..e9acf1d2e7b2 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1199,7 +1199,7 @@ static void setup_pebs_sample_data(struct perf_event *event,
 	/*
 	 * We must however always use iregs for the unwinder to stay sane; the
 	 * record BP,SP,IP can point into thin air when the record is from a
-	 * previous PMI context or an (I)RET happend between the record and
+	 * previous PMI context or an (I)RET happened between the record and
 	 * PMI.
 	 */
 	if (sample_type & PERF_SAMPLE_CALLCHAIN)
diff --git a/arch/x86/events/intel/p4.c b/arch/x86/events/intel/p4.c
index d32c0eed38ca..dee579efb2b2 100644
--- a/arch/x86/events/intel/p4.c
+++ b/arch/x86/events/intel/p4.c
@@ -1259,7 +1259,7 @@ again:
 	}
 	/*
 	 * Perf does test runs to see if a whole group can be assigned
-	 * together succesfully. There can be multiple rounds of this.
+	 * together successfully. There can be multiple rounds of this.
 	 * Unfortunately, p4_pmu_swap_config_ts touches the hwc->config
 	 * bits, such that the next round of group assignments will
 	 * cause the above p4_should_swap_ts to pass instead of fail.
diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
index d7faa16622d8..ea9886651c39 100644
--- a/arch/x86/include/asm/alternative.h
+++ b/arch/x86/include/asm/alternative.h
@@ -167,7 +167,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
 /*
  * Alternative inline assembly with input.
  *
- * Pecularities:
+ * Peculiarities:
  * No memory clobber here.
  * Argument numbers start with 1.
  * Best is to use constraints that are fixed size (like (%1) ... "r")
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index bfb85e5844ab..a8bfac131256 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -7,7 +7,7 @@
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
 /*
- * Non-existant functions to indicate usage errors at link time
+ * Non-existent functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error().
  */
 extern void __xchg_wrong_size(void)
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index eea40d52ca78..db45773a63b6 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -19,7 +19,7 @@
  * This is the main reason why we're doing stable VA mappings for RT
  * services.
  *
- * This flag is used in conjuction with a chicken bit called
+ * This flag is used in conjunction with a chicken bit called
  * "efi=old_map" which can be used as a fallback to the old runtime
  * services mapping method in case there's some b0rkage with a
  * particular EFI implementation (haha, it is hard to hold up the
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 06635fbca81c..2624de16cd7a 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -848,7 +848,7 @@ EXPORT_SYMBOL(acpi_unregister_ioapic);
 /**
  * acpi_ioapic_registered - Check whether IOAPIC assoicatied with @gsi_base
  *                          has been registered
- * @handle:     ACPI handle of the IOAPIC deivce
+ * @handle:     ACPI handle of the IOAPIC device
  * @gsi_base:   GSI base associated with the IOAPIC
  *
  * Assume caller holds some type of lock to serialize acpi_ioapic_registered()
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 36d2696c9563..6b4826166dec 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -686,7 +686,7 @@ DEFINE_PER_CPU(unsigned, mce_poll_count);
 	 * errors here. However this would be quite problematic --
 	 * we would need to reimplement the Monarch handling and
 	 * it would mess up the exclusion between exception handler
-	 * and poll hander -- * so we skip this for now.
+	 * and poll handler -- * so we skip this for now.
 	 * These cases should not happen anyways, or only when the CPU
 	 * is already totally * confused. In this case it's likely it will
 	 * not fully execute the machine check handler either.
diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
index eb8ab3915268..22369dd5de3b 100644
--- a/arch/x86/kernel/crash_dump_64.c
+++ b/arch/x86/kernel/crash_dump_64.c
@@ -62,7 +62,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
 
 /**
  * copy_oldmem_page_encrypted - same as copy_oldmem_page() above but ioremap the
- * memory with the encryption mask set to accomodate kdump on SME-enabled
+ * memory with the encryption mask set to accommodate kdump on SME-enabled
  * machines.
  */
 ssize_t copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index bbfbf017065c..6a567f7e315b 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -684,7 +684,7 @@ void set_personality_64bit(void)
 	/* TBD: overwrites user setup. Should have two bits.
 	   But 64bit processes have always behaved this way,
 	   so it's not too bad. The main problem is just that
-	   32bit childs are affected again. */
+	   32bit children are affected again. */
 	current->personality &= ~READ_IMPLIES_EXEC;
 }
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 02edd9960e9d..6415100fbe66 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -485,7 +485,7 @@ struct __packed vmcs12 {
 	/*
 	 * To allow migration of L1 (complete with its L2 guests) between
 	 * machines of different natural widths (32 or 64 bit), we cannot have
-	 * unsigned long fields with no explict size. We use u64 (aliased
+	 * unsigned long fields with no explicit size. We use u64 (aliased
 	 * natural_width) instead. Luckily, x86 is little-endian.
 	 */
 	natural_width cr0_guest_host_mask;
@@ -4936,7 +4936,7 @@ static __init int alloc_kvm_area(void)
 		 * vmcs->revision_id to KVM_EVMCS_VERSION instead of
 		 * revision_id reported by MSR_IA32_VMX_BASIC.
 		 *
-		 * However, even though not explictly documented by
+		 * However, even though not explicitly documented by
 		 * TLFS, VMXArea passed as VMXON argument should
 		 * still be marked with revision_id reported by
 		 * physical CPU.
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d02937760c3b..e998f1396ec3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9280,7 +9280,7 @@ static void kvm_mmu_slot_apply_flags(struct kvm *kvm,
 	 * with dirty logging disabled in order to eliminate unnecessary GPA
 	 * logging in PML buffer (and potential PML buffer full VMEXT). This
 	 * guarantees leaving PML enabled during guest's lifetime won't have
-	 * any additonal overhead from PML when guest is running with dirty
+	 * any additional overhead from PML when guest is running with dirty
 	 * logging disabled for memory slots.
 	 *
 	 * kvm_x86_ops->slot_enable_log_dirty is called when switching new slot
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index db7a10082238..c353a0f1db39 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1704,7 +1704,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
 		/*
 		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
-		 * No need to cehck in that case
+		 * No need to check in that case
 		 */
 		if (*addr & ~PAGE_MASK) {
 			*addr &= PAGE_MASK;
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
index ce4b06733c09..b3233b1835ea 100644
--- a/arch/x86/platform/ce4100/ce4100.c
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -84,7 +84,7 @@ static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
 }
 
 static void ce4100_serial_fixup(int port, struct uart_port *up,
-				u32 *capabilites)
+				u32 *capabilities)
 {
 #ifdef CONFIG_EARLY_PRINTK
 	/*
@@ -111,7 +111,7 @@ static void ce4100_serial_fixup(int port, struct uart_port *up,
 	up->serial_in = ce4100_mem_serial_in;
 	up->serial_out = ce4100_mem_serial_out;
 
-	*capabilites |= (1 << 12);
+	*capabilities |= (1 << 12);
 }
 
 static __init void sdv_serial_fixup(void)
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
index dbfc5cf2aa93..96f438d4b026 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_bcm43xx.c
@@ -1,5 +1,5 @@
 /*
- * platform_bcm43xx.c: bcm43xx platform data initilization file
+ * platform_bcm43xx.c: bcm43xx platform data initialization file
  *
  * (C) Copyright 2016 Intel Corporation
  * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
index 27186ad654c9..7a7fc54c449b 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
@@ -1,5 +1,5 @@
 /*
- * spidev platform data initilization file
+ * spidev platform data initialization file
  *
  * (C) Copyright 2014, 2016 Intel Corporation
  * Authors: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
index 429a94192671..8344d5a928c9 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_pcal9555a.c
@@ -1,5 +1,5 @@
 /*
- * PCAL9555a platform data initilization file
+ * PCAL9555a platform data initialization file
  *
  * Copyright (C) 2016, Intel Corporation
  *
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
index 2e569d10f2d0..a9f2e888e135 100644
--- a/arch/x86/platform/intel/iosf_mbi.c
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -13,7 +13,7 @@
  *
  *
  * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a
- * mailbox interface (MBI) to communicate with mutiple devices. This
+ * mailbox interface (MBI) to communicate with multiple devices. This
  * driver implements access to this interface for those platforms that can
  * enumerate the device using PCI.
  */
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
index 7fa8b3b53bc0..d9b8a1c1ab0f 100644
--- a/arch/x86/platform/olpc/olpc-xo1-sci.c
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -109,7 +109,7 @@ static void detect_lid_state(void)
 	 * the edge detector hookup on the gpio inputs on the geode is
 	 * odd, to say the least. See http://dev.laptop.org/ticket/5703
 	 * for details, but in a nutshell: we don't use the edge
-	 * detectors. instead, we make use of an anomoly: with the both
+	 * detectors. instead, we make use of an anomaly: with the both
 	 * edge detectors turned off, we still get an edge event on a
 	 * positive edge transition. to take advantage of this, we use the
 	 * front-end inverter to ensure that that's the edge we're always
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
index 5f64f30873e2..b21a932c220c 100644
--- a/arch/x86/platform/uv/uv_nmi.c
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -560,7 +560,7 @@ static inline void uv_clear_nmi(int cpu)
 	}
 }
 
-/* Ping non-responding CPU's attemping to force them into the NMI handler */
+/* Ping non-responding CPU's attempting to force them into the NMI handler */
 static void uv_nmi_nr_cpus_ping(void)
 {
 	int cpu;
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 075ed47993bb..d5f303c0e656 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -493,7 +493,7 @@ static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages,
  * The remap information (which mfn remap to which pfn) is contained in the
  * to be remapped memory itself in a linked list anchored at xen_remap_mfn.
  * This scheme allows to remap the different chunks in arbitrary order while
- * the resulting mapping will be independant from the order.
+ * the resulting mapping will be independent from the order.
  */
 void __init xen_remap_memory(void)
 {