Diffstat (limited to 'include')
 include/asm-x86/seccomp_64.h | 1 +
 include/asm-x86/suspend_32.h | 5 +++++
 include/asm-x86/xor_32.h     | 5 +++++
 include/asm-x86/xor_64.h     | 5 +++++
 4 files changed, 16 insertions(+), 0 deletions(-)
diff --git a/include/asm-x86/seccomp_64.h b/include/asm-x86/seccomp_64.h
index 553af65a2287..76cfe69aa63c 100644
--- a/include/asm-x86/seccomp_64.h
+++ b/include/asm-x86/seccomp_64.h
@@ -1,4 +1,5 @@
 #ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
 
 #include <linux/thread_info.h>
 
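The hunk above is the whole bug: the header opened with #ifndef _ASM_SECCOMP_H but never defined that macro, so the condition stayed true on every inclusion and the guard did nothing. The one-line fix completes the standard include-guard idiom, sketched below with a hypothetical header name:

/* example.h -- hypothetical header, shown only to illustrate the idiom */
#ifndef EXAMPLE_H        /* true only the first time the file is seen */
#define EXAMPLE_H        /* the line seccomp_64.h was missing */

struct example_state {
	int value;
};

#endif /* EXAMPLE_H */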
diff --git a/include/asm-x86/suspend_32.h b/include/asm-x86/suspend_32.h
index 24e1c080aa8a..8675c6782a7d 100644
--- a/include/asm-x86/suspend_32.h
+++ b/include/asm-x86/suspend_32.h
@@ -3,6 +3,9 @@
  * Based on code
  * Copyright 2001 Patrick Mochel <mochel@osdl.org>
  */
+#ifndef __ASM_X86_32_SUSPEND_H
+#define __ASM_X86_32_SUSPEND_H
+
 #include <asm/desc.h>
 #include <asm/i387.h>
 
@@ -44,3 +47,5 @@ static inline void acpi_save_register_state(unsigned long return_point)
 /* routines for saving/restoring kernel state */
 extern int acpi_save_state_mem(void);
 #endif
+
+#endif /* __ASM_X86_32_SUSPEND_H */
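Worth noting in the second hunk: the file already ended with an #endif closing a conditional inside the header, so the new guard's #endif has to land after it rather than replace it. The resulting shape of suspend_32.h is roughly this (a sketch; the inner condition name is a stand-in):

#ifndef __ASM_X86_32_SUSPEND_H
#define __ASM_X86_32_SUSPEND_H

#include <asm/desc.h>
#include <asm/i387.h>

#ifdef SOME_CONFIG_OPTION       /* stand-in for the pre-existing inner conditional */
/* ... ACPI save/restore declarations ... */
extern int acpi_save_state_mem(void);
#endif                          /* the '#endif' visible as context above */

#endif /* __ASM_X86_32_SUSPEND_H */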
diff --git a/include/asm-x86/xor_32.h b/include/asm-x86/xor_32.h
index 067b5c1835a3..921b45840449 100644
--- a/include/asm-x86/xor_32.h
+++ b/include/asm-x86/xor_32.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_32_H
+#define ASM_X86__XOR_32_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -881,3 +884,5 @@ do { \
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) \
 	(cpu_has_xmm ? &xor_block_pIII_sse : FASTEST)
+
+#endif /* ASM_X86__XOR_32_H */
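The context lines above also show how these headers choose an implementation: XOR_SELECT_TEMPLATE tests cpu_has_xmm at runtime and returns the SSE template when it can, otherwise whichever fallback benchmarked fastest. A rough user-space analogue of that select-at-runtime pattern, with every name below invented for illustration:

#include <stdio.h>

/* Two interchangeable implementations of the same operation. */
static void xor_generic(unsigned long *a, const unsigned long *b, int n)
{
	for (int i = 0; i < n; i++)
		a[i] ^= b[i];
}

static void xor_simd(unsigned long *a, const unsigned long *b, int n)
{
	/* stand-in for an SSE-accelerated version */
	for (int i = 0; i < n; i++)
		a[i] ^= b[i];
}

/* Hypothetical feature probe; the kernel tests cpu_has_xmm instead. */
static int have_simd(void) { return 1; }

/* Mirrors the macro's shape: prefer SIMD when available, else the
 * fastest of the benchmarked fallbacks. */
#define SELECT_XOR(fastest) (have_simd() ? xor_simd : (fastest))

int main(void)
{
	unsigned long a[4] = {1, 2, 3, 4}, b[4] = {4, 3, 2, 1};
	void (*xor_fn)(unsigned long *, const unsigned long *, int) =
		SELECT_XOR(xor_generic);

	xor_fn(a, b, 4);
	printf("%lu %lu %lu %lu\n", a[0], a[1], a[2], a[3]);
	return 0;
}

The kernel version returns a pointer to a template struct rather than a bare function, but the selection logic is the same shape.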
diff --git a/include/asm-x86/xor_64.h b/include/asm-x86/xor_64.h
index 24957e39ac8a..2d3a18de295b 100644
--- a/include/asm-x86/xor_64.h
+++ b/include/asm-x86/xor_64.h
@@ -1,3 +1,6 @@
+#ifndef ASM_X86__XOR_64_H
+#define ASM_X86__XOR_64_H
+
 /*
  * Optimized RAID-5 checksumming functions for MMX and SSE.
  *
@@ -354,3 +357,5 @@ do { \
    We may also be able to load into the L1 only depending on how the cpu
    deals with a load to a line that is being prefetched. */
 #define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+
+#endif /* ASM_X86__XOR_64_H */
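The 64-bit variant needs no runtime test, since SSE is architecturally guaranteed on x86-64; its XOR_SELECT_TEMPLATE simply ignores FASTEST and returns &xor_block_sse, as the context above shows. With all four guards in place, each header survives double inclusion, which is trivial to smoke-test; a sketch against the hypothetical example.h from earlier:

/* double_include.c -- compiles cleanly only if the guard in example.h works */
#include "example.h"
#include "example.h"    /* second inclusion expands to nothing */

int main(void)
{
	struct example_state s = { .value = 42 };
	return s.value == 42 ? 0 : 1;
}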