Diffstat (limited to 'include')
36 files changed, 651 insertions(+), 167 deletions(-)
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index a62720a7edc0..ab0b85cf21f3 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -144,6 +144,7 @@ void __iomem *acpi_os_map_memory(acpi_physical_address where,
 			    acpi_size length);
 
 void acpi_os_unmap_memory(void __iomem * logical_address, acpi_size size);
+void early_acpi_os_unmap_memory(void __iomem * virt, acpi_size size);
 
 #ifdef ACPI_FUTURE_USAGE
 acpi_status
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c8e8cf45830f..cc40102fe2f3 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -130,6 +130,10 @@ acpi_get_table_header(acpi_string signature,
 		      struct acpi_table_header *out_table_header);
 
 acpi_status
+acpi_get_table_with_size(acpi_string signature,
+	       u32 instance, struct acpi_table_header **out_table,
+	       acpi_size *tbl_size);
+acpi_status
 acpi_get_table(acpi_string signature,
 	       u32 instance, struct acpi_table_header **out_table);
 
diff --git a/include/asm-frv/swab.h b/include/asm-frv/swab.h
index afb3396ba5ed..f305834b4799 100644
--- a/include/asm-frv/swab.h
+++ b/include/asm-frv/swab.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_SWAB_H
 #define _ASM_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 # define __SWAB_64_THRU_32__
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b0e63c672ebd..00f45ff081a6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -80,4 +80,56 @@ extern void setup_per_cpu_areas(void);
 #define DECLARE_PER_CPU(type, name) extern PER_CPU_ATTRIBUTES \
     __typeof__(type) per_cpu_var(name)
 
+/*
+ * Optional methods for optimized non-lvalue per-cpu variable access.
+ *
+ * @var can be a percpu variable or a field of it and its size should
+ * equal char, int or long.  percpu_read() evaluates to an lvalue and
+ * all others to void.
+ *
+ * These operations are guaranteed to be atomic w.r.t. preemption.
+ * The generic versions use plain get/put_cpu_var().  Archs are
+ * encouraged to implement single-instruction alternatives which don't
+ * require preemption protection.
+ */
+#ifndef percpu_read
+# define percpu_read(var) \
+  ({ \
+	typeof(per_cpu_var(var)) __tmp_var__; \
+	__tmp_var__ = get_cpu_var(var); \
+	put_cpu_var(var); \
+	__tmp_var__; \
+  })
+#endif
+
+#define __percpu_generic_to_op(var, val, op) \
+do { \
+	get_cpu_var(var) op val; \
+	put_cpu_var(var); \
+} while (0)
+
+#ifndef percpu_write
+# define percpu_write(var, val)		__percpu_generic_to_op(var, (val), =)
+#endif
+
+#ifndef percpu_add
+# define percpu_add(var, val)		__percpu_generic_to_op(var, (val), +=)
+#endif
+
+#ifndef percpu_sub
+# define percpu_sub(var, val)		__percpu_generic_to_op(var, (val), -=)
+#endif
+
+#ifndef percpu_and
+# define percpu_and(var, val)		__percpu_generic_to_op(var, (val), &=)
+#endif
+
+#ifndef percpu_or
+# define percpu_or(var, val)		__percpu_generic_to_op(var, (val), |=)
+#endif
+
+#ifndef percpu_xor
+# define percpu_xor(var, val)		__percpu_generic_to_op(var, (val), ^=)
+#endif
+
 #endif /* _ASM_GENERIC_PERCPU_H_ */
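The accessors above are meant to replace open-coded get_cpu_var()/put_cpu_var() pairs for simple scalar updates, so that an arch can later substitute single-instruction versions. A usage sketch (the counter below is hypothetical and not part of this patch):

    /* Hypothetical per-cpu event counter, shown only to illustrate the API. */
    DEFINE_PER_CPU(unsigned long, hypo_nr_events);

    static void hypo_count_event(void)
    {
            percpu_add(hypo_nr_events, 1);    /* atomic w.r.t. preemption */
    }

    static unsigned long hypo_read_events(void)
    {
            /* the generic fallback expands to get_cpu_var()/put_cpu_var() */
            return percpu_read(hypo_nr_events);
    }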
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index 79a7ff925bf8..4ce48e878530 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -9,7 +9,7 @@ extern char __bss_start[], __bss_stop[];
 extern char __init_begin[], __init_end[];
 extern char _sinittext[], _einittext[];
 extern char _end[];
-extern char __per_cpu_start[], __per_cpu_end[];
+extern char __per_cpu_load[], __per_cpu_start[], __per_cpu_end[];
 extern char __kprobes_text_start[], __kprobes_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __start_rodata[], __end_rodata[];
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index c61fab1dd2f8..5406e70aba86 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -430,12 +430,59 @@
 	*(.initcall7.init) \
 	*(.initcall7s.init)
 
+/**
+ * PERCPU_VADDR - define output section for percpu area
+ * @vaddr: explicit base address (optional)
+ * @phdr: destination PHDR (optional)
+ *
+ * Macro which expands to output section for percpu area.  If @vaddr
+ * is not blank, it specifies explicit base address and all percpu
+ * symbols will be offset from the given address.  If blank, @vaddr
+ * always equals @laddr + LOAD_OFFSET.
+ *
+ * @phdr defines the output PHDR to use if not blank.  Be warned that
+ * output PHDR is sticky.  If @phdr is specified, the next output
+ * section in the linker script will go there too.  @phdr should have
+ * a leading colon.
+ *
+ * Note that this macro defines __per_cpu_load as an absolute symbol.
+ * If there is no need to put the percpu section at a predetermined
+ * address, use PERCPU().
+ */
+#define PERCPU_VADDR(vaddr, phdr) \
+	VMLINUX_SYMBOL(__per_cpu_load) = .; \
+	.data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
+				- LOAD_OFFSET) { \
+		VMLINUX_SYMBOL(__per_cpu_start) = .; \
+		*(.data.percpu.first) \
+		*(.data.percpu.page_aligned) \
+		*(.data.percpu) \
+		*(.data.percpu.shared_aligned) \
+		VMLINUX_SYMBOL(__per_cpu_end) = .; \
+	} phdr \
+	. = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
+
+/**
+ * PERCPU - define output section for percpu area, simple version
+ * @align: required alignment
+ *
+ * Align to @align and outputs output section for percpu area.  This
+ * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
+ * __per_cpu_start will be identical.
+ *
+ * This macro is equivalent to ALIGN(align); PERCPU_VADDR( , ) except
+ * that __per_cpu_load is defined as a relative symbol against
+ * .data.percpu which is required for relocatable x86_32
+ * configuration.
+ */
 #define PERCPU(align) \
 	. = ALIGN(align); \
-	VMLINUX_SYMBOL(__per_cpu_start) = .; \
-	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+	.data.percpu : AT(ADDR(.data.percpu) - LOAD_OFFSET) { \
+		VMLINUX_SYMBOL(__per_cpu_load) = .; \
+		VMLINUX_SYMBOL(__per_cpu_start) = .; \
+		*(.data.percpu.first) \
 		*(.data.percpu.page_aligned) \
 		*(.data.percpu) \
 		*(.data.percpu.shared_aligned) \
-	} \
-	VMLINUX_SYMBOL(__per_cpu_end) = .;
+		VMLINUX_SYMBOL(__per_cpu_end) = .; \
+	}
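Both PERCPU() variants now emit __per_cpu_load alongside __per_cpu_start/__per_cpu_end, and the symbols are exported through asm-generic/sections.h above. A minimal sketch of consuming those section symbols from C, assuming nothing beyond the declarations themselves (the helper name is hypothetical):

    #include <asm-generic/sections.h>

    /* Size of the static percpu template bounded by the section symbols;
     * __per_cpu_load is where that template is loaded before being copied
     * out to each CPU's area. */
    static inline unsigned long hypo_static_percpu_size(void)
    {
            return __per_cpu_end - __per_cpu_start;
    }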
diff --git a/include/asm-m32r/swab.h b/include/asm-m32r/swab.h
index 97973e101825..54dab001d6d1 100644
--- a/include/asm-m32r/swab.h
+++ b/include/asm-m32r/swab.h
@@ -1,7 +1,7 @@
 #ifndef _ASM_M32R_SWAB_H
 #define _ASM_M32R_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #if !defined(__STRICT_ANSI__) || defined(__KERNEL__)
 # define __SWAB_64_THRU_32__
diff --git a/include/asm-mn10300/swab.h b/include/asm-mn10300/swab.h
index 4504d1b4b477..bd818a820ca8 100644
--- a/include/asm-mn10300/swab.h
+++ b/include/asm-mn10300/swab.h
@@ -11,7 +11,7 @@
 #ifndef _ASM_SWAB_H
 #define _ASM_SWAB_H
 
-#include <asm/types.h>
+#include <linux/types.h>
 
 #ifdef __GNUC__
 
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 6fce2fc2d124..78199151c00b 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -79,6 +79,7 @@ typedef int (*acpi_table_handler) (struct acpi_table_header *table);
 typedef int (*acpi_table_entry_handler) (struct acpi_subtable_header *header, const unsigned long end);
 
 char * __acpi_map_table (unsigned long phys_addr, unsigned long size);
+void __acpi_unmap_table(char *map, unsigned long size);
 int early_acpi_boot_init(void);
 int acpi_boot_init (void);
 int acpi_boot_table_init (void);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb5256..455d83219fae 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT		0
 #define BOOTMEM_EXCLUSIVE	(1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+			   unsigned long size,
+			   int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 				unsigned long physaddr,
 				unsigned long size,
 				int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-			     unsigned long align,
-			     unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
 				  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
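The wrapper macros, including the new per-node nopanic variant, are used like this sketch (the table and init function below are hypothetical, shown only to illustrate the calls):

    #include <linux/bootmem.h>

    static unsigned long *hypo_table;

    static void __init hypo_table_init(pg_data_t *pgdat, unsigned long entries)
    {
            /* page-aligned, node-local, returns NULL instead of panicking */
            hypo_table = alloc_bootmem_pages_node_nopanic(pgdat,
                                    entries * sizeof(*hypo_table));
            if (!hypo_table)
                    /* fall back to any node; this variant panics on failure */
                    hypo_table = alloc_bootmem(entries * sizeof(*hypo_table));
    }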
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 07ae8f846055..5b5d4731f956 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -6,6 +6,7 @@
 #define CODA_PSDEV_MAJOR 67
 #define MAX_CODADEVS  5	/* how many do we allow */
 
+#ifdef __KERNEL__
 struct kstatfs;
 
 /* communication pending/processing queues */
@@ -24,7 +25,6 @@ static inline struct venus_comm *coda_vcp(struct super_block *sb)
 	return (struct venus_comm *)((sb)->s_fs_info);
 }
 
-
 /* upcalls */
 int venus_rootfid(struct super_block *sb, struct CodaFid *fidp);
 int venus_getattr(struct super_block *sb, struct CodaFid *fid,
@@ -64,6 +64,12 @@ int coda_downcall(int opcode, union outputArgs *out, struct super_block *sb);
 int venus_fsync(struct super_block *sb, struct CodaFid *fid);
 int venus_statfs(struct dentry *dentry, struct kstatfs *sfs);
 
+/*
+ * Statistics
+ */
+
+extern struct venus_comm coda_comms[];
+#endif /* __KERNEL__ */
 
 /* messages between coda filesystem in kernel and Venus */
 struct upc_req {
@@ -82,11 +88,4 @@ struct upc_req {
 #define REQ_WRITE  0x4
 #define REQ_ABORT  0x8
 
-
-/*
- * Statistics
- */
-
-extern struct venus_comm coda_comms[];
-
 #endif
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644
index 000000000000..115272137a9c
--- /dev/null
+++ b/include/linux/decompress/bunzip2.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_BUNZIP2_H
+#define DECOMPRESS_BUNZIP2_H
+
+int bunzip2(unsigned char *inbuf, int len,
+	    int(*fill)(void*, unsigned int),
+	    int(*flush)(void*, unsigned int),
+	    unsigned char *output,
+	    int *pos,
+	    void(*error)(char *x));
+#endif
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644
index 000000000000..6dfb856327bb
--- /dev/null
+++ b/include/linux/decompress/generic.h
@@ -0,0 +1,33 @@
+#ifndef DECOMPRESS_GENERIC_H
+#define DECOMPRESS_GENERIC_H
+
+/* Minimal chunksize to be read.
+ * Bzip2 prefers at least 4096
+ * Lzma prefers 0x10000 */
+#define COMPR_IOBUF_SIZE 4096
+
+typedef int (*decompress_fn) (unsigned char *inbuf, int len,
+			      int(*fill)(void*, unsigned int),
+			      int(*writebb)(void*, unsigned int),
+			      unsigned char *output,
+			      int *posp,
+			      void(*error)(char *x));
+
+/* inbuf   - input buffer
+ * len     - length of pre-read data in inbuf
+ * fill    - function to fill inbuf if empty
+ * writebb - function to write out the output buffer
+ * posp    - if non-null, input position (number of bytes read) will be
+ *           returned here
+ *
+ * If len != 0, inbuf is initialized (with that much data) and fill
+ * should not be called.
+ * If len = 0, inbuf is allocated but empty; its size is IOBUF_SIZE and
+ * fill should be called (repeatedly...) to read data, at most IOBUF_SIZE
+ * at a time.
+ */
+
+/* Utility routine to detect the decompression method */
+decompress_fn decompress_method(const unsigned char *inbuf, int len,
+				const char **name);
+
+#endif
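A sketch of the calling convention described above, assuming a caller that already holds the whole compressed image in memory (the function and error-handler names are hypothetical):

    #include <linux/kernel.h>
    #include <linux/errno.h>
    #include <linux/decompress/generic.h>

    static void hypo_decomp_error(char *msg)
    {
            printk(KERN_ERR "decompression failed: %s\n", msg);
    }

    static int __init hypo_decompress(unsigned char *inbuf, int len,
                                      unsigned char *outbuf)
    {
            const char *name;
            decompress_fn decomp = decompress_method(inbuf, len, &name);

            if (!decomp)
                    return -EINVAL;
            /* len != 0: inbuf is pre-filled, so no fill callback is needed;
             * with no write-out callback, output goes straight to outbuf. */
            return decomp(inbuf, len, NULL, NULL, outbuf, NULL,
                          hypo_decomp_error);
    }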
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644
index 000000000000..f9b06ccc3e5c
--- /dev/null
+++ b/include/linux/decompress/inflate.h
@@ -0,0 +1,13 @@
+#ifndef INFLATE_H
+#define INFLATE_H
+
+/* Other housekeeping constants */
+#define INBUFSIZ 4096
+
+int gunzip(unsigned char *inbuf, int len,
+	   int(*fill)(void*, unsigned int),
+	   int(*flush)(void*, unsigned int),
+	   unsigned char *output,
+	   int *pos,
+	   void(*error_fn)(char *x));
+#endif
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644
index 000000000000..12ff8c3f1d05
--- /dev/null
+++ b/include/linux/decompress/mm.h
@@ -0,0 +1,87 @@
+/*
+ * linux/compr_mm.h
+ *
+ * Memory management for pre-boot and ramdisk uncompressors
+ *
+ * Authors: Alain Knaff <alain@knaff.lu>
+ *
+ */
+
+#ifndef DECOMPR_MM_H
+#define DECOMPR_MM_H
+
+#ifdef STATIC
+
+/* Code active when included from pre-boot environment: */
+
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+	void *p;
+
+	if (size < 0)
+		error("Malloc error");
+	if (!malloc_ptr)
+		malloc_ptr = free_mem_ptr;
+
+	malloc_ptr = (malloc_ptr + 3) & ~3;	/* Align */
+
+	p = (void *)malloc_ptr;
+	malloc_ptr += size;
+
+	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+		error("Out of memory");
+
+	malloc_count++;
+	return p;
+}
+
+static void free(void *where)
+{
+	malloc_count--;
+	if (!malloc_count)
+		malloc_ptr = free_mem_ptr;
+}
+
+#define large_malloc(a) malloc(a)
+#define large_free(a) free(a)
+
+#define set_error_fn(x)
+
+#define INIT
+
+#else /* STATIC */
+
+/* Code active when compiled standalone for use when loading ramdisk: */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Use defines rather than static inline in order to avoid spurious
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate) */
+
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+
+#define large_malloc(a) vmalloc(a)
+#define large_free(a) vfree(a)
+
+static void(*error)(char *m);
+#define set_error_fn(x) error = x;
+
+#define INIT __init
+#define STATIC
+
+#include <linux/init.h>
+
+#endif /* STATIC */
+
+#endif /* DECOMPR_MM_H */
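In the pre-boot (STATIC) case above, the including environment is expected to supply the scratch-heap bounds that the trivial bump allocator consumes. A sketch of that contract (the values are placeholders; real boot wrappers set them from the architecture's memory layout):

    #define STATIC static
    static unsigned long free_mem_ptr;      /* start of scratch heap */
    static unsigned long free_mem_end_ptr;  /* heap end; 0 disables the OOM check */
    #include <linux/decompress/mm.h>        /* now provides malloc()/free() above */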
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644
index 000000000000..7796538f1bf4
--- /dev/null
+++ b/include/linux/decompress/unlzma.h
@@ -0,0 +1,12 @@
+#ifndef DECOMPRESS_UNLZMA_H
+#define DECOMPRESS_UNLZMA_H
+
+int unlzma(unsigned char *, int,
+	   int(*fill)(void*, unsigned int),
+	   int(*flush)(void*, unsigned int),
+	   unsigned char *output,
+	   int *posp,
+	   void(*error)(char *x)
+	);
+
+#endif
diff --git a/include/linux/elfcore.h b/include/linux/elfcore.h
index 5ca54d77079f..7605c5e9589f 100644
--- a/include/linux/elfcore.h
+++ b/include/linux/elfcore.h
@@ -111,6 +111,15 @@ static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *re
 #endif
 }
 
+static inline void elf_core_copy_kernel_regs(elf_gregset_t *elfregs, struct pt_regs *regs)
+{
+#ifdef ELF_CORE_COPY_KERNEL_REGS
+	ELF_CORE_COPY_KERNEL_REGS((*elfregs), regs);
+#else
+	elf_core_copy_regs(elfregs, regs);
+#endif
+}
+
 static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs)
 {
 #ifdef ELF_CORE_COPY_TASK_REGS
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index dd20cd78faa8..0bbc15f54536 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -4,6 +4,7 @@
 #include <linux/mmzone.h>
 #include <linux/stddef.h>
 #include <linux/linkage.h>
+#include <linux/topology.h>
 
 struct vm_area_struct;
 
diff --git a/include/linux/in6.h b/include/linux/in6.h
index bc492048c349..718bf21c5754 100644
--- a/include/linux/in6.h
+++ b/include/linux/in6.h
@@ -44,11 +44,11 @@ struct in6_addr
  * NOTE: Be aware the IN6ADDR_* constants and in6addr_* externals are defined
  * in network byte order, not in host byte order as are the IPv4 equivalents
  */
+#ifdef __KERNEL__
 extern const struct in6_addr in6addr_any;
 #define IN6ADDR_ANY_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 } } }
 extern const struct in6_addr in6addr_loopback;
 #define IN6ADDR_LOOPBACK_INIT { { { 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
-#ifdef __KERNEL__
 extern const struct in6_addr in6addr_linklocal_allnodes;
 #define IN6ADDR_LINKLOCAL_ALLNODES_INIT \
 		{ { { 0xff,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1 } } }
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 91658d076598..0c9cb63e6895 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -484,6 +484,7 @@ int show_interrupts(struct seq_file *p, void *v);
 struct irq_desc;
 
 extern int early_irq_init(void);
+extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 6db939a575bd..873e4ac11b81 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -180,11 +180,11 @@ struct irq_desc {
 	unsigned int		irqs_unhandled;
 	spinlock_t		lock;
 #ifdef CONFIG_SMP
-	cpumask_t		affinity;
+	cpumask_var_t		affinity;
 	unsigned int		cpu;
-#endif
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-	cpumask_t		pending_mask;
+	cpumask_var_t		pending_mask;
+#endif
 #endif
 #ifdef CONFIG_PROC_FS
 	struct proc_dir_entry	*dir;
@@ -414,4 +414,84 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
 
 #endif /* !CONFIG_S390 */
 
+#ifdef CONFIG_SMP
+/**
+ * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @cpu:	cpu which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ * Side effect: affinity has all bits set, pending_mask has all bits clear.
+ */
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+					 bool boot)
+{
+	int node;
+
+	if (boot) {
+		alloc_bootmem_cpumask_var(&desc->affinity);
+		cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+		alloc_bootmem_cpumask_var(&desc->pending_mask);
+		cpumask_clear(desc->pending_mask);
+#endif
+		return true;
+	}
+
+	node = cpu_to_node(cpu);
+
+	if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+		return false;
+	cpumask_setall(desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+		free_cpumask_var(desc->affinity);
+		return false;
+	}
+	cpumask_clear(desc->pending_mask);
+#endif
+	return true;
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Ensures affinity and pending_masks are copied to new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASKS_OFFSTACK
+	cpumask_copy(new_desc->affinity, old_desc->affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+					 bool boot)
+{
+	return true;
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+#endif /* CONFIG_SMP */
+
 #endif /* _LINUX_IRQ_H */
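A sketch of how the helpers above are meant to be used by irq_desc setup and migration paths (the wrapper names are hypothetical):

    static bool hypo_setup_desc(struct irq_desc *desc, int cpu, bool boot)
    {
            /* allocates desc->affinity (and pending_mask) when the cpumasks
             * are allocated off-stack; returns false on allocation failure */
            return init_alloc_desc_masks(desc, cpu, boot);
    }

    static void hypo_move_desc(struct irq_desc *old_desc,
                               struct irq_desc *new_desc)
    {
            /* a no-op when the masks are embedded in irq_desc; copies them
             * when they live in separately allocated storage */
            init_copy_desc_masks(old_desc, new_desc);
    }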
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 52ebbb4b161d..ec87b212ff7d 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -20,6 +20,7 @@
 
 # define for_each_irq_desc_reverse(irq, desc) \
 	for (irq = nr_irqs - 1; irq >= 0; irq--)
+
 #else /* CONFIG_GENERIC_HARDIRQS */
 
 extern int nr_irqs;
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 32851eef48f0..2ec6cc14a114 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -182,6 +182,14 @@ struct kprobe_blackpoint {
 DECLARE_PER_CPU(struct kprobe *, current_kprobe);
 DECLARE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
+/*
+ * For #ifdef avoidance:
+ */
+static inline int kprobes_built_in(void)
+{
+	return 1;
+}
+
 #ifdef CONFIG_KRETPROBES
 extern void arch_prepare_kretprobe(struct kretprobe_instance *ri,
 				   struct pt_regs *regs);
@@ -271,8 +279,16 @@ void unregister_kretprobes(struct kretprobe **rps, int num);
 void kprobe_flush_task(struct task_struct *tk);
 void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head);
 
-#else /* CONFIG_KPROBES */
+#else /* !CONFIG_KPROBES: */
 
+static inline int kprobes_built_in(void)
+{
+	return 0;
+}
+static inline int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+	return 0;
+}
 static inline struct kprobe *get_kprobe(void *addr)
 {
 	return NULL;
@@ -329,5 +345,5 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num)
 static inline void kprobe_flush_task(struct task_struct *tk)
 {
 }
 #endif /* CONFIG_KPROBES */
 #endif /* _LINUX_KPROBES_H */
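The point of kprobes_built_in() and the !CONFIG_KPROBES kprobe_fault_handler() stub is that fault paths can drop their #ifdefs. A sketch of that pattern (the function name is hypothetical, mirroring how arch fault handlers typically check for kprobe faults):

    static inline int hypo_notify_page_fault(struct pt_regs *regs, int trapnr)
    {
            /* compiles away entirely when CONFIG_KPROBES is off, since both
             * helpers are then constant-zero inline stubs */
            if (kprobes_built_in())
                    return kprobe_fault_handler(regs, trapnr);
            return 0;
    }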
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 0b4df7eba852..5b4e28bcb788 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -49,4 +49,5 @@
 #define FUTEXFS_SUPER_MAGIC	0xBAD1DEA
 #define INOTIFYFS_SUPER_MAGIC	0x2BAD1DEA
 
+#define STACK_END_MAGIC		0x57AC6E9D
 #endif /* __LINUX_MAGIC_H__ */
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 139d7c88d9c9..3d1b7bde1283 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -1,5 +1,5 @@
-#ifndef MMIOTRACE_H
-#define MMIOTRACE_H
+#ifndef _LINUX_MMIOTRACE_H
+#define _LINUX_MMIOTRACE_H
 
 #include <linux/types.h>
 #include <linux/list.h>
@@ -13,28 +13,34 @@ typedef void (*kmmio_post_handler_t)(struct kmmio_probe *,
 				unsigned long condition, struct pt_regs *);
 
 struct kmmio_probe {
-	struct list_head list; /* kmmio internal list */
-	unsigned long addr; /* start location of the probe point */
-	unsigned long len; /* length of the probe region */
-	kmmio_pre_handler_t pre_handler; /* Called before addr is executed. */
-	kmmio_post_handler_t post_handler; /* Called after addr is executed */
-	void *private;
+	/* kmmio internal list: */
+	struct list_head	list;
+	/* start location of the probe point: */
+	unsigned long		addr;
+	/* length of the probe region: */
+	unsigned long		len;
+	/* Called before addr is executed: */
+	kmmio_pre_handler_t	pre_handler;
+	/* Called after addr is executed: */
+	kmmio_post_handler_t	post_handler;
+	void			*private;
 };
 
+extern unsigned int kmmio_count;
+
+extern int register_kmmio_probe(struct kmmio_probe *p);
+extern void unregister_kmmio_probe(struct kmmio_probe *p);
+
+#ifdef CONFIG_MMIOTRACE
 /* kmmio is active by some kmmio_probes? */
 static inline int is_kmmio_active(void)
 {
-	extern unsigned int kmmio_count;
 	return kmmio_count;
 }
 
-extern int register_kmmio_probe(struct kmmio_probe *p);
-extern void unregister_kmmio_probe(struct kmmio_probe *p);
-
 /* Called from page fault handler. */
 extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
 
-#ifdef CONFIG_MMIOTRACE
 /* Called from ioremap.c */
 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
 							void __iomem *addr);
@@ -43,7 +49,17 @@ extern void mmiotrace_iounmap(volatile void __iomem *addr);
 /* For anyone to insert markers. Remember trailing newline. */
 extern int mmiotrace_printk(const char *fmt, ...)
 				__attribute__ ((format (printf, 1, 2)));
-#else
+#else /* !CONFIG_MMIOTRACE: */
+static inline int is_kmmio_active(void)
+{
+	return 0;
+}
+
+static inline int kmmio_handler(struct pt_regs *regs, unsigned long addr)
+{
+	return 0;
+}
+
 static inline void mmiotrace_ioremap(resource_size_t offset,
 			unsigned long size, void __iomem *addr)
 {
@@ -63,28 +79,28 @@ static inline int mmiotrace_printk(const char *fmt, ...)
 #endif /* CONFIG_MMIOTRACE */
 
 enum mm_io_opcode {
 	MMIO_READ	= 0x1,	/* struct mmiotrace_rw */
 	MMIO_WRITE	= 0x2,	/* struct mmiotrace_rw */
 	MMIO_PROBE	= 0x3,	/* struct mmiotrace_map */
 	MMIO_UNPROBE	= 0x4,	/* struct mmiotrace_map */
 	MMIO_UNKNOWN_OP	= 0x5,	/* struct mmiotrace_rw */
 };
 
 struct mmiotrace_rw {
 	resource_size_t	phys;	/* PCI address of register */
 	unsigned long	value;
 	unsigned long	pc;	/* optional program counter */
 	int		map_id;
 	unsigned char	opcode;	/* one of MMIO_{READ,WRITE,UNKNOWN_OP} */
 	unsigned char	width;	/* size of register access in bytes */
 };
 
 struct mmiotrace_map {
 	resource_size_t	phys;	/* base address in PCI space */
 	unsigned long	virt;	/* base virtual address */
 	unsigned long	len;	/* mapping size */
 	int		map_id;
 	unsigned char	opcode;	/* MMIO_PROBE or MMIO_UNPROBE */
 };
 
 /* in kernel/trace/trace_mmiotrace.c */
@@ -94,4 +110,4 @@ extern void mmio_trace_rw(struct mmiotrace_rw *rw);
 extern void mmio_trace_mapping(struct mmiotrace_map *map);
 extern int mmio_trace_printk(const char *fmt, va_list args);
 
-#endif /* MMIOTRACE_H */
+#endif /* _LINUX_MMIOTRACE_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 1aca6cebbb78..e6aacf77986a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -764,12 +764,6 @@ extern int numa_zonelist_order_handler(struct ctl_table *, int,
 extern char numa_zonelist_order[];
 #define NUMA_ZONELIST_ORDER_LEN 16	/* string buffer size */
 
-#include <linux/topology.h>
-/* Returns the number of the current Node. */
-#ifndef numa_node_id
-#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
-#endif
-
 #ifndef CONFIG_NEED_MULTIPLE_NODES
 
 extern struct pglist_data contig_page_data;
diff --git a/include/linux/nubus.h b/include/linux/nubus.h
index 7382af374731..e137b3c486a7 100644
--- a/include/linux/nubus.h
+++ b/include/linux/nubus.h
@@ -237,6 +237,7 @@ struct nubus_dirent
 	int mask;
 };
 
+#ifdef __KERNEL__
 struct nubus_board {
 	struct nubus_board* next;
 	struct nubus_dev* first_dev;
@@ -351,6 +352,7 @@ void nubus_get_rsrc_mem(void* dest,
 void nubus_get_rsrc_str(void* dest,
 			const struct nubus_dirent *dirent,
 			int maxlen);
+#endif /* __KERNEL__ */
 
 /* We'd like to get rid of this eventually. Only daynaport.c uses it now. */
 static inline void *nubus_slot_addr(int slot)
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 9f2a3751873a..54a968b4b924 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -5,53 +5,66 @@
 #include <linux/slab.h> /* For kmalloc() */
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/pfn.h>
 
 #include <asm/percpu.h>
 
+#ifndef PER_CPU_BASE_SECTION
 #ifdef CONFIG_SMP
-#define DEFINE_PER_CPU(type, name) \
-	__attribute__((__section__(".data.percpu"))) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+#define PER_CPU_BASE_SECTION ".data.percpu"
+#else
+#define PER_CPU_BASE_SECTION ".data"
+#endif
+#endif
+
+#ifdef CONFIG_SMP
 
 #ifdef MODULE
-#define SHARED_ALIGNED_SECTION ".data.percpu"
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
 #else
-#define SHARED_ALIGNED_SECTION ".data.percpu.shared_aligned"
+#define PER_CPU_SHARED_ALIGNED_SECTION ".shared_aligned"
 #endif
+#define PER_CPU_FIRST_SECTION ".first"
 
-#define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	__attribute__((__section__(SHARED_ALIGNED_SECTION))) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name \
-	____cacheline_aligned_in_smp
+#else
+
+#define PER_CPU_SHARED_ALIGNED_SECTION ""
+#define PER_CPU_FIRST_SECTION ""
+
+#endif
 
-#define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-	__attribute__((__section__(".data.percpu.page_aligned"))) \
+#define DEFINE_PER_CPU_SECTION(type, name, section) \
+	__attribute__((__section__(PER_CPU_BASE_SECTION section))) \
 	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
-#else
+
 #define DEFINE_PER_CPU(type, name) \
-	PER_CPU_ATTRIBUTES __typeof__(type) per_cpu__##name
+	DEFINE_PER_CPU_SECTION(type, name, "")
 
 #define DEFINE_PER_CPU_SHARED_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_SHARED_ALIGNED_SECTION) \
+	____cacheline_aligned_in_smp
 
 #define DEFINE_PER_CPU_PAGE_ALIGNED(type, name) \
-	DEFINE_PER_CPU(type, name)
-#endif
+	DEFINE_PER_CPU_SECTION(type, name, ".page_aligned")
+
+#define DEFINE_PER_CPU_FIRST(type, name) \
+	DEFINE_PER_CPU_SECTION(type, name, PER_CPU_FIRST_SECTION)
 
 #define EXPORT_PER_CPU_SYMBOL(var) EXPORT_SYMBOL(per_cpu__##var)
 #define EXPORT_PER_CPU_SYMBOL_GPL(var) EXPORT_SYMBOL_GPL(per_cpu__##var)
 
-/* Enough to cover all DEFINE_PER_CPUs in kernel, including modules. */
-#ifndef PERCPU_ENOUGH_ROOM
+/* enough to cover all DEFINE_PER_CPUs in modules */
 #ifdef CONFIG_MODULES
-#define PERCPU_MODULE_RESERVE	8192
+#define PERCPU_MODULE_RESERVE	(8 << 10)
 #else
 #define PERCPU_MODULE_RESERVE	0
 #endif
 
+#ifndef PERCPU_ENOUGH_ROOM
 #define PERCPU_ENOUGH_ROOM \
-	(__per_cpu_end - __per_cpu_start + PERCPU_MODULE_RESERVE)
-#endif	/* PERCPU_ENOUGH_ROOM */
+	(ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) + \
+	 PERCPU_MODULE_RESERVE)
+#endif
 
 /*
  * Must be an lvalue. Since @var must be a simple identifier,
@@ -65,52 +78,90 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE	PFN_ALIGN(64 << 10)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk for dynamic percpu allocation if arch is
+ * manually allocating and mapping it for faster access (as a part of
+ * large page mapping for example).
+ *
+ * The following values give between one and two pages of free space
+ * after typical minimal boot (2-way SMP, single disk and NIC) with
+ * both defconfig and a distro config on x86_64 and 32.  More
+ * intelligent way to determine this would be nice.
+ */
+#if BITS_PER_LONG > 32
+#define PERCPU_DYNAMIC_RESERVE	(20 << 10)
+#else
+#define PERCPU_DYNAMIC_RESERVE	(12 << 10)
+#endif
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+				size_t static_size, size_t reserved_size,
+				ssize_t unit_size, ssize_t dyn_size,
+				void *base_addr,
+				pcpu_populate_pte_fn_t populate_pte_fn);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated.  Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+extern void *__alloc_reserved_percpu(size_t size, size_t align);
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu) \
-({ \
-	struct percpu_data *__p = __percpu_disguise(ptr); \
-	(__typeof__(ptr))__p->ptrs[(cpu)]; \
+
+#define per_cpu_ptr(ptr, cpu) \
+({ \
+	struct percpu_data *__p = __percpu_disguise(ptr); \
+	(__typeof__(ptr))__p->ptrs[(cpu)]; \
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
-					       cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
+						       __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
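A usage sketch of the renamed allocator interface (alloc_percpu() now passes the type's alignment, and per_cpu_ptr() replaces percpu_ptr(); the stats structure below is hypothetical):

    #include <linux/percpu.h>

    struct hypo_stats {
            unsigned long packets;
            unsigned long bytes;
    };

    static struct hypo_stats *hypo_stats;

    static int hypo_stats_init(void)
    {
            hypo_stats = alloc_percpu(struct hypo_stats);   /* zero-filled */
            return hypo_stats ? 0 : -ENOMEM;
    }

    static void hypo_stats_account(int cpu, unsigned long bytes)
    {
            struct hypo_stats *s = per_cpu_ptr(hypo_stats, cpu);

            s->packets++;
            s->bytes += bytes;
    }

    static void hypo_stats_exit(void)
    {
            free_percpu(hypo_stats);
    }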
diff --git a/include/linux/reiserfs_fs.h b/include/linux/reiserfs_fs.h index bc5114d35e99..e356c99f0659 100644 --- a/include/linux/reiserfs_fs.h +++ b/include/linux/reiserfs_fs.h | |||
@@ -28,8 +28,6 @@ | |||
28 | #include <linux/reiserfs_fs_sb.h> | 28 | #include <linux/reiserfs_fs_sb.h> |
29 | #endif | 29 | #endif |
30 | 30 | ||
31 | struct fid; | ||
32 | |||
33 | /* | 31 | /* |
34 | * include/linux/reiser_fs.h | 32 | * include/linux/reiser_fs.h |
35 | * | 33 | * |
@@ -37,6 +35,33 @@ struct fid; | |||
37 | * | 35 | * |
38 | */ | 36 | */ |
39 | 37 | ||
38 | /* ioctl's command */ | ||
39 | #define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) | ||
40 | /* define following flags to be the same as in ext2, so that chattr(1), | ||
41 | lsattr(1) will work with us. */ | ||
42 | #define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS | ||
43 | #define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS | ||
44 | #define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION | ||
45 | #define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION | ||
46 | |||
47 | #ifdef __KERNEL__ | ||
48 | /* the 32 bit compat definitions with int argument */ | ||
49 | #define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int) | ||
50 | #define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS | ||
51 | #define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS | ||
52 | #define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION | ||
53 | #define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION | ||
54 | |||
55 | /* Locking primitives */ | ||
56 | /* Right now we are still falling back to (un)lock_kernel, but eventually that | ||
57 | would evolve into real per-fs locks */ | ||
58 | #define reiserfs_write_lock( sb ) lock_kernel() | ||
59 | #define reiserfs_write_unlock( sb ) unlock_kernel() | ||
60 | |||
61 | /* xattr stuff */ | ||
62 | #define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem) | ||
63 | struct fid; | ||
64 | |||
40 | /* in reading the #defines, it may help to understand that they employ | 65 | /* in reading the #defines, it may help to understand that they employ |
41 | the following abbreviations: | 66 | the following abbreviations: |
42 | 67 | ||
@@ -698,6 +723,7 @@ static inline void cpu_key_k_offset_dec(struct cpu_key *key) | |||
698 | /* object identifier for root dir */ | 723 | /* object identifier for root dir */ |
699 | #define REISERFS_ROOT_OBJECTID 2 | 724 | #define REISERFS_ROOT_OBJECTID 2 |
700 | #define REISERFS_ROOT_PARENT_OBJECTID 1 | 725 | #define REISERFS_ROOT_PARENT_OBJECTID 1 |
726 | |||
701 | extern struct reiserfs_key root_key; | 727 | extern struct reiserfs_key root_key; |
702 | 728 | ||
703 | /* | 729 | /* |
@@ -1540,7 +1566,6 @@ struct reiserfs_iget_args { | |||
1540 | /* FUNCTION DECLARATIONS */ | 1566 | /* FUNCTION DECLARATIONS */ |
1541 | /***************************************************************************/ | 1567 | /***************************************************************************/ |
1542 | 1568 | ||
1543 | /*#ifdef __KERNEL__*/ | ||
1544 | #define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12) | 1569 | #define get_journal_desc_magic(bh) (bh->b_data + bh->b_size - 12) |
1545 | 1570 | ||
1546 | #define journal_trans_half(blocksize) \ | 1571 | #define journal_trans_half(blocksize) \ |
@@ -2178,29 +2203,6 @@ long reiserfs_compat_ioctl(struct file *filp, | |||
2178 | unsigned int cmd, unsigned long arg); | 2203 | unsigned int cmd, unsigned long arg); |
2179 | int reiserfs_unpack(struct inode *inode, struct file *filp); | 2204 | int reiserfs_unpack(struct inode *inode, struct file *filp); |
2180 | 2205 | ||
2181 | /* ioctl's command */ | ||
2182 | #define REISERFS_IOC_UNPACK _IOW(0xCD,1,long) | ||
2183 | /* define following flags to be the same as in ext2, so that chattr(1), | ||
2184 | lsattr(1) will work with us. */ | ||
2185 | #define REISERFS_IOC_GETFLAGS FS_IOC_GETFLAGS | ||
2186 | #define REISERFS_IOC_SETFLAGS FS_IOC_SETFLAGS | ||
2187 | #define REISERFS_IOC_GETVERSION FS_IOC_GETVERSION | ||
2188 | #define REISERFS_IOC_SETVERSION FS_IOC_SETVERSION | ||
2189 | |||
2190 | /* the 32 bit compat definitions with int argument */ | ||
2191 | #define REISERFS_IOC32_UNPACK _IOW(0xCD, 1, int) | ||
2192 | #define REISERFS_IOC32_GETFLAGS FS_IOC32_GETFLAGS | ||
2193 | #define REISERFS_IOC32_SETFLAGS FS_IOC32_SETFLAGS | ||
2194 | #define REISERFS_IOC32_GETVERSION FS_IOC32_GETVERSION | ||
2195 | #define REISERFS_IOC32_SETVERSION FS_IOC32_SETVERSION | ||
2196 | |||
2197 | /* Locking primitives */ | ||
2198 | /* Right now we are still falling back to (un)lock_kernel, but eventually that | ||
2199 | would evolve into real per-fs locks */ | ||
2200 | #define reiserfs_write_lock( sb ) lock_kernel() | ||
2201 | #define reiserfs_write_unlock( sb ) unlock_kernel() | ||
2202 | |||
2203 | /* xattr stuff */ | ||
2204 | #define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem) | ||
2205 | 2206 | ||
2207 | #endif /* __KERNEL__ */ | ||
2206 | #endif /* _LINUX_REISER_FS_H */ | 2208 | #endif /* _LINUX_REISER_FS_H */ |
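The reiserfs hunks above move the 32-bit compat ioctl numbers and the reiserfs_write_lock()/reiserfs_write_unlock() wrappers into the __KERNEL__-only part of the header and drop the duplicate block further down; both wrappers still expand to the big kernel lock. A minimal sketch of how such a wrapper is typically used around an on-disk update, with an illustrative function name and body that are not part of this patch:

#include <linux/reiserfs_fs.h>
#include <linux/smp_lock.h>

/* Illustrative only: serialize a metadata update behind the per-sb
 * write lock, which at this stage still expands to lock_kernel(). */
static int example_reiserfs_update(struct super_block *sb)
{
	int err = 0;

	reiserfs_write_lock(sb);	/* currently lock_kernel() */
	/* ... modify filesystem metadata ... */
	reiserfs_write_unlock(sb);	/* currently unlock_kernel() */

	return err;
}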
diff --git a/include/linux/sched.h b/include/linux/sched.h index 011db2f4c94c..46d680643f89 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -1178,10 +1178,9 @@ struct task_struct { | |||
1178 | pid_t pid; | 1178 | pid_t pid; |
1179 | pid_t tgid; | 1179 | pid_t tgid; |
1180 | 1180 | ||
1181 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
1182 | /* Canary value for the -fstack-protector gcc feature */ | 1181 | /* Canary value for the -fstack-protector gcc feature */ |
1183 | unsigned long stack_canary; | 1182 | unsigned long stack_canary; |
1184 | #endif | 1183 | |
1185 | /* | 1184 | /* |
1186 | * pointers to (original) parent process, youngest child, younger sibling, | 1185 | * pointers to (original) parent process, youngest child, younger sibling, |
1187 | * older sibling, respectively. (p->father can be replaced with | 1186 | * older sibling, respectively. (p->father can be replaced with |
@@ -2090,6 +2089,19 @@ static inline int object_is_on_stack(void *obj) | |||
2090 | 2089 | ||
2091 | extern void thread_info_cache_init(void); | 2090 | extern void thread_info_cache_init(void); |
2092 | 2091 | ||
2092 | #ifdef CONFIG_DEBUG_STACK_USAGE | ||
2093 | static inline unsigned long stack_not_used(struct task_struct *p) | ||
2094 | { | ||
2095 | unsigned long *n = end_of_stack(p); | ||
2096 | |||
2097 | do { /* Skip over canary */ | ||
2098 | n++; | ||
2099 | } while (!*n); | ||
2100 | |||
2101 | return (unsigned long)n - (unsigned long)end_of_stack(p); | ||
2102 | } | ||
2103 | #endif | ||
2104 | |||
2093 | /* set thread flags in other task's structures | 2105 | /* set thread flags in other task's structures |
2094 | * - see asm/thread_info.h for TIF_xxxx flags available | 2106 | * - see asm/thread_info.h for TIF_xxxx flags available |
2095 | */ | 2107 | */ |
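The sched.h change makes task_struct::stack_canary unconditional and adds stack_not_used(), which walks up from end_of_stack() past the end-of-stack sentinel and counts how many bytes of the kernel stack are still zero, i.e. were never written. A hedged sketch of how a CONFIG_DEBUG_STACK_USAGE consumer might report that headroom (the function name and message are illustrative):

#include <linux/sched.h>
#include <linux/kernel.h>

#ifdef CONFIG_DEBUG_STACK_USAGE
/* Illustrative only: print how much of a task's kernel stack has
 * never been touched. */
static void report_stack_headroom(struct task_struct *p)
{
	printk(KERN_DEBUG "%s/%d: %lu bytes of stack never used\n",
	       p->comm, task_pid_nr(p), stack_not_used(p));
}
#endif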
diff --git a/include/linux/smp.h b/include/linux/smp.h index 715196b09d67..bbacb7baa446 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -176,6 +176,12 @@ static inline void init_call_single_data(void) | |||
176 | #define put_cpu() preempt_enable() | 176 | #define put_cpu() preempt_enable() |
177 | #define put_cpu_no_resched() preempt_enable_no_resched() | 177 | #define put_cpu_no_resched() preempt_enable_no_resched() |
178 | 178 | ||
179 | /* | ||
180 | * Callback to arch code if there's nosmp or maxcpus=0 on the | ||
181 | * boot command line: | ||
182 | */ | ||
183 | extern void arch_disable_smp_support(void); | ||
184 | |||
179 | void smp_setup_processor_id(void); | 185 | void smp_setup_processor_id(void); |
180 | 186 | ||
181 | #endif /* __LINUX_SMP_H */ | 187 | #endif /* __LINUX_SMP_H */ |
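arch_disable_smp_support() gives the core boot code a hook to call when "nosmp" or "maxcpus=0" is passed on the command line, so an architecture can shut down SMP-only hardware it would otherwise have set up. A hedged sketch of the expected shape, assuming a weak no-op default that individual architectures override (the body is an assumption, not part of this diff):

#include <linux/smp.h>
#include <linux/init.h>

/* Illustrative default: a weak no-op that core code can always call;
 * an architecture would override it, e.g. to leave interrupt-routing
 * hardware that is only needed with multiple CPUs switched off. */
void __weak arch_disable_smp_support(void)
{
}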
diff --git a/include/linux/socket.h b/include/linux/socket.h index 20fc4bbfca42..afc01909a428 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -24,10 +24,12 @@ struct __kernel_sockaddr_storage { | |||
24 | #include <linux/types.h> /* pid_t */ | 24 | #include <linux/types.h> /* pid_t */ |
25 | #include <linux/compiler.h> /* __user */ | 25 | #include <linux/compiler.h> /* __user */ |
26 | 26 | ||
27 | #ifdef CONFIG_PROC_FS | 27 | #ifdef __KERNEL__ |
28 | # ifdef CONFIG_PROC_FS | ||
28 | struct seq_file; | 29 | struct seq_file; |
29 | extern void socket_seq_show(struct seq_file *seq); | 30 | extern void socket_seq_show(struct seq_file *seq); |
30 | #endif | 31 | # endif |
32 | #endif /* __KERNEL__ */ | ||
31 | 33 | ||
32 | typedef unsigned short sa_family_t; | 34 | typedef unsigned short sa_family_t; |
33 | 35 | ||
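The socket.h hunk wraps the seq_file forward declaration and the socket_seq_show() prototype in __KERNEL__ so the exported header no longer leaks kernel-only symbols to userspace. A hedged sketch of the intended consumer, a procfs ->show() method that delegates to socket_seq_show() (the handler name is illustrative):

#ifdef __KERNEL__
#ifdef CONFIG_PROC_FS
#include <linux/seq_file.h>
#include <linux/socket.h>

/* Illustrative only: socket_seq_show() prints the socket usage
 * summary into a seq_file, as the /proc/net/sockstat handler does. */
static int example_sockstat_show(struct seq_file *seq, void *v)
{
	socket_seq_show(seq);
	return 0;
}
#endif
#endif /* __KERNEL__ */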
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h new file mode 100644 index 000000000000..6f3e54c704c0 --- /dev/null +++ b/include/linux/stackprotector.h | |||
@@ -0,0 +1,16 @@ | |||
1 | #ifndef _LINUX_STACKPROTECTOR_H | ||
2 | #define _LINUX_STACKPROTECTOR_H 1 | ||
3 | |||
4 | #include <linux/compiler.h> | ||
5 | #include <linux/sched.h> | ||
6 | #include <linux/random.h> | ||
7 | |||
8 | #ifdef CONFIG_CC_STACKPROTECTOR | ||
9 | # include <asm/stackprotector.h> | ||
10 | #else | ||
11 | static inline void boot_init_stack_canary(void) | ||
12 | { | ||
13 | } | ||
14 | #endif | ||
15 | |||
16 | #endif | ||
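The new <linux/stackprotector.h> gives every configuration a boot_init_stack_canary(): the real implementation comes from <asm/stackprotector.h> when CONFIG_CC_STACKPROTECTOR is set, and a no-op otherwise, so early boot code can call it unconditionally. A hedged sketch of that call site, assuming an illustrative setup function:

#include <linux/stackprotector.h>
#include <linux/init.h>

/* Illustrative only: seed the initial task's canary exactly once,
 * before any -fstack-protector-instrumented frames can check it. */
static void __init example_early_setup(void)
{
	boot_init_stack_canary();
	/* ... rest of early boot setup ... */
}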
diff --git a/include/linux/topology.h b/include/linux/topology.h index e632d29f0544..7402c1a27c4f 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -38,11 +38,7 @@ | |||
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | #ifndef nr_cpus_node | 40 | #ifndef nr_cpus_node |
41 | #define nr_cpus_node(node) \ | 41 | #define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) |
42 | ({ \ | ||
43 | node_to_cpumask_ptr(__tmp__, node); \ | ||
44 | cpus_weight(*__tmp__); \ | ||
45 | }) | ||
46 | #endif | 42 | #endif |
47 | 43 | ||
48 | #define for_each_node_with_cpus(node) \ | 44 | #define for_each_node_with_cpus(node) \ |
@@ -193,5 +189,16 @@ int arch_update_cpu_topology(void); | |||
193 | #ifndef topology_core_siblings | 189 | #ifndef topology_core_siblings |
194 | #define topology_core_siblings(cpu) cpumask_of_cpu(cpu) | 190 | #define topology_core_siblings(cpu) cpumask_of_cpu(cpu) |
195 | #endif | 191 | #endif |
192 | #ifndef topology_thread_cpumask | ||
193 | #define topology_thread_cpumask(cpu) cpumask_of(cpu) | ||
194 | #endif | ||
195 | #ifndef topology_core_cpumask | ||
196 | #define topology_core_cpumask(cpu) cpumask_of(cpu) | ||
197 | #endif | ||
198 | |||
199 | /* Returns the number of the current Node. */ | ||
200 | #ifndef numa_node_id | ||
201 | #define numa_node_id() (cpu_to_node(raw_smp_processor_id())) | ||
202 | #endif | ||
196 | 203 | ||
197 | #endif /* _LINUX_TOPOLOGY_H */ | 204 | #endif /* _LINUX_TOPOLOGY_H */ |
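The topology.h rework replaces the cpumask-by-value helpers with pointer-based ones: nr_cpus_node() now goes through cpumask_weight(cpumask_of_node()), generic fallbacks are added for topology_thread_cpumask()/topology_core_cpumask(), and numa_node_id() gains a default built on cpu_to_node(raw_smp_processor_id()). A hedged sketch of how callers combine the new helpers with the usual cpumask accessors (the function is illustrative):

#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/smp.h>
#include <linux/kernel.h>

/* Illustrative only: dump a few figures using the reworked helpers. */
static void example_dump_topology(void)
{
	int node, cpu = raw_smp_processor_id();

	pr_debug("running on node %d\n", numa_node_id());

	for_each_node_with_cpus(node)
		pr_debug("node %d: %u cpus\n", node, nr_cpus_node(node));

	pr_debug("cpu %d: %u core siblings\n", cpu,
		 cpumask_weight(topology_core_cpumask(cpu)));
}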
diff --git a/include/linux/types.h b/include/linux/types.h index 712ca53bc348..fca82ed55f49 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -1,6 +1,9 @@ | |||
1 | #ifndef _LINUX_TYPES_H | 1 | #ifndef _LINUX_TYPES_H |
2 | #define _LINUX_TYPES_H | 2 | #define _LINUX_TYPES_H |
3 | 3 | ||
4 | #include <asm/types.h> | ||
5 | |||
6 | #ifndef __ASSEMBLY__ | ||
4 | #ifdef __KERNEL__ | 7 | #ifdef __KERNEL__ |
5 | 8 | ||
6 | #define DECLARE_BITMAP(name,bits) \ | 9 | #define DECLARE_BITMAP(name,bits) \ |
@@ -9,7 +12,6 @@ | |||
9 | #endif | 12 | #endif |
10 | 13 | ||
11 | #include <linux/posix_types.h> | 14 | #include <linux/posix_types.h> |
12 | #include <asm/types.h> | ||
13 | 15 | ||
14 | #ifndef __KERNEL_STRICT_NAMES | 16 | #ifndef __KERNEL_STRICT_NAMES |
15 | 17 | ||
@@ -212,5 +214,5 @@ struct ustat { | |||
212 | }; | 214 | }; |
213 | 215 | ||
214 | #endif /* __KERNEL__ */ | 216 | #endif /* __KERNEL__ */ |
215 | 217 | #endif /* __ASSEMBLY__ */ | |
216 | #endif /* _LINUX_TYPES_H */ | 218 | #endif /* _LINUX_TYPES_H */ |
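Moving <asm/types.h> to the top of linux/types.h and guarding the rest with #ifndef __ASSEMBLY__ makes the header safe to include from assembly files, which then pick up only the asm-level type definitions. A hedged illustration of what this permits on the C side (the variable names are made up):

#include <linux/types.h>

/* Illustrative only: a C translation unit still sees the full set of
 * typedefs, while an .S file including the same header now compiles
 * cleanly with just the <asm/types.h> portion. */
static u64 example_counter;
static u32 example_flags;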
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 9c0890c7a06a..a43ebec3a7b9 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr); | |||
95 | 95 | ||
96 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, | 96 | extern int map_vm_area(struct vm_struct *area, pgprot_t prot, |
97 | struct page ***pages); | 97 | struct page ***pages); |
98 | extern int map_kernel_range_noflush(unsigned long start, unsigned long size, | ||
99 | pgprot_t prot, struct page **pages); | ||
100 | extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size); | ||
98 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); | 101 | extern void unmap_kernel_range(unsigned long addr, unsigned long size); |
99 | 102 | ||
100 | /* Allocate/destroy a 'vmalloc' VM area. */ | 103 | /* Allocate/destroy a 'vmalloc' VM area. */ |
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count); | |||
110 | */ | 113 | */ |
111 | extern rwlock_t vmlist_lock; | 114 | extern rwlock_t vmlist_lock; |
112 | extern struct vm_struct *vmlist; | 115 | extern struct vm_struct *vmlist; |
116 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); | ||
113 | 117 | ||
114 | #endif /* _LINUX_VMALLOC_H */ | 118 | #endif /* _LINUX_VMALLOC_H */ |
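The vmalloc.h additions export map_kernel_range_noflush()/unmap_kernel_range_noflush(), which set up or tear down kernel page-table entries without the implicit cache and TLB flushes so a caller can batch several ranges and flush once, plus vm_area_register_early() for registering a vm_struct before vmalloc_init() runs. A hedged sketch of the noflush pairing (the wrapper and its arguments are illustrative):

#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>

/* Illustrative only: map pages without the implicit flushes, then
 * flush the cache once for the whole range ourselves. */
static int example_map_batch(unsigned long addr, unsigned long size,
			     struct page **pages)
{
	int ret;

	ret = map_kernel_range_noflush(addr, size, PAGE_KERNEL, pages);
	if (ret < 0)
		return ret;

	flush_cache_vmap(addr, addr + size);	/* caller owns the flushes */
	return 0;
}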