Diffstat (limited to 'include/asm-generic')
-rw-r--r--  include/asm-generic/audit_change_attr.h      4
-rw-r--r--  include/asm-generic/audit_dir_write.h       14
-rw-r--r--  include/asm-generic/audit_read.h             5
-rw-r--r--  include/asm-generic/audit_write.h            2
-rw-r--r--  include/asm-generic/bitops/find.h            4
-rw-r--r--  include/asm-generic/bitops/le.h              7
-rw-r--r--  include/asm-generic/bug.h                   37
-rw-r--r--  include/asm-generic/cacheflush.h             5
-rw-r--r--  include/asm-generic/pgtable.h               12
-rw-r--r--  include/asm-generic/ptrace.h                74
-rw-r--r--  include/asm-generic/resource.h               2
-rw-r--r--  include/asm-generic/tlb.h                  156
-rw-r--r--  include/asm-generic/unistd.h               221
-rw-r--r--  include/asm-generic/vmlinux.lds.h           95
-rw-r--r--  include/asm-generic/xor.h                    2
15 files changed, 413 insertions, 227 deletions
diff --git a/include/asm-generic/audit_change_attr.h b/include/asm-generic/audit_change_attr.h
index bcbab3e4a3b..89b73e5d0fd 100644
--- a/include/asm-generic/audit_change_attr.h
+++ b/include/asm-generic/audit_change_attr.h
@@ -1,4 +1,6 @@
1#ifdef __NR_chmod
1__NR_chmod, 2__NR_chmod,
3#endif
2__NR_fchmod, 4__NR_fchmod,
3#ifdef __NR_chown 5#ifdef __NR_chown
4__NR_chown, 6__NR_chown,
@@ -20,7 +22,9 @@ __NR_chown32,
20__NR_fchown32, 22__NR_fchown32,
21__NR_lchown32, 23__NR_lchown32,
22#endif 24#endif
25#ifdef __NR_link
23__NR_link, 26__NR_link,
27#endif
24#ifdef __NR_linkat 28#ifdef __NR_linkat
25__NR_linkat, 29__NR_linkat,
26#endif 30#endif
diff --git a/include/asm-generic/audit_dir_write.h b/include/asm-generic/audit_dir_write.h
index 6621bd82cbe..7b61db4fe72 100644
--- a/include/asm-generic/audit_dir_write.h
+++ b/include/asm-generic/audit_dir_write.h
@@ -1,13 +1,27 @@
1#ifdef __NR_rename
1__NR_rename, 2__NR_rename,
3#endif
4#ifdef __NR_mkdir
2__NR_mkdir, 5__NR_mkdir,
6#endif
7#ifdef __NR_rmdir
3__NR_rmdir, 8__NR_rmdir,
9#endif
4#ifdef __NR_creat 10#ifdef __NR_creat
5__NR_creat, 11__NR_creat,
6#endif 12#endif
13#ifdef __NR_link
7__NR_link, 14__NR_link,
15#endif
16#ifdef __NR_unlink
8__NR_unlink, 17__NR_unlink,
18#endif
19#ifdef __NR_symlink
9__NR_symlink, 20__NR_symlink,
21#endif
22#ifdef __NR_mknod
10__NR_mknod, 23__NR_mknod,
24#endif
11#ifdef __NR_mkdirat 25#ifdef __NR_mkdirat
12__NR_mkdirat, 26__NR_mkdirat,
13__NR_mknodat, 27__NR_mknodat,
diff --git a/include/asm-generic/audit_read.h b/include/asm-generic/audit_read.h
index 0e87464d984..3b249cb857d 100644
--- a/include/asm-generic/audit_read.h
+++ b/include/asm-generic/audit_read.h
@@ -1,4 +1,6 @@
1#ifdef __NR_readlink
1__NR_readlink, 2__NR_readlink,
3#endif
2__NR_quotactl, 4__NR_quotactl,
3__NR_listxattr, 5__NR_listxattr,
4__NR_llistxattr, 6__NR_llistxattr,
@@ -6,3 +8,6 @@ __NR_flistxattr,
6__NR_getxattr, 8__NR_getxattr,
7__NR_lgetxattr, 9__NR_lgetxattr,
8__NR_fgetxattr, 10__NR_fgetxattr,
11#ifdef __NR_readlinkat
12__NR_readlinkat,
13#endif
diff --git a/include/asm-generic/audit_write.h b/include/asm-generic/audit_write.h
index c5f1c2c920e..e7020c57b13 100644
--- a/include/asm-generic/audit_write.h
+++ b/include/asm-generic/audit_write.h
@@ -4,7 +4,9 @@ __NR_acct,
4__NR_swapon, 4__NR_swapon,
5#endif 5#endif
6__NR_quotactl, 6__NR_quotactl,
7#ifdef __NR_truncate
7__NR_truncate, 8__NR_truncate,
9#endif
8#ifdef __NR_truncate64 10#ifdef __NR_truncate64
9__NR_truncate64, 11__NR_truncate64,
10#endif 12#endif
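The audit_*.h fragments above are not self-standing headers: they are bare lists of syscall numbers meant to be pasted into array initializers, which is why every entry that may be absent on a given architecture now gets its own #ifdef guard. A minimal sketch of the consuming pattern (assumed from the usual arch audit code convention, not part of this diff):

/* Sketch: building audit syscall classes from these fragments
 * (assumption: mirrors the arch/<arch>/kernel/audit.c convention). */
static unsigned dir_class[] = {
#include <asm-generic/audit_dir_write.h>
~0U
};

static unsigned write_class[] = {
#include <asm-generic/audit_write.h>
~0U
};

With the new guards, architectures whose unistd.h lacks legacy calls such as __NR_chmod or __NR_link can reuse the same lists unchanged.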
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 110fa700f85..71c778033f5 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,6 +1,7 @@
1#ifndef _ASM_GENERIC_BITOPS_FIND_H_ 1#ifndef _ASM_GENERIC_BITOPS_FIND_H_
2#define _ASM_GENERIC_BITOPS_FIND_H_ 2#define _ASM_GENERIC_BITOPS_FIND_H_
3 3
4#ifndef find_next_bit
4/** 5/**
5 * find_next_bit - find the next set bit in a memory region 6 * find_next_bit - find the next set bit in a memory region
6 * @addr: The address to base the search on 7 * @addr: The address to base the search on
@@ -9,7 +10,9 @@
9 */ 10 */
10extern unsigned long find_next_bit(const unsigned long *addr, unsigned long 11extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
11 size, unsigned long offset); 12 size, unsigned long offset);
13#endif
12 14
15#ifndef find_next_zero_bit
13/** 16/**
14 * find_next_zero_bit - find the next cleared bit in a memory region 17 * find_next_zero_bit - find the next cleared bit in a memory region
15 * @addr: The address to base the search on 18 * @addr: The address to base the search on
@@ -18,6 +21,7 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
18 */ 21 */
19extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned 22extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
20 long size, unsigned long offset); 23 long size, unsigned long offset);
24#endif
21 25
22#ifdef CONFIG_GENERIC_FIND_FIRST_BIT 26#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
23 27
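The new #ifndef find_next_bit / #ifndef find_next_zero_bit guards let an architecture supply its own optimized versions while still pulling in the rest of the generic header. A minimal sketch of the opt-out, using a hypothetical arch_find_next_bit() (illustrative only, not part of this diff):

/* In a hypothetical arch <asm/bitops.h>, before the generic include: */
extern unsigned long arch_find_next_bit(const unsigned long *addr,
					unsigned long size,
					unsigned long offset);
#define find_next_bit(addr, size, offset) \
	arch_find_next_bit(addr, size, offset)

#include <asm-generic/bitops/find.h>	/* generic declaration is now skipped */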
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h
index 946a21b1b5d..f95c663a6a4 100644
--- a/include/asm-generic/bitops/le.h
+++ b/include/asm-generic/bitops/le.h
@@ -30,13 +30,20 @@ static inline unsigned long find_first_zero_bit_le(const void *addr,
30 30
31#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) 31#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
32 32
33#ifndef find_next_zero_bit_le
33extern unsigned long find_next_zero_bit_le(const void *addr, 34extern unsigned long find_next_zero_bit_le(const void *addr,
34 unsigned long size, unsigned long offset); 35 unsigned long size, unsigned long offset);
36#endif
37
38#ifndef find_next_bit_le
35extern unsigned long find_next_bit_le(const void *addr, 39extern unsigned long find_next_bit_le(const void *addr,
36 unsigned long size, unsigned long offset); 40 unsigned long size, unsigned long offset);
41#endif
37 42
43#ifndef find_first_zero_bit_le
38#define find_first_zero_bit_le(addr, size) \ 44#define find_first_zero_bit_le(addr, size) \
39 find_next_zero_bit_le((addr), (size), 0) 45 find_next_zero_bit_le((addr), (size), 0)
46#endif
40 47
41#else 48#else
42#error "Please fix <asm/byteorder.h>" 49#error "Please fix <asm/byteorder.h>"
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index e5a3f588000..91784841e40 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -162,9 +162,46 @@ extern void warn_slowpath_null(const char *file, const int line);
162 unlikely(__ret_warn_once); \ 162 unlikely(__ret_warn_once); \
163}) 163})
164 164
165#ifdef CONFIG_PRINTK
166
165#define WARN_ON_RATELIMIT(condition, state) \ 167#define WARN_ON_RATELIMIT(condition, state) \
166 WARN_ON((condition) && __ratelimit(state)) 168 WARN_ON((condition) && __ratelimit(state))
167 169
170#define __WARN_RATELIMIT(condition, state, format...) \
171({ \
172 int rtn = 0; \
173 if (unlikely(__ratelimit(state))) \
174 rtn = WARN(condition, format); \
175 rtn; \
176})
177
178#define WARN_RATELIMIT(condition, format...) \
179({ \
180 static DEFINE_RATELIMIT_STATE(_rs, \
181 DEFAULT_RATELIMIT_INTERVAL, \
182 DEFAULT_RATELIMIT_BURST); \
183 __WARN_RATELIMIT(condition, &_rs, format); \
184})
185
186#else
187
188#define WARN_ON_RATELIMIT(condition, state) \
189 WARN_ON(condition)
190
191#define __WARN_RATELIMIT(condition, state, format...) \
192({ \
193 int rtn = WARN(condition, format); \
194 rtn; \
195})
196
197#define WARN_RATELIMIT(condition, format...) \
198({ \
199 int rtn = WARN(condition, format); \
200 rtn; \
201})
202
203#endif
204
168/* 205/*
169 * WARN_ON_SMP() is for cases that the warning is either 206 * WARN_ON_SMP() is for cases that the warning is either
170 * meaningless for !SMP or may even cause failures. 207 * meaningless for !SMP or may even cause failures.
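The new helpers pair WARN() with a ratelimit state so a condition that can fire in a tight loop only produces a handful of stack traces per interval: WARN_RATELIMIT() declares its own static state, while __WARN_RATELIMIT() takes one from the caller. A usage sketch (assumed typical caller, not taken from this patch):

#include <linux/bug.h>
#include <linux/ratelimit.h>

static void validate_len(unsigned int len)
{
	/* At most DEFAULT_RATELIMIT_BURST warnings per
	 * DEFAULT_RATELIMIT_INTERVAL; silently rate-limited after that. */
	WARN_RATELIMIT(len > 4096, "suspicious length %u\n", len);
}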
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index 57b5c3c82e8..87bc536ccde 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -24,7 +24,10 @@
24#define flush_cache_vunmap(start, end) do { } while (0) 24#define flush_cache_vunmap(start, end) do { } while (0)
25 25
26#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ 26#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
27 memcpy(dst, src, len) 27 do { \
28 memcpy(dst, src, len); \
29 flush_icache_user_range(vma, page, vaddr, len); \
30 } while (0)
28#define copy_from_user_page(vma, page, vaddr, dst, src, len) \ 31#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
29 memcpy(dst, src, len) 32 memcpy(dst, src, len)
30 33
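The generic copy_to_user_page() now flushes the instruction cache after the copy. That matters because this helper is what the kernel uses when writing into another task's pages on its behalf, e.g. a debugger planting a breakpoint through ptrace. Roughly the shape of such a caller (paraphrased from the access_process_vm() write path; an assumption, not part of this diff):

/* Paraphrased shape of a remote-write caller (assumption). */
static void poke_remote(struct vm_area_struct *vma, struct page *page,
			unsigned long addr, const void *buf, int bytes)
{
	void *maddr = kmap(page);

	copy_to_user_page(vma, page, addr,
			  maddr + (addr & ~PAGE_MASK), buf, bytes);
	set_page_dirty_lock(page);
	kunmap(page);
}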
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index b4bfe338ea0..e9b8e5926be 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -184,22 +184,18 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
184#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 184#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
185#endif 185#endif
186 186
187#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY 187#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
188#define page_test_dirty(page) (0) 188#define page_test_and_clear_dirty(pfn, mapped) (0)
189#endif 189#endif
190 190
191#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY 191#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
192#define page_clear_dirty(page, mapped) do { } while (0)
193#endif
194
195#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
196#define pte_maybe_dirty(pte) pte_dirty(pte) 192#define pte_maybe_dirty(pte) pte_dirty(pte)
197#else 193#else
198#define pte_maybe_dirty(pte) (1) 194#define pte_maybe_dirty(pte) (1)
199#endif 195#endif
200 196
201#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG 197#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
202#define page_test_and_clear_young(page) (0) 198#define page_test_and_clear_young(pfn) (0)
203#endif 199#endif
204 200
205#ifndef __HAVE_ARCH_PGD_OFFSET_GATE 201#ifndef __HAVE_ARCH_PGD_OFFSET_GATE
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h
new file mode 100644
index 00000000000..82e674f6b33
--- /dev/null
+++ b/include/asm-generic/ptrace.h
@@ -0,0 +1,74 @@
/*
 * Common low level (register) ptrace helpers
 *
 * Copyright 2004-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#ifndef __ASM_GENERIC_PTRACE_H__
#define __ASM_GENERIC_PTRACE_H__

#ifndef __ASSEMBLY__

/* Helpers for working with the instruction pointer */
#ifndef GET_IP
#define GET_IP(regs) ((regs)->pc)
#endif
#ifndef SET_IP
#define SET_IP(regs, val) (GET_IP(regs) = (val))
#endif

static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return GET_IP(regs);
}
static inline void instruction_pointer_set(struct pt_regs *regs,
                                           unsigned long val)
{
	SET_IP(regs, val);
}

#ifndef profile_pc
#define profile_pc(regs) instruction_pointer(regs)
#endif

/* Helpers for working with the user stack pointer */
#ifndef GET_USP
#define GET_USP(regs) ((regs)->usp)
#endif
#ifndef SET_USP
#define SET_USP(regs, val) (GET_USP(regs) = (val))
#endif

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return GET_USP(regs);
}
static inline void user_stack_pointer_set(struct pt_regs *regs,
                                          unsigned long val)
{
	SET_USP(regs, val);
}

/* Helpers for working with the frame pointer */
#ifndef GET_FP
#define GET_FP(regs) ((regs)->fp)
#endif
#ifndef SET_FP
#define SET_FP(regs, val) (GET_FP(regs) = (val))
#endif

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return GET_FP(regs);
}
static inline void frame_pointer_set(struct pt_regs *regs,
                                     unsigned long val)
{
	SET_FP(regs, val);
}

#endif /* __ASSEMBLY__ */

#endif
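The header above is meant to be included from an architecture's own <asm/ptrace.h> after it has defined struct pt_regs and, where the defaults don't fit, the GET_*/SET_* accessors. A minimal sketch for a hypothetical architecture (names are illustrative, not from this diff):

/* arch/foo/include/asm/ptrace.h (hypothetical) */
struct pt_regs {
	unsigned long gpr[16];
	unsigned long sp;
	unsigned long pc;
};

/* This arch keeps its frame pointer in gpr[14] and has no 'usp'/'fp'
 * fields, so override the accessors before pulling in the helpers: */
#define GET_FP(regs)	((regs)->gpr[14])
#define GET_USP(regs)	((regs)->sp)
/* The GET_IP() default of (regs)->pc already matches this layout. */

#include <asm-generic/ptrace.h>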
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index 587566f95f6..61fa862fe08 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -78,7 +78,7 @@
78 [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \ 78 [RLIMIT_CORE] = { 0, RLIM_INFINITY }, \
79 [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 79 [RLIMIT_RSS] = { RLIM_INFINITY, RLIM_INFINITY }, \
80 [RLIMIT_NPROC] = { 0, 0 }, \ 80 [RLIMIT_NPROC] = { 0, 0 }, \
81 [RLIMIT_NOFILE] = { INR_OPEN, INR_OPEN }, \ 81 [RLIMIT_NOFILE] = { INR_OPEN_CUR, INR_OPEN_MAX }, \
82 [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \ 82 [RLIMIT_MEMLOCK] = { MLOCK_LIMIT, MLOCK_LIMIT }, \
83 [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 83 [RLIMIT_AS] = { RLIM_INFINITY, RLIM_INFINITY }, \
84 [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \ 84 [RLIMIT_LOCKS] = { RLIM_INFINITY, RLIM_INFINITY }, \
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index e43f9766259..e58fa777fa0 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -5,6 +5,8 @@
5 * Copyright 2001 Red Hat, Inc. 5 * Copyright 2001 Red Hat, Inc.
6 * Based on code from mm/memory.c Copyright Linus Torvalds and others. 6 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
7 * 7 *
8 * Copyright 2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
9 *
8 * This program is free software; you can redistribute it and/or 10 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License 11 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version 12 * as published by the Free Software Foundation; either version
@@ -17,97 +19,111 @@
17#include <asm/pgalloc.h> 19#include <asm/pgalloc.h>
18#include <asm/tlbflush.h> 20#include <asm/tlbflush.h>
19 21
22#ifdef CONFIG_HAVE_RCU_TABLE_FREE
20/* 23/*
21 * For UP we don't need to worry about TLB flush 24 * Semi RCU freeing of the page directories.
22 * and page free order so much.. 25 *
26 * This is needed by some architectures to implement software pagetable walkers.
27 *
28 * gup_fast() and other software pagetable walkers do a lockless page-table
29 * walk and therefore needs some synchronization with the freeing of the page
30 * directories. The chosen means to accomplish that is by disabling IRQs over
31 * the walk.
32 *
33 * Architectures that use IPIs to flush TLBs will then automagically DTRT,
34 * since we unlink the page, flush TLBs, free the page. Since the disabling of
35 * IRQs delays the completion of the TLB flush we can never observe an already
36 * freed page.
37 *
38 * Architectures that do not have this (PPC) need to delay the freeing by some
39 * other means, this is that means.
40 *
41 * What we do is batch the freed directory pages (tables) and RCU free them.
42 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
43 * holds off grace periods.
44 *
45 * However, in order to batch these pages we need to allocate storage, this
46 * allocation is deep inside the MM code and can thus easily fail on memory
47 * pressure. To guarantee progress we fall back to single table freeing, see
48 * the implementation of tlb_remove_table_one().
49 *
23 */ 50 */
24#ifdef CONFIG_SMP 51struct mmu_table_batch {
25 #ifdef ARCH_FREE_PTR_NR 52 struct rcu_head rcu;
26 #define FREE_PTR_NR ARCH_FREE_PTR_NR 53 unsigned int nr;
27 #else 54 void *tables[0];
28 #define FREE_PTE_NR 506 55};
29 #endif 56
30 #define tlb_fast_mode(tlb) ((tlb)->nr == ~0U) 57#define MAX_TABLE_BATCH \
31#else 58 ((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))
32 #define FREE_PTE_NR 1 59
33 #define tlb_fast_mode(tlb) 1 60extern void tlb_table_flush(struct mmu_gather *tlb);
61extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
62
34#endif 63#endif
35 64
36/* struct mmu_gather is an opaque type used by the mm code for passing around 65/*
37 * any data needed by arch specific code for tlb_remove_page. 66 * If we can't allocate a page to make a big batch of page pointers
67 * to work on, then just handle a few from the on-stack structure.
38 */ 68 */
39struct mmu_gather { 69#define MMU_GATHER_BUNDLE 8
40 struct mm_struct *mm; 70
41 unsigned int nr; /* set to ~0U means fast mode */ 71struct mmu_gather_batch {
42 unsigned int need_flush;/* Really unmapped some ptes? */ 72 struct mmu_gather_batch *next;
43 unsigned int fullmm; /* non-zero means full mm flush */ 73 unsigned int nr;
44 struct page * pages[FREE_PTE_NR]; 74 unsigned int max;
75 struct page *pages[0];
45}; 76};
46 77
47/* Users of the generic TLB shootdown code must declare this storage space. */ 78#define MAX_GATHER_BATCH \
48DECLARE_PER_CPU(struct mmu_gather, mmu_gathers); 79 ((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))
49 80
50/* tlb_gather_mmu 81/* struct mmu_gather is an opaque type used by the mm code for passing around
51 * Return a pointer to an initialized struct mmu_gather. 82 * any data needed by arch specific code for tlb_remove_page.
52 */ 83 */
53static inline struct mmu_gather * 84struct mmu_gather {
54tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush) 85 struct mm_struct *mm;
55{ 86#ifdef CONFIG_HAVE_RCU_TABLE_FREE
56 struct mmu_gather *tlb = &get_cpu_var(mmu_gathers); 87 struct mmu_table_batch *batch;
57 88#endif
58 tlb->mm = mm; 89 unsigned int need_flush : 1, /* Did free PTEs */
90 fast_mode : 1; /* No batching */
59 91
60 /* Use fast mode if only one CPU is online */ 92 unsigned int fullmm;
61 tlb->nr = num_online_cpus() > 1 ? 0U : ~0U;
62 93
63 tlb->fullmm = full_mm_flush; 94 struct mmu_gather_batch *active;
95 struct mmu_gather_batch local;
96 struct page *__pages[MMU_GATHER_BUNDLE];
97};
64 98
65 return tlb; 99#define HAVE_GENERIC_MMU_GATHER
66}
67 100
68static inline void 101static inline int tlb_fast_mode(struct mmu_gather *tlb)
69tlb_flush_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
70{ 102{
71 if (!tlb->need_flush) 103#ifdef CONFIG_SMP
72 return; 104 return tlb->fast_mode;
73 tlb->need_flush = 0; 105#else
74 tlb_flush(tlb); 106 /*
75 if (!tlb_fast_mode(tlb)) { 107 * For UP we don't need to worry about TLB flush
76 free_pages_and_swap_cache(tlb->pages, tlb->nr); 108 * and page free order so much..
77 tlb->nr = 0; 109 */
78 } 110 return 1;
111#endif
79} 112}
80 113
81/* tlb_finish_mmu 114void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, bool fullmm);
82 * Called at the end of the shootdown operation to free up any resources 115void tlb_flush_mmu(struct mmu_gather *tlb);
83 * that were required. 116void tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end);
84 */ 117int __tlb_remove_page(struct mmu_gather *tlb, struct page *page);
85static inline void
86tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
87{
88 tlb_flush_mmu(tlb, start, end);
89
90 /* keep the page table cache within bounds */
91 check_pgt_cache();
92
93 put_cpu_var(mmu_gathers);
94}
95 118
96/* tlb_remove_page 119/* tlb_remove_page
97 * Must perform the equivalent to __free_pte(pte_get_and_clear(ptep)), while 120 * Similar to __tlb_remove_page but will call tlb_flush_mmu() itself when
98 * handling the additional races in SMP caused by other CPUs caching valid 121 * required.
99 * mappings in their TLBs.
100 */ 122 */
101static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page) 123static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
102{ 124{
103 tlb->need_flush = 1; 125 if (!__tlb_remove_page(tlb, page))
104 if (tlb_fast_mode(tlb)) { 126 tlb_flush_mmu(tlb);
105 free_page_and_swap_cache(page);
106 return;
107 }
108 tlb->pages[tlb->nr++] = page;
109 if (tlb->nr >= FREE_PTE_NR)
110 tlb_flush_mmu(tlb, 0, 0);
111} 127}
112 128
113/** 129/**
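With this rework the gather structure lives on the caller's stack instead of in the per-CPU mmu_gathers variable, and batching grows dynamically, falling back to the small on-stack bundle under memory pressure. A usage sketch of the new calling convention (the shape of an unmap path; an assumption, not copied from this patch):

static void unmap_range_sketch(struct mm_struct *mm,
			       unsigned long start, unsigned long end)
{
	struct mmu_gather tlb;			/* on the caller's stack now */

	tlb_gather_mmu(&tlb, mm, false);	/* fullmm == false */

	/* ... walk the page tables; for every page being unmapped call:
	 *	tlb_remove_page(&tlb, page);
	 * tlb_remove_page() batches the page and calls tlb_flush_mmu()
	 * itself once the current batch fills up. */

	tlb_finish_mmu(&tlb, start, end);	/* final flush and free */
}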
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
index 07c40d5149d..33d52470488 100644
--- a/include/asm-generic/unistd.h
+++ b/include/asm-generic/unistd.h
@@ -24,16 +24,24 @@
24#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64) 24#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
25#endif 25#endif
26 26
27#ifdef __SYSCALL_COMPAT
28#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _comp)
29#define __SC_COMP_3264(_nr, _32, _64, _comp) __SYSCALL(_nr, _comp)
30#else
31#define __SC_COMP(_nr, _sys, _comp) __SYSCALL(_nr, _sys)
32#define __SC_COMP_3264(_nr, _32, _64, _comp) __SC_3264(_nr, _32, _64)
33#endif
34
27#define __NR_io_setup 0 35#define __NR_io_setup 0
28__SYSCALL(__NR_io_setup, sys_io_setup) 36__SC_COMP(__NR_io_setup, sys_io_setup, compat_sys_io_setup)
29#define __NR_io_destroy 1 37#define __NR_io_destroy 1
30__SYSCALL(__NR_io_destroy, sys_io_destroy) 38__SYSCALL(__NR_io_destroy, sys_io_destroy)
31#define __NR_io_submit 2 39#define __NR_io_submit 2
32__SYSCALL(__NR_io_submit, sys_io_submit) 40__SC_COMP(__NR_io_submit, sys_io_submit, compat_sys_io_submit)
33#define __NR_io_cancel 3 41#define __NR_io_cancel 3
34__SYSCALL(__NR_io_cancel, sys_io_cancel) 42__SYSCALL(__NR_io_cancel, sys_io_cancel)
35#define __NR_io_getevents 4 43#define __NR_io_getevents 4
36__SYSCALL(__NR_io_getevents, sys_io_getevents) 44__SC_COMP(__NR_io_getevents, sys_io_getevents, compat_sys_io_getevents)
37 45
38/* fs/xattr.c */ 46/* fs/xattr.c */
39#define __NR_setxattr 5 47#define __NR_setxattr 5
@@ -67,7 +75,7 @@ __SYSCALL(__NR_getcwd, sys_getcwd)
67 75
68/* fs/cookies.c */ 76/* fs/cookies.c */
69#define __NR_lookup_dcookie 18 77#define __NR_lookup_dcookie 18
70__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie) 78__SC_COMP(__NR_lookup_dcookie, sys_lookup_dcookie, compat_sys_lookup_dcookie)
71 79
72/* fs/eventfd.c */ 80/* fs/eventfd.c */
73#define __NR_eventfd2 19 81#define __NR_eventfd2 19
@@ -79,7 +87,7 @@ __SYSCALL(__NR_epoll_create1, sys_epoll_create1)
79#define __NR_epoll_ctl 21 87#define __NR_epoll_ctl 21
80__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl) 88__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
81#define __NR_epoll_pwait 22 89#define __NR_epoll_pwait 22
82__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait) 90__SC_COMP(__NR_epoll_pwait, sys_epoll_pwait, compat_sys_epoll_pwait)
83 91
84/* fs/fcntl.c */ 92/* fs/fcntl.c */
85#define __NR_dup 23 93#define __NR_dup 23
@@ -87,7 +95,7 @@ __SYSCALL(__NR_dup, sys_dup)
87#define __NR_dup3 24 95#define __NR_dup3 24
88__SYSCALL(__NR_dup3, sys_dup3) 96__SYSCALL(__NR_dup3, sys_dup3)
89#define __NR3264_fcntl 25 97#define __NR3264_fcntl 25
90__SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl) 98__SC_COMP_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl, compat_sys_fcntl64)
91 99
92/* fs/inotify_user.c */ 100/* fs/inotify_user.c */
93#define __NR_inotify_init1 26 101#define __NR_inotify_init1 26
@@ -99,7 +107,7 @@ __SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
99 107
100/* fs/ioctl.c */ 108/* fs/ioctl.c */
101#define __NR_ioctl 29 109#define __NR_ioctl 29
102__SYSCALL(__NR_ioctl, sys_ioctl) 110__SC_COMP(__NR_ioctl, sys_ioctl, compat_sys_ioctl)
103 111
104/* fs/ioprio.c */ 112/* fs/ioprio.c */
105#define __NR_ioprio_set 30 113#define __NR_ioprio_set 30
@@ -129,26 +137,30 @@ __SYSCALL(__NR_renameat, sys_renameat)
129#define __NR_umount2 39 137#define __NR_umount2 39
130__SYSCALL(__NR_umount2, sys_umount) 138__SYSCALL(__NR_umount2, sys_umount)
131#define __NR_mount 40 139#define __NR_mount 40
132__SYSCALL(__NR_mount, sys_mount) 140__SC_COMP(__NR_mount, sys_mount, compat_sys_mount)
133#define __NR_pivot_root 41 141#define __NR_pivot_root 41
134__SYSCALL(__NR_pivot_root, sys_pivot_root) 142__SYSCALL(__NR_pivot_root, sys_pivot_root)
135 143
136/* fs/nfsctl.c */ 144/* fs/nfsctl.c */
137#define __NR_nfsservctl 42 145#define __NR_nfsservctl 42
138__SYSCALL(__NR_nfsservctl, sys_nfsservctl) 146__SC_COMP(__NR_nfsservctl, sys_nfsservctl, compat_sys_nfsservctl)
139 147
140/* fs/open.c */ 148/* fs/open.c */
141#define __NR3264_statfs 43 149#define __NR3264_statfs 43
142__SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs) 150__SC_COMP_3264(__NR3264_statfs, sys_statfs64, sys_statfs, \
151 compat_sys_statfs64)
143#define __NR3264_fstatfs 44 152#define __NR3264_fstatfs 44
144__SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs) 153__SC_COMP_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs, \
154 compat_sys_fstatfs64)
145#define __NR3264_truncate 45 155#define __NR3264_truncate 45
146__SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate) 156__SC_COMP_3264(__NR3264_truncate, sys_truncate64, sys_truncate, \
157 compat_sys_truncate64)
147#define __NR3264_ftruncate 46 158#define __NR3264_ftruncate 46
148__SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate) 159__SC_COMP_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate, \
160 compat_sys_ftruncate64)
149 161
150#define __NR_fallocate 47 162#define __NR_fallocate 47
151__SYSCALL(__NR_fallocate, sys_fallocate) 163__SC_COMP(__NR_fallocate, sys_fallocate, compat_sys_fallocate)
152#define __NR_faccessat 48 164#define __NR_faccessat 48
153__SYSCALL(__NR_faccessat, sys_faccessat) 165__SYSCALL(__NR_faccessat, sys_faccessat)
154#define __NR_chdir 49 166#define __NR_chdir 49
@@ -166,7 +178,7 @@ __SYSCALL(__NR_fchownat, sys_fchownat)
166#define __NR_fchown 55 178#define __NR_fchown 55
167__SYSCALL(__NR_fchown, sys_fchown) 179__SYSCALL(__NR_fchown, sys_fchown)
168#define __NR_openat 56 180#define __NR_openat 56
169__SYSCALL(__NR_openat, sys_openat) 181__SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
170#define __NR_close 57 182#define __NR_close 57
171__SYSCALL(__NR_close, sys_close) 183__SYSCALL(__NR_close, sys_close)
172#define __NR_vhangup 58 184#define __NR_vhangup 58
@@ -182,7 +194,7 @@ __SYSCALL(__NR_quotactl, sys_quotactl)
182 194
183/* fs/readdir.c */ 195/* fs/readdir.c */
184#define __NR_getdents64 61 196#define __NR_getdents64 61
185__SYSCALL(__NR_getdents64, sys_getdents64) 197__SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
186 198
187/* fs/read_write.c */ 199/* fs/read_write.c */
188#define __NR3264_lseek 62 200#define __NR3264_lseek 62
@@ -192,17 +204,17 @@ __SYSCALL(__NR_read, sys_read)
192#define __NR_write 64 204#define __NR_write 64
193__SYSCALL(__NR_write, sys_write) 205__SYSCALL(__NR_write, sys_write)
194#define __NR_readv 65 206#define __NR_readv 65
195__SYSCALL(__NR_readv, sys_readv) 207__SC_COMP(__NR_readv, sys_readv, compat_sys_readv)
196#define __NR_writev 66 208#define __NR_writev 66
197__SYSCALL(__NR_writev, sys_writev) 209__SC_COMP(__NR_writev, sys_writev, compat_sys_writev)
198#define __NR_pread64 67 210#define __NR_pread64 67
199__SYSCALL(__NR_pread64, sys_pread64) 211__SC_COMP(__NR_pread64, sys_pread64, compat_sys_pread64)
200#define __NR_pwrite64 68 212#define __NR_pwrite64 68
201__SYSCALL(__NR_pwrite64, sys_pwrite64) 213__SC_COMP(__NR_pwrite64, sys_pwrite64, compat_sys_pwrite64)
202#define __NR_preadv 69 214#define __NR_preadv 69
203__SYSCALL(__NR_preadv, sys_preadv) 215__SC_COMP(__NR_preadv, sys_preadv, compat_sys_preadv)
204#define __NR_pwritev 70 216#define __NR_pwritev 70
205__SYSCALL(__NR_pwritev, sys_pwritev) 217__SC_COMP(__NR_pwritev, sys_pwritev, compat_sys_pwritev)
206 218
207/* fs/sendfile.c */ 219/* fs/sendfile.c */
208#define __NR3264_sendfile 71 220#define __NR3264_sendfile 71
@@ -210,17 +222,17 @@ __SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
210 222
211/* fs/select.c */ 223/* fs/select.c */
212#define __NR_pselect6 72 224#define __NR_pselect6 72
213__SYSCALL(__NR_pselect6, sys_pselect6) 225__SC_COMP(__NR_pselect6, sys_pselect6, compat_sys_pselect6)
214#define __NR_ppoll 73 226#define __NR_ppoll 73
215__SYSCALL(__NR_ppoll, sys_ppoll) 227__SC_COMP(__NR_ppoll, sys_ppoll, compat_sys_ppoll)
216 228
217/* fs/signalfd.c */ 229/* fs/signalfd.c */
218#define __NR_signalfd4 74 230#define __NR_signalfd4 74
219__SYSCALL(__NR_signalfd4, sys_signalfd4) 231__SC_COMP(__NR_signalfd4, sys_signalfd4, compat_sys_signalfd4)
220 232
221/* fs/splice.c */ 233/* fs/splice.c */
222#define __NR_vmsplice 75 234#define __NR_vmsplice 75
223__SYSCALL(__NR_vmsplice, sys_vmsplice) 235__SC_COMP(__NR_vmsplice, sys_vmsplice, compat_sys_vmsplice)
224#define __NR_splice 76 236#define __NR_splice 76
225__SYSCALL(__NR_splice, sys_splice) 237__SYSCALL(__NR_splice, sys_splice)
226#define __NR_tee 77 238#define __NR_tee 77
@@ -243,23 +255,27 @@ __SYSCALL(__NR_fsync, sys_fsync)
243__SYSCALL(__NR_fdatasync, sys_fdatasync) 255__SYSCALL(__NR_fdatasync, sys_fdatasync)
244#ifdef __ARCH_WANT_SYNC_FILE_RANGE2 256#ifdef __ARCH_WANT_SYNC_FILE_RANGE2
245#define __NR_sync_file_range2 84 257#define __NR_sync_file_range2 84
246__SYSCALL(__NR_sync_file_range2, sys_sync_file_range2) 258__SC_COMP(__NR_sync_file_range2, sys_sync_file_range2, \
259 compat_sys_sync_file_range2)
247#else 260#else
248#define __NR_sync_file_range 84 261#define __NR_sync_file_range 84
249__SYSCALL(__NR_sync_file_range, sys_sync_file_range) 262__SC_COMP(__NR_sync_file_range, sys_sync_file_range, \
263 compat_sys_sync_file_range)
250#endif 264#endif
251 265
252/* fs/timerfd.c */ 266/* fs/timerfd.c */
253#define __NR_timerfd_create 85 267#define __NR_timerfd_create 85
254__SYSCALL(__NR_timerfd_create, sys_timerfd_create) 268__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
255#define __NR_timerfd_settime 86 269#define __NR_timerfd_settime 86
256__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime) 270__SC_COMP(__NR_timerfd_settime, sys_timerfd_settime, \
271 compat_sys_timerfd_settime)
257#define __NR_timerfd_gettime 87 272#define __NR_timerfd_gettime 87
258__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime) 273__SC_COMP(__NR_timerfd_gettime, sys_timerfd_gettime, \
274 compat_sys_timerfd_gettime)
259 275
260/* fs/utimes.c */ 276/* fs/utimes.c */
261#define __NR_utimensat 88 277#define __NR_utimensat 88
262__SYSCALL(__NR_utimensat, sys_utimensat) 278__SC_COMP(__NR_utimensat, sys_utimensat, compat_sys_utimensat)
263 279
264/* kernel/acct.c */ 280/* kernel/acct.c */
265#define __NR_acct 89 281#define __NR_acct 89
@@ -281,7 +297,7 @@ __SYSCALL(__NR_exit, sys_exit)
281#define __NR_exit_group 94 297#define __NR_exit_group 94
282__SYSCALL(__NR_exit_group, sys_exit_group) 298__SYSCALL(__NR_exit_group, sys_exit_group)
283#define __NR_waitid 95 299#define __NR_waitid 95
284__SYSCALL(__NR_waitid, sys_waitid) 300__SC_COMP(__NR_waitid, sys_waitid, compat_sys_waitid)
285 301
286/* kernel/fork.c */ 302/* kernel/fork.c */
287#define __NR_set_tid_address 96 303#define __NR_set_tid_address 96
@@ -291,25 +307,27 @@ __SYSCALL(__NR_unshare, sys_unshare)
291 307
292/* kernel/futex.c */ 308/* kernel/futex.c */
293#define __NR_futex 98 309#define __NR_futex 98
294__SYSCALL(__NR_futex, sys_futex) 310__SC_COMP(__NR_futex, sys_futex, compat_sys_futex)
295#define __NR_set_robust_list 99 311#define __NR_set_robust_list 99
296__SYSCALL(__NR_set_robust_list, sys_set_robust_list) 312__SC_COMP(__NR_set_robust_list, sys_set_robust_list, \
313 compat_sys_set_robust_list)
297#define __NR_get_robust_list 100 314#define __NR_get_robust_list 100
298__SYSCALL(__NR_get_robust_list, sys_get_robust_list) 315__SC_COMP(__NR_get_robust_list, sys_get_robust_list, \
316 compat_sys_get_robust_list)
299 317
300/* kernel/hrtimer.c */ 318/* kernel/hrtimer.c */
301#define __NR_nanosleep 101 319#define __NR_nanosleep 101
302__SYSCALL(__NR_nanosleep, sys_nanosleep) 320__SC_COMP(__NR_nanosleep, sys_nanosleep, compat_sys_nanosleep)
303 321
304/* kernel/itimer.c */ 322/* kernel/itimer.c */
305#define __NR_getitimer 102 323#define __NR_getitimer 102
306__SYSCALL(__NR_getitimer, sys_getitimer) 324__SC_COMP(__NR_getitimer, sys_getitimer, compat_sys_getitimer)
307#define __NR_setitimer 103 325#define __NR_setitimer 103
308__SYSCALL(__NR_setitimer, sys_setitimer) 326__SC_COMP(__NR_setitimer, sys_setitimer, compat_sys_setitimer)
309 327
310/* kernel/kexec.c */ 328/* kernel/kexec.c */
311#define __NR_kexec_load 104 329#define __NR_kexec_load 104
312__SYSCALL(__NR_kexec_load, sys_kexec_load) 330__SC_COMP(__NR_kexec_load, sys_kexec_load, compat_sys_kexec_load)
313 331
314/* kernel/module.c */ 332/* kernel/module.c */
315#define __NR_init_module 105 333#define __NR_init_module 105
@@ -319,23 +337,24 @@ __SYSCALL(__NR_delete_module, sys_delete_module)
319 337
320/* kernel/posix-timers.c */ 338/* kernel/posix-timers.c */
321#define __NR_timer_create 107 339#define __NR_timer_create 107
322__SYSCALL(__NR_timer_create, sys_timer_create) 340__SC_COMP(__NR_timer_create, sys_timer_create, compat_sys_timer_create)
323#define __NR_timer_gettime 108 341#define __NR_timer_gettime 108
324__SYSCALL(__NR_timer_gettime, sys_timer_gettime) 342__SC_COMP(__NR_timer_gettime, sys_timer_gettime, compat_sys_timer_gettime)
325#define __NR_timer_getoverrun 109 343#define __NR_timer_getoverrun 109
326__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun) 344__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
327#define __NR_timer_settime 110 345#define __NR_timer_settime 110
328__SYSCALL(__NR_timer_settime, sys_timer_settime) 346__SC_COMP(__NR_timer_settime, sys_timer_settime, compat_sys_timer_settime)
329#define __NR_timer_delete 111 347#define __NR_timer_delete 111
330__SYSCALL(__NR_timer_delete, sys_timer_delete) 348__SYSCALL(__NR_timer_delete, sys_timer_delete)
331#define __NR_clock_settime 112 349#define __NR_clock_settime 112
332__SYSCALL(__NR_clock_settime, sys_clock_settime) 350__SC_COMP(__NR_clock_settime, sys_clock_settime, compat_sys_clock_settime)
333#define __NR_clock_gettime 113 351#define __NR_clock_gettime 113
334__SYSCALL(__NR_clock_gettime, sys_clock_gettime) 352__SC_COMP(__NR_clock_gettime, sys_clock_gettime, compat_sys_clock_gettime)
335#define __NR_clock_getres 114 353#define __NR_clock_getres 114
336__SYSCALL(__NR_clock_getres, sys_clock_getres) 354__SC_COMP(__NR_clock_getres, sys_clock_getres, compat_sys_clock_getres)
337#define __NR_clock_nanosleep 115 355#define __NR_clock_nanosleep 115
338__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep) 356__SC_COMP(__NR_clock_nanosleep, sys_clock_nanosleep, \
357 compat_sys_clock_nanosleep)
339 358
340/* kernel/printk.c */ 359/* kernel/printk.c */
341#define __NR_syslog 116 360#define __NR_syslog 116
@@ -355,9 +374,11 @@ __SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
355#define __NR_sched_getparam 121 374#define __NR_sched_getparam 121
356__SYSCALL(__NR_sched_getparam, sys_sched_getparam) 375__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
357#define __NR_sched_setaffinity 122 376#define __NR_sched_setaffinity 122
358__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity) 377__SC_COMP(__NR_sched_setaffinity, sys_sched_setaffinity, \
378 compat_sys_sched_setaffinity)
359#define __NR_sched_getaffinity 123 379#define __NR_sched_getaffinity 123
360__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity) 380__SC_COMP(__NR_sched_getaffinity, sys_sched_getaffinity, \
381 compat_sys_sched_getaffinity)
361#define __NR_sched_yield 124 382#define __NR_sched_yield 124
362__SYSCALL(__NR_sched_yield, sys_sched_yield) 383__SYSCALL(__NR_sched_yield, sys_sched_yield)
363#define __NR_sched_get_priority_max 125 384#define __NR_sched_get_priority_max 125
@@ -365,7 +386,8 @@ __SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
365#define __NR_sched_get_priority_min 126 386#define __NR_sched_get_priority_min 126
366__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min) 387__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
367#define __NR_sched_rr_get_interval 127 388#define __NR_sched_rr_get_interval 127
368__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval) 389__SC_COMP(__NR_sched_rr_get_interval, sys_sched_rr_get_interval, \
390 compat_sys_sched_rr_get_interval)
369 391
370/* kernel/signal.c */ 392/* kernel/signal.c */
371#define __NR_restart_syscall 128 393#define __NR_restart_syscall 128
@@ -377,21 +399,23 @@ __SYSCALL(__NR_tkill, sys_tkill)
377#define __NR_tgkill 131 399#define __NR_tgkill 131
378__SYSCALL(__NR_tgkill, sys_tgkill) 400__SYSCALL(__NR_tgkill, sys_tgkill)
379#define __NR_sigaltstack 132 401#define __NR_sigaltstack 132
380__SYSCALL(__NR_sigaltstack, sys_sigaltstack) 402__SC_COMP(__NR_sigaltstack, sys_sigaltstack, compat_sys_sigaltstack)
381#define __NR_rt_sigsuspend 133 403#define __NR_rt_sigsuspend 133
382__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */ 404__SC_COMP(__NR_rt_sigsuspend, sys_rt_sigsuspend, compat_sys_rt_sigsuspend)
383#define __NR_rt_sigaction 134 405#define __NR_rt_sigaction 134
384__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */ 406__SC_COMP(__NR_rt_sigaction, sys_rt_sigaction, compat_sys_rt_sigaction)
385#define __NR_rt_sigprocmask 135 407#define __NR_rt_sigprocmask 135
386__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask) 408__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
387#define __NR_rt_sigpending 136 409#define __NR_rt_sigpending 136
388__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending) 410__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
389#define __NR_rt_sigtimedwait 137 411#define __NR_rt_sigtimedwait 137
390__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait) 412__SC_COMP(__NR_rt_sigtimedwait, sys_rt_sigtimedwait, \
413 compat_sys_rt_sigtimedwait)
391#define __NR_rt_sigqueueinfo 138 414#define __NR_rt_sigqueueinfo 138
392__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo) 415__SC_COMP(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo, \
416 compat_sys_rt_sigqueueinfo)
393#define __NR_rt_sigreturn 139 417#define __NR_rt_sigreturn 139
394__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */ 418__SC_COMP(__NR_rt_sigreturn, sys_rt_sigreturn, compat_sys_rt_sigreturn)
395 419
396/* kernel/sys.c */ 420/* kernel/sys.c */
397#define __NR_setpriority 140 421#define __NR_setpriority 140
@@ -421,7 +445,7 @@ __SYSCALL(__NR_setfsuid, sys_setfsuid)
421#define __NR_setfsgid 152 445#define __NR_setfsgid 152
422__SYSCALL(__NR_setfsgid, sys_setfsgid) 446__SYSCALL(__NR_setfsgid, sys_setfsgid)
423#define __NR_times 153 447#define __NR_times 153
424__SYSCALL(__NR_times, sys_times) 448__SC_COMP(__NR_times, sys_times, compat_sys_times)
425#define __NR_setpgid 154 449#define __NR_setpgid 154
426__SYSCALL(__NR_setpgid, sys_setpgid) 450__SYSCALL(__NR_setpgid, sys_setpgid)
427#define __NR_getpgid 155 451#define __NR_getpgid 155
@@ -441,11 +465,11 @@ __SYSCALL(__NR_sethostname, sys_sethostname)
441#define __NR_setdomainname 162 465#define __NR_setdomainname 162
442__SYSCALL(__NR_setdomainname, sys_setdomainname) 466__SYSCALL(__NR_setdomainname, sys_setdomainname)
443#define __NR_getrlimit 163 467#define __NR_getrlimit 163
444__SYSCALL(__NR_getrlimit, sys_getrlimit) 468__SC_COMP(__NR_getrlimit, sys_getrlimit, compat_sys_getrlimit)
445#define __NR_setrlimit 164 469#define __NR_setrlimit 164
446__SYSCALL(__NR_setrlimit, sys_setrlimit) 470__SC_COMP(__NR_setrlimit, sys_setrlimit, compat_sys_setrlimit)
447#define __NR_getrusage 165 471#define __NR_getrusage 165
448__SYSCALL(__NR_getrusage, sys_getrusage) 472__SC_COMP(__NR_getrusage, sys_getrusage, compat_sys_getrusage)
449#define __NR_umask 166 473#define __NR_umask 166
450__SYSCALL(__NR_umask, sys_umask) 474__SYSCALL(__NR_umask, sys_umask)
451#define __NR_prctl 167 475#define __NR_prctl 167
@@ -455,11 +479,11 @@ __SYSCALL(__NR_getcpu, sys_getcpu)
455 479
456/* kernel/time.c */ 480/* kernel/time.c */
457#define __NR_gettimeofday 169 481#define __NR_gettimeofday 169
458__SYSCALL(__NR_gettimeofday, sys_gettimeofday) 482__SC_COMP(__NR_gettimeofday, sys_gettimeofday, compat_sys_gettimeofday)
459#define __NR_settimeofday 170 483#define __NR_settimeofday 170
460__SYSCALL(__NR_settimeofday, sys_settimeofday) 484__SC_COMP(__NR_settimeofday, sys_settimeofday, compat_sys_settimeofday)
461#define __NR_adjtimex 171 485#define __NR_adjtimex 171
462__SYSCALL(__NR_adjtimex, sys_adjtimex) 486__SC_COMP(__NR_adjtimex, sys_adjtimex, compat_sys_adjtimex)
463 487
464/* kernel/timer.c */ 488/* kernel/timer.c */
465#define __NR_getpid 172 489#define __NR_getpid 172
@@ -477,39 +501,40 @@ __SYSCALL(__NR_getegid, sys_getegid)
477#define __NR_gettid 178 501#define __NR_gettid 178
478__SYSCALL(__NR_gettid, sys_gettid) 502__SYSCALL(__NR_gettid, sys_gettid)
479#define __NR_sysinfo 179 503#define __NR_sysinfo 179
480__SYSCALL(__NR_sysinfo, sys_sysinfo) 504__SC_COMP(__NR_sysinfo, sys_sysinfo, compat_sys_sysinfo)
481 505
482/* ipc/mqueue.c */ 506/* ipc/mqueue.c */
483#define __NR_mq_open 180 507#define __NR_mq_open 180
484__SYSCALL(__NR_mq_open, sys_mq_open) 508__SC_COMP(__NR_mq_open, sys_mq_open, compat_sys_mq_open)
485#define __NR_mq_unlink 181 509#define __NR_mq_unlink 181
486__SYSCALL(__NR_mq_unlink, sys_mq_unlink) 510__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
487#define __NR_mq_timedsend 182 511#define __NR_mq_timedsend 182
488__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend) 512__SC_COMP(__NR_mq_timedsend, sys_mq_timedsend, compat_sys_mq_timedsend)
489#define __NR_mq_timedreceive 183 513#define __NR_mq_timedreceive 183
490__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive) 514__SC_COMP(__NR_mq_timedreceive, sys_mq_timedreceive, \
515 compat_sys_mq_timedreceive)
491#define __NR_mq_notify 184 516#define __NR_mq_notify 184
492__SYSCALL(__NR_mq_notify, sys_mq_notify) 517__SC_COMP(__NR_mq_notify, sys_mq_notify, compat_sys_mq_notify)
493#define __NR_mq_getsetattr 185 518#define __NR_mq_getsetattr 185
494__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr) 519__SC_COMP(__NR_mq_getsetattr, sys_mq_getsetattr, compat_sys_mq_getsetattr)
495 520
496/* ipc/msg.c */ 521/* ipc/msg.c */
497#define __NR_msgget 186 522#define __NR_msgget 186
498__SYSCALL(__NR_msgget, sys_msgget) 523__SYSCALL(__NR_msgget, sys_msgget)
499#define __NR_msgctl 187 524#define __NR_msgctl 187
500__SYSCALL(__NR_msgctl, sys_msgctl) 525__SC_COMP(__NR_msgctl, sys_msgctl, compat_sys_msgctl)
501#define __NR_msgrcv 188 526#define __NR_msgrcv 188
502__SYSCALL(__NR_msgrcv, sys_msgrcv) 527__SC_COMP(__NR_msgrcv, sys_msgrcv, compat_sys_msgrcv)
503#define __NR_msgsnd 189 528#define __NR_msgsnd 189
504__SYSCALL(__NR_msgsnd, sys_msgsnd) 529__SC_COMP(__NR_msgsnd, sys_msgsnd, compat_sys_msgsnd)
505 530
506/* ipc/sem.c */ 531/* ipc/sem.c */
507#define __NR_semget 190 532#define __NR_semget 190
508__SYSCALL(__NR_semget, sys_semget) 533__SYSCALL(__NR_semget, sys_semget)
509#define __NR_semctl 191 534#define __NR_semctl 191
510__SYSCALL(__NR_semctl, sys_semctl) 535__SC_COMP(__NR_semctl, sys_semctl, compat_sys_semctl)
511#define __NR_semtimedop 192 536#define __NR_semtimedop 192
512__SYSCALL(__NR_semtimedop, sys_semtimedop) 537__SC_COMP(__NR_semtimedop, sys_semtimedop, compat_sys_semtimedop)
513#define __NR_semop 193 538#define __NR_semop 193
514__SYSCALL(__NR_semop, sys_semop) 539__SYSCALL(__NR_semop, sys_semop)
515 540
@@ -517,9 +542,9 @@ __SYSCALL(__NR_semop, sys_semop)
517#define __NR_shmget 194 542#define __NR_shmget 194
518__SYSCALL(__NR_shmget, sys_shmget) 543__SYSCALL(__NR_shmget, sys_shmget)
519#define __NR_shmctl 195 544#define __NR_shmctl 195
520__SYSCALL(__NR_shmctl, sys_shmctl) 545__SC_COMP(__NR_shmctl, sys_shmctl, compat_sys_shmctl)
521#define __NR_shmat 196 546#define __NR_shmat 196
522__SYSCALL(__NR_shmat, sys_shmat) 547__SC_COMP(__NR_shmat, sys_shmat, compat_sys_shmat)
523#define __NR_shmdt 197 548#define __NR_shmdt 197
524__SYSCALL(__NR_shmdt, sys_shmdt) 549__SYSCALL(__NR_shmdt, sys_shmdt)
525 550
@@ -543,21 +568,21 @@ __SYSCALL(__NR_getpeername, sys_getpeername)
543#define __NR_sendto 206 568#define __NR_sendto 206
544__SYSCALL(__NR_sendto, sys_sendto) 569__SYSCALL(__NR_sendto, sys_sendto)
545#define __NR_recvfrom 207 570#define __NR_recvfrom 207
546__SYSCALL(__NR_recvfrom, sys_recvfrom) 571__SC_COMP(__NR_recvfrom, sys_recvfrom, compat_sys_recvfrom)
547#define __NR_setsockopt 208 572#define __NR_setsockopt 208
548__SYSCALL(__NR_setsockopt, sys_setsockopt) 573__SC_COMP(__NR_setsockopt, sys_setsockopt, compat_sys_setsockopt)
549#define __NR_getsockopt 209 574#define __NR_getsockopt 209
550__SYSCALL(__NR_getsockopt, sys_getsockopt) 575__SC_COMP(__NR_getsockopt, sys_getsockopt, compat_sys_getsockopt)
551#define __NR_shutdown 210 576#define __NR_shutdown 210
552__SYSCALL(__NR_shutdown, sys_shutdown) 577__SYSCALL(__NR_shutdown, sys_shutdown)
553#define __NR_sendmsg 211 578#define __NR_sendmsg 211
554__SYSCALL(__NR_sendmsg, sys_sendmsg) 579__SC_COMP(__NR_sendmsg, sys_sendmsg, compat_sys_sendmsg)
555#define __NR_recvmsg 212 580#define __NR_recvmsg 212
556__SYSCALL(__NR_recvmsg, sys_recvmsg) 581__SC_COMP(__NR_recvmsg, sys_recvmsg, compat_sys_recvmsg)
557 582
558/* mm/filemap.c */ 583/* mm/filemap.c */
559#define __NR_readahead 213 584#define __NR_readahead 213
560__SYSCALL(__NR_readahead, sys_readahead) 585__SC_COMP(__NR_readahead, sys_readahead, compat_sys_readahead)
561 586
562/* mm/nommu.c, also with MMU */ 587/* mm/nommu.c, also with MMU */
563#define __NR_brk 214 588#define __NR_brk 214
@@ -573,19 +598,19 @@ __SYSCALL(__NR_add_key, sys_add_key)
573#define __NR_request_key 218 598#define __NR_request_key 218
574__SYSCALL(__NR_request_key, sys_request_key) 599__SYSCALL(__NR_request_key, sys_request_key)
575#define __NR_keyctl 219 600#define __NR_keyctl 219
576__SYSCALL(__NR_keyctl, sys_keyctl) 601__SC_COMP(__NR_keyctl, sys_keyctl, compat_sys_keyctl)
577 602
578/* arch/example/kernel/sys_example.c */ 603/* arch/example/kernel/sys_example.c */
579#define __NR_clone 220 604#define __NR_clone 220
580__SYSCALL(__NR_clone, sys_clone) /* .long sys_clone_wrapper */ 605__SYSCALL(__NR_clone, sys_clone)
581#define __NR_execve 221 606#define __NR_execve 221
582__SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */ 607__SC_COMP(__NR_execve, sys_execve, compat_sys_execve)
583 608
584#define __NR3264_mmap 222 609#define __NR3264_mmap 222
585__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap) 610__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
586/* mm/fadvise.c */ 611/* mm/fadvise.c */
587#define __NR3264_fadvise64 223 612#define __NR3264_fadvise64 223
588__SYSCALL(__NR3264_fadvise64, sys_fadvise64_64) 613__SC_COMP(__NR3264_fadvise64, sys_fadvise64_64, compat_sys_fadvise64_64)
589 614
590/* mm/, CONFIG_MMU only */ 615/* mm/, CONFIG_MMU only */
591#ifndef __ARCH_NOMMU 616#ifndef __ARCH_NOMMU
@@ -612,25 +637,26 @@ __SYSCALL(__NR_madvise, sys_madvise)
612#define __NR_remap_file_pages 234 637#define __NR_remap_file_pages 234
613__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages) 638__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
614#define __NR_mbind 235 639#define __NR_mbind 235
615__SYSCALL(__NR_mbind, sys_mbind) 640__SC_COMP(__NR_mbind, sys_mbind, compat_sys_mbind)
616#define __NR_get_mempolicy 236 641#define __NR_get_mempolicy 236
617__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy) 642__SC_COMP(__NR_get_mempolicy, sys_get_mempolicy, compat_sys_get_mempolicy)
618#define __NR_set_mempolicy 237 643#define __NR_set_mempolicy 237
619__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy) 644__SC_COMP(__NR_set_mempolicy, sys_set_mempolicy, compat_sys_set_mempolicy)
620#define __NR_migrate_pages 238 645#define __NR_migrate_pages 238
621__SYSCALL(__NR_migrate_pages, sys_migrate_pages) 646__SC_COMP(__NR_migrate_pages, sys_migrate_pages, compat_sys_migrate_pages)
622#define __NR_move_pages 239 647#define __NR_move_pages 239
623__SYSCALL(__NR_move_pages, sys_move_pages) 648__SC_COMP(__NR_move_pages, sys_move_pages, compat_sys_move_pages)
624#endif 649#endif
625 650
626#define __NR_rt_tgsigqueueinfo 240 651#define __NR_rt_tgsigqueueinfo 240
627__SYSCALL(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo) 652__SC_COMP(__NR_rt_tgsigqueueinfo, sys_rt_tgsigqueueinfo, \
653 compat_sys_rt_tgsigqueueinfo)
628#define __NR_perf_event_open 241 654#define __NR_perf_event_open 241
629__SYSCALL(__NR_perf_event_open, sys_perf_event_open) 655__SYSCALL(__NR_perf_event_open, sys_perf_event_open)
630#define __NR_accept4 242 656#define __NR_accept4 242
631__SYSCALL(__NR_accept4, sys_accept4) 657__SYSCALL(__NR_accept4, sys_accept4)
632#define __NR_recvmmsg 243 658#define __NR_recvmmsg 243
633__SYSCALL(__NR_recvmmsg, sys_recvmmsg) 659__SC_COMP(__NR_recvmmsg, sys_recvmmsg, compat_sys_recvmmsg)
634 660
635/* 661/*
636 * Architectures may provide up to 16 syscalls of their own 662 * Architectures may provide up to 16 syscalls of their own
@@ -639,19 +665,20 @@ __SYSCALL(__NR_recvmmsg, sys_recvmmsg)
639#define __NR_arch_specific_syscall 244 665#define __NR_arch_specific_syscall 244
640 666
641#define __NR_wait4 260 667#define __NR_wait4 260
642__SYSCALL(__NR_wait4, sys_wait4) 668__SC_COMP(__NR_wait4, sys_wait4, compat_sys_wait4)
643#define __NR_prlimit64 261 669#define __NR_prlimit64 261
644__SYSCALL(__NR_prlimit64, sys_prlimit64) 670__SYSCALL(__NR_prlimit64, sys_prlimit64)
645#define __NR_fanotify_init 262 671#define __NR_fanotify_init 262
646__SYSCALL(__NR_fanotify_init, sys_fanotify_init) 672__SYSCALL(__NR_fanotify_init, sys_fanotify_init)
647#define __NR_fanotify_mark 263 673#define __NR_fanotify_mark 263
648__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark) 674__SYSCALL(__NR_fanotify_mark, sys_fanotify_mark)
649#define __NR_name_to_handle_at 264 675#define __NR_name_to_handle_at 264
650__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at) 676__SYSCALL(__NR_name_to_handle_at, sys_name_to_handle_at)
651#define __NR_open_by_handle_at 265 677#define __NR_open_by_handle_at 265
652__SYSCALL(__NR_open_by_handle_at, sys_open_by_handle_at) 678__SC_COMP(__NR_open_by_handle_at, sys_open_by_handle_at, \
679 compat_sys_open_by_handle_at)
653#define __NR_clock_adjtime 266 680#define __NR_clock_adjtime 266
654__SYSCALL(__NR_clock_adjtime, sys_clock_adjtime) 681__SC_COMP(__NR_clock_adjtime, sys_clock_adjtime, compat_sys_clock_adjtime)
655#define __NR_syncfs 267 682#define __NR_syncfs 267
656__SYSCALL(__NR_syncfs, sys_syncfs) 683__SYSCALL(__NR_syncfs, sys_syncfs)
657 684
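The new __SC_COMP()/__SC_COMP_3264() wrappers let one header emit either the native or the compat entry point, depending on whether the includer defines __SYSCALL_COMPAT. A simplified illustration of the two expansions (an assumption about how a table builder uses it, not part of this diff):

/* A syscall-table builder only has to provide __SYSCALL: */
#define __SYSCALL(nr, call)	[nr] = (call),

/*
 * Native table (no __SYSCALL_COMPAT):
 *   __SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
 *     -> __SYSCALL(__NR_openat, sys_openat)        -> [56] = sys_openat,
 *
 * Compat table (the arch defines __SYSCALL_COMPAT before including):
 *   __SC_COMP(__NR_openat, sys_openat, compat_sys_openat)
 *     -> __SYSCALL(__NR_openat, compat_sys_openat) -> [56] = compat_sys_openat,
 */
#include <asm-generic/unistd.h>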
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index bd297a20ab9..db22d136ad0 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -15,7 +15,7 @@
15 * HEAD_TEXT_SECTION 15 * HEAD_TEXT_SECTION
16 * INIT_TEXT_SECTION(PAGE_SIZE) 16 * INIT_TEXT_SECTION(PAGE_SIZE)
17 * INIT_DATA_SECTION(...) 17 * INIT_DATA_SECTION(...)
18 * PERCPU(CACHELINE_SIZE, PAGE_SIZE) 18 * PERCPU_SECTION(CACHELINE_SIZE)
19 * __init_end = .; 19 * __init_end = .;
20 * 20 *
21 * _stext = .; 21 * _stext = .;
@@ -170,6 +170,10 @@
170 STRUCT_ALIGN(); \ 170 STRUCT_ALIGN(); \
171 *(__tracepoints) \ 171 *(__tracepoints) \
172 /* implement dynamic printk debug */ \ 172 /* implement dynamic printk debug */ \
173 . = ALIGN(8); \
174 VMLINUX_SYMBOL(__start___jump_table) = .; \
175 *(__jump_table) \
176 VMLINUX_SYMBOL(__stop___jump_table) = .; \
173 . = ALIGN(8); \ 177 . = ALIGN(8); \
174 VMLINUX_SYMBOL(__start___verbose) = .; \ 178 VMLINUX_SYMBOL(__start___verbose) = .; \
175 *(__verbose) \ 179 *(__verbose) \
@@ -228,8 +232,6 @@
228 \ 232 \
229 BUG_TABLE \ 233 BUG_TABLE \
230 \ 234 \
231 JUMP_TABLE \
232 \
233 /* PCI quirks */ \ 235 /* PCI quirks */ \
234 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \ 236 .pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
235 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \ 237 VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -274,70 +276,70 @@
274 /* Kernel symbol table: Normal symbols */ \ 276 /* Kernel symbol table: Normal symbols */ \
275 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \ 277 __ksymtab : AT(ADDR(__ksymtab) - LOAD_OFFSET) { \
276 VMLINUX_SYMBOL(__start___ksymtab) = .; \ 278 VMLINUX_SYMBOL(__start___ksymtab) = .; \
277 *(__ksymtab) \ 279 *(SORT(___ksymtab+*)) \
278 VMLINUX_SYMBOL(__stop___ksymtab) = .; \ 280 VMLINUX_SYMBOL(__stop___ksymtab) = .; \
279 } \ 281 } \
280 \ 282 \
281 /* Kernel symbol table: GPL-only symbols */ \ 283 /* Kernel symbol table: GPL-only symbols */ \
282 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \ 284 __ksymtab_gpl : AT(ADDR(__ksymtab_gpl) - LOAD_OFFSET) { \
283 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \ 285 VMLINUX_SYMBOL(__start___ksymtab_gpl) = .; \
284 *(__ksymtab_gpl) \ 286 *(SORT(___ksymtab_gpl+*)) \
285 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \ 287 VMLINUX_SYMBOL(__stop___ksymtab_gpl) = .; \
286 } \ 288 } \
287 \ 289 \
288 /* Kernel symbol table: Normal unused symbols */ \ 290 /* Kernel symbol table: Normal unused symbols */ \
289 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \ 291 __ksymtab_unused : AT(ADDR(__ksymtab_unused) - LOAD_OFFSET) { \
290 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \ 292 VMLINUX_SYMBOL(__start___ksymtab_unused) = .; \
291 *(__ksymtab_unused) \ 293 *(SORT(___ksymtab_unused+*)) \
292 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \ 294 VMLINUX_SYMBOL(__stop___ksymtab_unused) = .; \
293 } \ 295 } \
294 \ 296 \
295 /* Kernel symbol table: GPL-only unused symbols */ \ 297 /* Kernel symbol table: GPL-only unused symbols */ \
296 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \ 298 __ksymtab_unused_gpl : AT(ADDR(__ksymtab_unused_gpl) - LOAD_OFFSET) { \
297 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \ 299 VMLINUX_SYMBOL(__start___ksymtab_unused_gpl) = .; \
298 *(__ksymtab_unused_gpl) \ 300 *(SORT(___ksymtab_unused_gpl+*)) \
299 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \ 301 VMLINUX_SYMBOL(__stop___ksymtab_unused_gpl) = .; \
300 } \ 302 } \
301 \ 303 \
302 /* Kernel symbol table: GPL-future-only symbols */ \ 304 /* Kernel symbol table: GPL-future-only symbols */ \
303 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \ 305 __ksymtab_gpl_future : AT(ADDR(__ksymtab_gpl_future) - LOAD_OFFSET) { \
304 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \ 306 VMLINUX_SYMBOL(__start___ksymtab_gpl_future) = .; \
305 *(__ksymtab_gpl_future) \ 307 *(SORT(___ksymtab_gpl_future+*)) \
306 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \ 308 VMLINUX_SYMBOL(__stop___ksymtab_gpl_future) = .; \
307 } \ 309 } \
308 \ 310 \
309 /* Kernel symbol table: Normal symbols */ \ 311 /* Kernel symbol table: Normal symbols */ \
310 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \ 312 __kcrctab : AT(ADDR(__kcrctab) - LOAD_OFFSET) { \
311 VMLINUX_SYMBOL(__start___kcrctab) = .; \ 313 VMLINUX_SYMBOL(__start___kcrctab) = .; \
312 *(__kcrctab) \ 314 *(SORT(___kcrctab+*)) \
313 VMLINUX_SYMBOL(__stop___kcrctab) = .; \ 315 VMLINUX_SYMBOL(__stop___kcrctab) = .; \
314 } \ 316 } \
315 \ 317 \
316 /* Kernel symbol table: GPL-only symbols */ \ 318 /* Kernel symbol table: GPL-only symbols */ \
317 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \ 319 __kcrctab_gpl : AT(ADDR(__kcrctab_gpl) - LOAD_OFFSET) { \
318 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \ 320 VMLINUX_SYMBOL(__start___kcrctab_gpl) = .; \
319 *(__kcrctab_gpl) \ 321 *(SORT(___kcrctab_gpl+*)) \
320 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \ 322 VMLINUX_SYMBOL(__stop___kcrctab_gpl) = .; \
321 } \ 323 } \
322 \ 324 \
323 /* Kernel symbol table: Normal unused symbols */ \ 325 /* Kernel symbol table: Normal unused symbols */ \
324 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \ 326 __kcrctab_unused : AT(ADDR(__kcrctab_unused) - LOAD_OFFSET) { \
325 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \ 327 VMLINUX_SYMBOL(__start___kcrctab_unused) = .; \
326 *(__kcrctab_unused) \ 328 *(SORT(___kcrctab_unused+*)) \
327 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \ 329 VMLINUX_SYMBOL(__stop___kcrctab_unused) = .; \
328 } \ 330 } \
329 \ 331 \
330 /* Kernel symbol table: GPL-only unused symbols */ \ 332 /* Kernel symbol table: GPL-only unused symbols */ \
331 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \ 333 __kcrctab_unused_gpl : AT(ADDR(__kcrctab_unused_gpl) - LOAD_OFFSET) { \
332 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \ 334 VMLINUX_SYMBOL(__start___kcrctab_unused_gpl) = .; \
333 *(__kcrctab_unused_gpl) \ 335 *(SORT(___kcrctab_unused_gpl+*)) \
334 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \ 336 VMLINUX_SYMBOL(__stop___kcrctab_unused_gpl) = .; \
335 } \ 337 } \
336 \ 338 \
337 /* Kernel symbol table: GPL-future-only symbols */ \ 339 /* Kernel symbol table: GPL-future-only symbols */ \
338 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \ 340 __kcrctab_gpl_future : AT(ADDR(__kcrctab_gpl_future) - LOAD_OFFSET) { \
339 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \ 341 VMLINUX_SYMBOL(__start___kcrctab_gpl_future) = .; \
340 *(__kcrctab_gpl_future) \ 342 *(SORT(___kcrctab_gpl_future+*)) \
341 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \ 343 VMLINUX_SYMBOL(__stop___kcrctab_gpl_future) = .; \
342 } \ 344 } \
343 \ 345 \
@@ -589,14 +591,6 @@
589#define BUG_TABLE 591#define BUG_TABLE
590#endif 592#endif
591 593
592#define JUMP_TABLE \
593 . = ALIGN(8); \
594 __jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) { \
595 VMLINUX_SYMBOL(__start___jump_table) = .; \
596 *(__jump_table) \
597 VMLINUX_SYMBOL(__stop___jump_table) = .; \
598 }
599
600#ifdef CONFIG_PM_TRACE 594#ifdef CONFIG_PM_TRACE
601#define TRACEDATA \ 595#define TRACEDATA \
602 . = ALIGN(4); \ 596 . = ALIGN(4); \
@@ -688,6 +682,28 @@
688 } 682 }
689 683
690/** 684/**
685 * PERCPU_INPUT - the percpu input sections
686 * @cacheline: cacheline size
687 *
688 * The core percpu section names and core symbols which do not rely
689 * directly upon load addresses.
690 *
691 * @cacheline is used to align subsections to avoid false cacheline
692 * sharing between subsections for different purposes.
693 */
694#define PERCPU_INPUT(cacheline) \
695 VMLINUX_SYMBOL(__per_cpu_start) = .; \
696 *(.data..percpu..first) \
697 . = ALIGN(PAGE_SIZE); \
698 *(.data..percpu..page_aligned) \
699 . = ALIGN(cacheline); \
700 *(.data..percpu..readmostly) \
701 . = ALIGN(cacheline); \
702 *(.data..percpu) \
703 *(.data..percpu..shared_aligned) \
704 VMLINUX_SYMBOL(__per_cpu_end) = .;
705
706/**
691 * PERCPU_VADDR - define output section for percpu area 707 * PERCPU_VADDR - define output section for percpu area
692 * @cacheline: cacheline size 708 * @cacheline: cacheline size
693 * @vaddr: explicit base address (optional) 709 * @vaddr: explicit base address (optional)
@@ -709,52 +725,33 @@
709 * 725 *
710 * Note that this macros defines __per_cpu_load as an absolute symbol. 726 * Note that this macros defines __per_cpu_load as an absolute symbol.
711 * If there is no need to put the percpu section at a predetermined 727 * If there is no need to put the percpu section at a predetermined
712 * address, use PERCPU(). 728 * address, use PERCPU_SECTION.
713 */ 729 */
714#define PERCPU_VADDR(cacheline, vaddr, phdr) \ 730#define PERCPU_VADDR(cacheline, vaddr, phdr) \
715 VMLINUX_SYMBOL(__per_cpu_load) = .; \ 731 VMLINUX_SYMBOL(__per_cpu_load) = .; \
716 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ 732 .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
717 - LOAD_OFFSET) { \ 733 - LOAD_OFFSET) { \
718 VMLINUX_SYMBOL(__per_cpu_start) = .; \ 734 PERCPU_INPUT(cacheline) \
719 *(.data..percpu..first) \
720 . = ALIGN(PAGE_SIZE); \
721 *(.data..percpu..page_aligned) \
722 . = ALIGN(cacheline); \
723 *(.data..percpu..readmostly) \
724 . = ALIGN(cacheline); \
725 *(.data..percpu) \
726 *(.data..percpu..shared_aligned) \
727 VMLINUX_SYMBOL(__per_cpu_end) = .; \
728 } phdr \ 735 } phdr \
729 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); 736 . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
730 737
731/** 738/**
732 * PERCPU - define output section for percpu area, simple version 739 * PERCPU_SECTION - define output section for percpu area, simple version
733 * @cacheline: cacheline size 740 * @cacheline: cacheline size
734 * @align: required alignment
735 * 741 *
736 * Align to @align and outputs output section for percpu area. This macro 742 * Align to PAGE_SIZE and outputs output section for percpu area. This
737 * doesn't manipulate @vaddr or @phdr and __per_cpu_load and 743 * macro doesn't manipulate @vaddr or @phdr and __per_cpu_load and
738 * __per_cpu_start will be identical. 744 * __per_cpu_start will be identical.
739 * 745 *
740 * This macro is equivalent to ALIGN(@align); PERCPU_VADDR(@cacheline,,) 746 * This macro is equivalent to ALIGN(PAGE_SIZE); PERCPU_VADDR(@cacheline,,)
741 * except that __per_cpu_load is defined as a relative symbol against 747 * except that __per_cpu_load is defined as a relative symbol against
742 * .data..percpu which is required for relocatable x86_32 configuration. 748 * .data..percpu which is required for relocatable x86_32 configuration.
743 */ 749 */
744#define PERCPU(cacheline, align) \ 750#define PERCPU_SECTION(cacheline) \
745 . = ALIGN(align); \ 751 . = ALIGN(PAGE_SIZE); \
746 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \ 752 .data..percpu : AT(ADDR(.data..percpu) - LOAD_OFFSET) { \
747 VMLINUX_SYMBOL(__per_cpu_load) = .; \ 753 VMLINUX_SYMBOL(__per_cpu_load) = .; \
748 VMLINUX_SYMBOL(__per_cpu_start) = .; \ 754 PERCPU_INPUT(cacheline) \
749 *(.data..percpu..first) \
750 . = ALIGN(PAGE_SIZE); \
751 *(.data..percpu..page_aligned) \
752 . = ALIGN(cacheline); \
753 *(.data..percpu..readmostly) \
754 . = ALIGN(cacheline); \
755 *(.data..percpu) \
756 *(.data..percpu..shared_aligned) \
757 VMLINUX_SYMBOL(__per_cpu_end) = .; \
758 } 755 }
759 756
760 757
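The net effect for architecture linker scripts: the old two-argument PERCPU(cacheline, align) is gone, PERCPU_SECTION(cacheline) always aligns to PAGE_SIZE, and PERCPU_VADDR() remains for platforms that need an explicit base address. A sketch of the call site in an arch vmlinux.lds.S (a linker-script fragment, illustrative only, not from this diff):

	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE)
	INIT_DATA_SECTION(16)
	PERCPU_SECTION(L1_CACHE_BYTES)	/* was: PERCPU(L1_CACHE_BYTES, PAGE_SIZE) */
	__init_end = .;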
diff --git a/include/asm-generic/xor.h b/include/asm-generic/xor.h
index aaab875e1a3..6028fb86225 100644
--- a/include/asm-generic/xor.h
+++ b/include/asm-generic/xor.h
@@ -13,7 +13,7 @@
13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 13 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
14 */ 14 */
15 15
16#include <asm/processor.h> 16#include <linux/prefetch.h>
17 17
18static void 18static void
19xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2) 19xor_8regs_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)