Diffstat (limited to 'include')
 include/linux/Kbuild                       |   1
 include/linux/bootmem.h                    |  36
 include/linux/dcbnl.h                      |   4
 include/linux/decompress/bunzip2.h         |  10
 include/linux/decompress/generic.h         |  33
 include/linux/decompress/inflate.h         |  13
 include/linux/decompress/mm.h              |  87
 include/linux/decompress/unlzma.h          |  12
 include/linux/io-mapping.h                 |   5
 include/linux/netfilter/xt_NFLOG.h         |   2
 include/linux/percpu.h                     | 102
 include/linux/rcuclassic.h                 |   6
 include/linux/rcupdate.h                   |   4
 include/linux/rcupreempt.h                 |  15
 include/linux/rcutree.h                    |   6
 include/linux/sched.h                      |   4
 include/linux/uaccess.h                    |   4
 include/linux/vmalloc.h                    |   4
 include/net/netfilter/nf_conntrack_core.h  |   2
 19 files changed, 299 insertions(+), 51 deletions(-)
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index b97cdc516a8f..106c3ba50844 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -52,6 +52,7 @@ header-y += const.h
 header-y += cgroupstats.h
 header-y += cramfs_fs.h
 header-y += cycx_cfm.h
+header-y += dcbnl.h
 header-y += dlmconstants.h
 header-y += dlm_device.h
 header-y += dlm_netlink.h
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 95837bfb5256..455d83219fae 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -65,23 +65,20 @@ extern void free_bootmem(unsigned long addr, unsigned long size);
 #define BOOTMEM_DEFAULT		0
 #define BOOTMEM_EXCLUSIVE	(1<<0)
 
+extern int reserve_bootmem(unsigned long addr,
+			   unsigned long size,
+			   int flags);
 extern int reserve_bootmem_node(pg_data_t *pgdat,
 				unsigned long physaddr,
 				unsigned long size,
 				int flags);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
-extern int reserve_bootmem(unsigned long addr, unsigned long size, int flags);
-#endif
 
-extern void *__alloc_bootmem_nopanic(unsigned long size,
+extern void *__alloc_bootmem(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem(unsigned long size,
+extern void *__alloc_bootmem_nopanic(unsigned long size,
 			     unsigned long align,
 			     unsigned long goal);
-extern void *__alloc_bootmem_low(unsigned long size,
-				 unsigned long align,
-				 unsigned long goal);
 extern void *__alloc_bootmem_node(pg_data_t *pgdat,
 				  unsigned long size,
 				  unsigned long align,
@@ -90,30 +87,35 @@ extern void *__alloc_bootmem_node_nopanic(pg_data_t *pgdat,
 					  unsigned long size,
 					  unsigned long align,
 					  unsigned long goal);
+extern void *__alloc_bootmem_low(unsigned long size,
+				 unsigned long align,
+				 unsigned long goal);
 extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
 				      unsigned long size,
 				      unsigned long align,
 				      unsigned long goal);
-#ifndef CONFIG_HAVE_ARCH_BOOTMEM_NODE
+
 #define alloc_bootmem(x) \
 	__alloc_bootmem(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_nopanic(x) \
 	__alloc_bootmem_nopanic(x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low(x) \
-	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
 #define alloc_bootmem_pages(x) \
 	__alloc_bootmem(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_nopanic(x) \
 	__alloc_bootmem_nopanic(x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
-#define alloc_bootmem_low_pages(x) \
-	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
 #define alloc_bootmem_pages_node(pgdat, x) \
 	__alloc_bootmem_node(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+#define alloc_bootmem_pages_node_nopanic(pgdat, x) \
+	__alloc_bootmem_node_nopanic(pgdat, x, PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
+
+#define alloc_bootmem_low(x) \
+	__alloc_bootmem_low(x, SMP_CACHE_BYTES, 0)
+#define alloc_bootmem_low_pages(x) \
+	__alloc_bootmem_low(x, PAGE_SIZE, 0)
 #define alloc_bootmem_low_pages_node(pgdat, x) \
 	__alloc_bootmem_low_node(pgdat, x, PAGE_SIZE, 0)
-#endif /* !CONFIG_HAVE_ARCH_BOOTMEM_NODE */
 
 extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
 				   int flags);
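For orientation, a minimal sketch of how the bootmem interfaces above are used at boot time; the function and sizes are illustrative, not part of this patch. alloc_bootmem() panics on failure, while the _nopanic variant can return NULL:

#include <linux/bootmem.h>
#include <linux/string.h>
#include <linux/kernel.h>

static void __init example_bootmem_setup(void)
{
	void *table, *scratch;

	/* Panicking variant: returns valid (zeroed) memory or panics,
	 * so no NULL check is needed. */
	table = alloc_bootmem(PAGE_SIZE);
	memset(table, 0x5a, PAGE_SIZE);

	/* Non-panicking variant: may return NULL; the caller checks. */
	scratch = alloc_bootmem_nopanic(PAGE_SIZE);
	if (!scratch)
		printk(KERN_WARNING "example: no scratch area\n");
}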
diff --git a/include/linux/dcbnl.h b/include/linux/dcbnl.h
index b0ef274e0031..7d2e10006188 100644
--- a/include/linux/dcbnl.h
+++ b/include/linux/dcbnl.h
@@ -20,10 +20,12 @@
 #ifndef __LINUX_DCBNL_H__
 #define __LINUX_DCBNL_H__
 
+#include <linux/types.h>
+
 #define DCB_PROTO_VERSION 1
 
 struct dcbmsg {
-	unsigned char      dcb_family;
+	__u8               dcb_family;
 	__u8               cmd;
 	__u16              dcb_pad;
 };
diff --git a/include/linux/decompress/bunzip2.h b/include/linux/decompress/bunzip2.h
new file mode 100644
index 000000000000..115272137a9c
--- /dev/null
+++ b/include/linux/decompress/bunzip2.h
@@ -0,0 +1,10 @@
+#ifndef DECOMPRESS_BUNZIP2_H
+#define DECOMPRESS_BUNZIP2_H
+
+int bunzip2(unsigned char *inbuf, int len,
+	    int (*fill)(void *, unsigned int),
+	    int (*flush)(void *, unsigned int),
+	    unsigned char *output,
+	    int *pos,
+	    void (*error)(char *x));
+#endif
diff --git a/include/linux/decompress/generic.h b/include/linux/decompress/generic.h
new file mode 100644
index 000000000000..6dfb856327bb
--- /dev/null
+++ b/include/linux/decompress/generic.h
@@ -0,0 +1,33 @@
+#ifndef DECOMPRESS_GENERIC_H
+#define DECOMPRESS_GENERIC_H
+
+/* Minimal chunksize to be read.
+ * Bzip2 prefers at least 4096
+ * Lzma prefers 0x10000 */
+#define COMPR_IOBUF_SIZE 4096
+
+typedef int (*decompress_fn) (unsigned char *inbuf, int len,
+			      int (*fill)(void *, unsigned int),
+			      int (*writebb)(void *, unsigned int),
+			      unsigned char *output,
+			      int *posp,
+			      void (*error)(char *x));
+
+/* inbuf   - input buffer
+ * len     - length of the pre-read data in inbuf
+ * fill    - function to fill inbuf if empty
+ * writebb - function to write out the output buffer
+ * posp    - if non-null, the input position (number of bytes read) is
+ *           returned here
+ *
+ * If len != 0, inbuf is initialized (with that much pre-read data), and
+ * fill should not be called.
+ * If len = 0, inbuf is allocated but empty; its size is COMPR_IOBUF_SIZE,
+ * and fill should be called (repeatedly...) to read at most that much.
+ */
+
+/* Utility routine to detect the decompression method */
+decompress_fn decompress_method(const unsigned char *inbuf, int len,
+				const char **name);
+
+#endif
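A hedged sketch of the calling convention this header documents, mirroring how in-kernel users invoke a detected decompressor; the wrapper function and error handler here are hypothetical:

#include <linux/decompress/generic.h>

static void example_decompress(unsigned char *buf, int len,
			       unsigned char *out_buf,
			       void (*error_fn)(char *x))
{
	const char *name;
	decompress_fn decomp = decompress_method(buf, len, &name);

	if (!decomp) {
		error_fn("compression method not recognized");
		return;
	}
	/* len != 0 and fill == NULL: the whole image is already in buf.
	 * writebb/flush == NULL: decompress straight into out_buf, which
	 * must be large enough for the uncompressed data. */
	decomp(buf, len, NULL, NULL, out_buf, NULL, error_fn);
}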
diff --git a/include/linux/decompress/inflate.h b/include/linux/decompress/inflate.h
new file mode 100644
index 000000000000..f9b06ccc3e5c
--- /dev/null
+++ b/include/linux/decompress/inflate.h
@@ -0,0 +1,13 @@
+#ifndef INFLATE_H
+#define INFLATE_H
+
+/* Other housekeeping constants */
+#define INBUFSIZ 4096
+
+int gunzip(unsigned char *inbuf, int len,
+	   int (*fill)(void *, unsigned int),
+	   int (*flush)(void *, unsigned int),
+	   unsigned char *output,
+	   int *pos,
+	   void (*error_fn)(char *x));
+#endif
diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
new file mode 100644
index 000000000000..12ff8c3f1d05
--- /dev/null
+++ b/include/linux/decompress/mm.h
@@ -0,0 +1,87 @@
+/*
+ * linux/compr_mm.h
+ *
+ * Memory management for pre-boot and ramdisk uncompressors
+ *
+ * Authors: Alain Knaff <alain@knaff.lu>
+ *
+ */
+
+#ifndef DECOMPR_MM_H
+#define DECOMPR_MM_H
+
+#ifdef STATIC
+
+/* Code active when included from pre-boot environment: */
+
+/* A trivial malloc implementation, adapted from
+ * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994
+ */
+static unsigned long malloc_ptr;
+static int malloc_count;
+
+static void *malloc(int size)
+{
+	void *p;
+
+	if (size < 0)
+		error("Malloc error");
+	if (!malloc_ptr)
+		malloc_ptr = free_mem_ptr;
+
+	malloc_ptr = (malloc_ptr + 3) & ~3;	/* Align */
+
+	p = (void *)malloc_ptr;
+	malloc_ptr += size;
+
+	if (free_mem_end_ptr && malloc_ptr >= free_mem_end_ptr)
+		error("Out of memory");
+
+	malloc_count++;
+	return p;
+}
+
+static void free(void *where)
+{
+	malloc_count--;
+	if (!malloc_count)
+		malloc_ptr = free_mem_ptr;
+}
+
+#define large_malloc(a) malloc(a)
+#define large_free(a) free(a)
+
+#define set_error_fn(x)
+
+#define INIT
+
+#else /* STATIC */
+
+/* Code active when compiled standalone for use when loading ramdisk: */
+
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+/* Use defines rather than static inline in order to avoid spurious
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate) */
+
+#define malloc(a) kmalloc(a, GFP_KERNEL)
+#define free(a) kfree(a)
+
+#define large_malloc(a) vmalloc(a)
+#define large_free(a) vfree(a)
+
+static void (*error)(char *m);
+#define set_error_fn(x) error = x;
+
+#define INIT __init
+#define STATIC
+
+#include <linux/init.h>
+
+#endif /* STATIC */
+
+#endif /* DECOMPR_MM_H */
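A brief behavior sketch of the pre-boot path above (assuming STATIC is defined, error() is provided, and free_mem_ptr points at a scratch region): malloc() is a bump allocator, so free() reclaims nothing until the outstanding allocation count drops to zero, at which point the whole heap rewinds:

static void example_usage(void)
{
	void *a = malloc(64);	/* malloc_ptr advances by 64 (4-byte aligned) */
	void *b = malloc(128);	/* advances again; a and b never overlap */

	free(a);		/* malloc_count 2 -> 1: nothing is reclaimed */
	free(b);		/* malloc_count 1 -> 0: heap rewinds to free_mem_ptr */
}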
diff --git a/include/linux/decompress/unlzma.h b/include/linux/decompress/unlzma.h
new file mode 100644
index 000000000000..7796538f1bf4
--- /dev/null
+++ b/include/linux/decompress/unlzma.h
@@ -0,0 +1,12 @@
+#ifndef DECOMPRESS_UNLZMA_H
+#define DECOMPRESS_UNLZMA_H
+
+int unlzma(unsigned char *, int,
+	   int (*fill)(void *, unsigned int),
+	   int (*flush)(void *, unsigned int),
+	   unsigned char *output,
+	   int *posp,
+	   void (*error)(char *x)
+	   );
+
+#endif
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index cbc2f0cd631b..0adb0f91568c 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -91,8 +91,11 @@ io_mapping_unmap_atomic(void *vaddr)
 static inline void *
 io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset)
 {
+	resource_size_t phys_addr;
+
 	BUG_ON(offset >= mapping->size);
-	resource_size_t phys_addr = mapping->base + offset;
+	phys_addr = mapping->base + offset;
+
 	return ioremap_wc(phys_addr, PAGE_SIZE);
 }
 
diff --git a/include/linux/netfilter/xt_NFLOG.h b/include/linux/netfilter/xt_NFLOG.h
index cdcd0ed58f7a..4b36aeb46a10 100644
--- a/include/linux/netfilter/xt_NFLOG.h
+++ b/include/linux/netfilter/xt_NFLOG.h
@@ -2,7 +2,7 @@
 #define _XT_NFLOG_TARGET
 
 #define XT_NFLOG_DEFAULT_GROUP		0x1
-#define XT_NFLOG_DEFAULT_THRESHOLD	1
+#define XT_NFLOG_DEFAULT_THRESHOLD	0
 
 #define XT_NFLOG_MASK			0x0
 
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 3577ffd90d45..545b068bcb70 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -76,52 +76,98 @@
 
 #ifdef CONFIG_SMP
 
+#ifdef CONFIG_HAVE_DYNAMIC_PER_CPU_AREA
+
+/* minimum unit size, also is the maximum supported allocation size */
+#define PCPU_MIN_UNIT_SIZE		(16UL << PAGE_SHIFT)
+
+/*
+ * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
+ * back on the first chunk if arch is manually allocating and mapping
+ * it for faster access (as a part of large page mapping for example).
+ * Note that dynamic percpu allocator covers both static and dynamic
+ * areas, so these values are bigger than PERCPU_MODULE_RESERVE.
+ *
+ * On typical configuration with modules, the following values leave
+ * about 8k of free space on the first chunk after boot on both x86_32
+ * and 64 when module support is enabled.  When module support is
+ * disabled, it's much tighter.
+ */
+#ifndef PERCPU_DYNAMIC_RESERVE
+#  if BITS_PER_LONG > 32
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(6 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    endif
+#  else
+#    ifdef CONFIG_MODULES
+#      define PERCPU_DYNAMIC_RESERVE	(4 << PAGE_SHIFT)
+#    else
+#      define PERCPU_DYNAMIC_RESERVE	(2 << PAGE_SHIFT)
+#    endif
+#  endif
+#endif	/* PERCPU_DYNAMIC_RESERVE */
+
+extern void *pcpu_base_addr;
+
+typedef struct page * (*pcpu_get_page_fn_t)(unsigned int cpu, int pageno);
+typedef void (*pcpu_populate_pte_fn_t)(unsigned long addr);
+
+extern size_t __init pcpu_setup_first_chunk(pcpu_get_page_fn_t get_page_fn,
+					    size_t static_size, size_t unit_size,
+					    size_t free_size, void *base_addr,
+					    pcpu_populate_pte_fn_t populate_pte_fn);
+
+/*
+ * Use this to get to a cpu's version of the per-cpu object
+ * dynamically allocated.  Non-atomic access to the current CPU's
+ * version should probably be combined with get_cpu()/put_cpu().
+ */
+#define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
+
+#else /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
 struct percpu_data {
 	void *ptrs[1];
 };
 
 #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-/*
- * Use this to get to a cpu's version of the per-cpu object dynamically
- * allocated. Non-atomic access to the current CPU's version should
- * probably be combined with get_cpu()/put_cpu().
- */
-#define percpu_ptr(ptr, cpu)						\
-({									\
-	struct percpu_data *__p = __percpu_disguise(ptr);		\
-	(__typeof__(ptr))__p->ptrs[(cpu)];				\
+
+#define per_cpu_ptr(ptr, cpu)						\
+({									\
+	struct percpu_data *__p = __percpu_disguise(ptr);		\
+	(__typeof__(ptr))__p->ptrs[(cpu)];				\
 })
 
-extern void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask);
-extern void percpu_free(void *__pdata);
+#endif /* CONFIG_HAVE_DYNAMIC_PER_CPU_AREA */
+
+extern void *__alloc_percpu(size_t size, size_t align);
+extern void free_percpu(void *__pdata);
 
 #else /* CONFIG_SMP */
 
-#define percpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
+#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })
 
-static __always_inline void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+static inline void *__alloc_percpu(size_t size, size_t align)
 {
-	return kzalloc(size, gfp);
+	/*
+	 * Can't easily make larger alignment work with kmalloc.  WARN
+	 * on it.  Larger alignment should only be used for module
+	 * percpu sections on SMP for which this path isn't used.
+	 */
+	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
+	return kzalloc(size, GFP_KERNEL);
 }
 
-static inline void percpu_free(void *__pdata)
+static inline void free_percpu(void *p)
 {
-	kfree(__pdata);
+	kfree(p);
 }
 
 #endif /* CONFIG_SMP */
 
-#define percpu_alloc_mask(size, gfp, mask) \
-	__percpu_alloc_mask((size), (gfp), &(mask))
-
-#define percpu_alloc(size, gfp) percpu_alloc_mask((size), (gfp), cpu_online_map)
-
-/* (legacy) interface for use without CPU hotplug handling */
-
-#define __alloc_percpu(size) percpu_alloc_mask((size), GFP_KERNEL, \
-						  cpu_possible_map)
-#define alloc_percpu(type) (type *)__alloc_percpu(sizeof(type))
-#define free_percpu(ptr) percpu_free((ptr))
-#define per_cpu_ptr(ptr, cpu) percpu_ptr((ptr), (cpu))
+#define alloc_percpu(type)	(type *)__alloc_percpu(sizeof(type), \
+						       __alignof__(type))
 
 #endif /* __LINUX_PERCPU_H */
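A minimal usage sketch of the reworked allocator interface; the stats structure and functions are illustrative only. alloc_percpu() now passes __alignof__(type), so dynamically allocated objects get the same alignment guarantee as static per-cpu variables:

#include <linux/percpu.h>
#include <linux/errno.h>

struct example_stats {
	unsigned long packets;
	unsigned long bytes;
};

static struct example_stats *stats;

static int example_init(void)
{
	int cpu;

	/* One instance per possible CPU, aligned to
	 * __alignof__(struct example_stats); already zeroed, the loop
	 * just demonstrates per_cpu_ptr(). */
	stats = alloc_percpu(struct example_stats);
	if (!stats)
		return -ENOMEM;

	for_each_possible_cpu(cpu)
		per_cpu_ptr(stats, cpu)->packets = 0;
	return 0;
}

static void example_exit(void)
{
	free_percpu(stats);
}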
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
index f3f697df1d71..80044a4f3ab9 100644
--- a/include/linux/rcuclassic.h
+++ b/include/linux/rcuclassic.h
@@ -181,4 +181,10 @@ extern long rcu_batches_completed_bh(void);
 #define rcu_enter_nohz()	do { } while (0)
 #define rcu_exit_nohz()	do { } while (0)
 
+/* A context switch is a grace period for rcuclassic. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 921340a7b71c..528343e6da51 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -52,6 +52,9 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
+/* Internal to kernel, but needed by rcupreempt.h. */
+extern int rcu_scheduler_active;
+
 #if defined(CONFIG_CLASSIC_RCU)
 #include <linux/rcuclassic.h>
 #elif defined(CONFIG_TREE_RCU)
@@ -265,6 +268,7 @@ extern void rcu_barrier_sched(void);
 
 /* Internal to kernel */
 extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
 extern int rcu_needs_cpu(int cpu);
 
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
index 3e05c09b54a2..74304b4538d8 100644
--- a/include/linux/rcupreempt.h
+++ b/include/linux/rcupreempt.h
@@ -142,4 +142,19 @@ static inline void rcu_exit_nohz(void)
 #define rcu_exit_nohz()	do { } while (0)
 #endif /* CONFIG_NO_HZ */
 
+/*
+ * A context switch is a grace period for rcupreempt synchronize_rcu()
+ * only during early boot, before the scheduler has been initialized.
+ * So, how the heck do we get a context switch?  Well, if the caller
+ * invokes synchronize_rcu(), they are willing to accept a context
+ * switch, so we simply pretend that one happened.
+ *
+ * After boot, there might be a blocked or preempted task in an RCU
+ * read-side critical section, so we cannot then take the fastpath.
+ */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1 && !rcu_scheduler_active;
+}
+
 #endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index d4368b7975c3..a722fb67bb2d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -326,4 +326,10 @@ static inline void rcu_exit_nohz(void)
 }
 #endif /* CONFIG_NO_HZ */
 
+/* A context switch is a grace period for rcutree. */
+static inline int rcu_blocking_is_gp(void)
+{
+	return num_online_cpus() == 1;
+}
+
 #endif /* __LINUX_RCUTREE_H */
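Each RCU flavor above now provides rcu_blocking_is_gp(); a sketch of the intended fastpath, modeled on the kernel's synchronize_rcu() of this era (the function shown is illustrative, not part of this diff):

#include <linux/rcupdate.h>
#include <linux/completion.h>

void example_synchronize_rcu(void)
{
	struct rcu_synchronize rcu;

	/* Fastpath: with a single online CPU (and, for preemptible RCU,
	 * only before the scheduler is up), blocking is itself a grace
	 * period, so return immediately. */
	if (rcu_blocking_is_gp())
		return;

	/* Slowpath: queue a callback and sleep until a real grace
	 * period has elapsed. */
	init_completion(&rcu.completion);
	call_rcu(&rcu.head, wakeme_after_rcu);
	wait_for_completion(&rcu.completion);
}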
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f0a50b20e8a0..a7c7698583bb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2303,9 +2303,13 @@ extern long sched_group_rt_runtime(struct task_group *tg);
 extern int sched_group_set_rt_period(struct task_group *tg,
 				     long rt_period_us);
 extern long sched_group_rt_period(struct task_group *tg);
+extern int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk);
 #endif
 #endif
 
+extern int task_can_switch_user(struct user_struct *up,
+				struct task_struct *tsk);
+
 #ifdef CONFIG_TASK_XACCT
 static inline void add_rchar(struct task_struct *tsk, ssize_t amt)
 {
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index 6f3c603b0d67..6b58367d145e 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -41,13 +41,13 @@ static inline void pagefault_enable(void)
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
-		const void __user *from, unsigned long n, unsigned long total)
+		const void __user *from, unsigned long n)
 {
 	return __copy_from_user_inatomic(to, from, n);
 }
 
 static inline unsigned long __copy_from_user_nocache(void *to,
-		const void __user *from, unsigned long n, unsigned long total)
+		const void __user *from, unsigned long n)
 {
 	return __copy_from_user(to, from, n);
 }
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 9c0890c7a06a..a43ebec3a7b9 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -95,6 +95,9 @@ extern struct vm_struct *remove_vm_area(const void *addr);
 
 extern int map_vm_area(struct vm_struct *area, pgprot_t prot,
 			struct page ***pages);
+extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
+				    pgprot_t prot, struct page **pages);
+extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
 extern void unmap_kernel_range(unsigned long addr, unsigned long size);
 
 /* Allocate/destroy a 'vmalloc' VM area. */
@@ -110,5 +113,6 @@ extern long vwrite(char *buf, char *addr, unsigned long count);
  */
 extern rwlock_t vmlist_lock;
 extern struct vm_struct *vmlist;
+extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
 
 #endif /* _LINUX_VMALLOC_H */
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index e78afe7f28e3..c25068e38516 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -59,7 +59,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
 	struct nf_conn *ct = (struct nf_conn *)skb->nfct;
 	int ret = NF_ACCEPT;
 
-	if (ct) {
+	if (ct && ct != &nf_conntrack_untracked) {
 		if (!nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
 			ret = __nf_conntrack_confirm(skb);
 		nf_ct_deliver_cached_events(ct);