Diffstat (limited to 'include')
358 files changed, 9922 insertions, 2922 deletions
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index baacd98e7cc6..4de84ce3a927 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -377,9 +377,6 @@ struct acpi_pci_root {
 
 	u32 osc_support_set;	/* _OSC state of support bits */
 	u32 osc_control_set;	/* _OSC state of control bits */
-	u32 osc_control_qry;	/* the latest _OSC query result */
-
-	u32 osc_queried:1;	/* has _OSC control been queried? */
 };
 
 /* helper */
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index c0786d446a00..984cdc62e30b 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -55,7 +55,7 @@
 extern u8 acpi_gbl_permanent_mmap;
 
 /*
- * Globals that are publically available, allowing for
+ * Globals that are publicly available, allowing for
  * run time configuration
  */
 extern u32 acpi_dbg_level;
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index e53347fbf1da..e994197f84b7 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -43,6 +43,7 @@
  */
 #define atomic_set(v, i) (((v)->counter) = (i))
 
+#include <linux/irqflags.h>
 #include <asm/system.h>
 
 /**
@@ -57,7 +58,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
 	unsigned long flags;
 	int temp;
 
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
 	temp = v->counter;
 	temp += i;
 	v->counter = temp;
@@ -78,7 +79,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 	unsigned long flags;
 	int temp;
 
-	raw_local_irq_save(flags); /* Don't trace it in a irqsoff handler */
+	raw_local_irq_save(flags); /* Don't trace it in an irqsoff handler */
 	temp = v->counter;
 	temp -= i;
 	v->counter = temp;
@@ -119,14 +120,23 @@ static inline void atomic_dec(atomic_t *v)
 #define atomic_dec_and_test(v)		(atomic_sub_return(1, (v)) == 0)
 #define atomic_inc_and_test(v)		(atomic_add_return(1, (v)) == 0)
 
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c != (u);						\
-})
+#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
+
+#define cmpxchg_local(ptr, o, n)					\
+	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+			(unsigned long)(n), sizeof(*(ptr))))
+
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+
+static inline int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
+		c = old;
+	return c != u;
+}
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
@@ -140,15 +150,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 	raw_local_irq_restore(flags);
 }
 
-#define atomic_xchg(ptr, v)		(xchg(&(ptr)->counter, (v)))
-#define atomic_cmpxchg(v, old, new)	(cmpxchg(&((v)->counter), (old), (new)))
-
-#define cmpxchg_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
-			(unsigned long)(n), sizeof(*(ptr))))
-
-#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-
 /* Assume that atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
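Illustrative note (not part of the patch): atomic_add_unless(v, a, u) adds a to *v unless *v equals u and returns non-zero when the add happened; atomic_inc_not_zero() builds directly on it. A minimal usage sketch in kernel C; the object type and helper names are hypothetical, and <linux/slab.h> is assumed for kfree():

    /* Hypothetical refcounted object built on the generic atomic API. */
    struct obj {
            atomic_t refs;          /* 0 means the object is being torn down */
    };

    /* Take a reference only if the object is still live. */
    static int obj_get(struct obj *o)
    {
            /* atomic_inc_not_zero() is atomic_add_unless(&o->refs, 1, 0) */
            return atomic_inc_not_zero(&o->refs);
    }

    static void obj_put(struct obj *o)
    {
            if (atomic_dec_and_test(&o->refs))
                    kfree(o);       /* last reference dropped */
    }

The cmpxchg()-based loop in the new inline simply retries until either the counter hits the forbidden value u or the compare-and-exchange lands without interference.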
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 1914e9742512..110fa700f853 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -1,15 +1,50 @@
 #ifndef _ASM_GENERIC_BITOPS_FIND_H_
 #define _ASM_GENERIC_BITOPS_FIND_H_
 
-#ifndef CONFIG_GENERIC_FIND_NEXT_BIT
+/**
+ * find_next_bit - find the next set bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
 extern unsigned long find_next_bit(const unsigned long *addr, unsigned long
 		size, unsigned long offset);
 
+/**
+ * find_next_zero_bit - find the next cleared bit in a memory region
+ * @addr: The address to base the search on
+ * @offset: The bitnumber to start searching at
+ * @size: The bitmap size in bits
+ */
 extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned
 		long size, unsigned long offset);
-#endif
+
+#ifdef CONFIG_GENERIC_FIND_FIRST_BIT
+
+/**
+ * find_first_bit - find the first set bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first set bit.
+ */
+extern unsigned long find_first_bit(const unsigned long *addr,
+				    unsigned long size);
+
+/**
+ * find_first_zero_bit - find the first cleared bit in a memory region
+ * @addr: The address to start the search at
+ * @size: The maximum size to search
+ *
+ * Returns the bit number of the first cleared bit.
+ */
+extern unsigned long find_first_zero_bit(const unsigned long *addr,
+					 unsigned long size);
+#else /* CONFIG_GENERIC_FIND_FIRST_BIT */
 
 #define find_first_bit(addr, size) find_next_bit((addr), (size), 0)
 #define find_first_zero_bit(addr, size) find_next_zero_bit((addr), (size), 0)
 
+#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+
 #endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
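Illustrative note (not part of the patch): callers normally scan a bitmap until the return value reaches the bitmap size, which is the "no more bits" indicator. A minimal sketch; DECLARE_BITMAP comes from <linux/types.h> and the 64-bit size is arbitrary:

    DECLARE_BITMAP(mask, 64);
    unsigned long bit;

    for (bit = find_first_bit(mask, 64);
         bit < 64;
         bit = find_next_bit(mask, 64, bit + 1)) {
            /* 'bit' is the index of a set bit; both helpers return the
             * size argument (64 here) once no further set bit exists,
             * which terminates the loop. */
    }

find_first_zero_bit()/find_next_zero_bit() follow the same pattern for cleared bits.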
diff --git a/include/asm-generic/cmpxchg-local.h b/include/asm-generic/cmpxchg-local.h
index b2ba2fc8829a..2533fddd34a6 100644
--- a/include/asm-generic/cmpxchg-local.h
+++ b/include/asm-generic/cmpxchg-local.h
@@ -2,6 +2,7 @@
 #define __ASM_GENERIC_CMPXCHG_LOCAL_H
 
 #include <linux/types.h>
+#include <linux/irqflags.h>
 
 extern unsigned long wrong_size_cmpxchg(volatile void *ptr);
 
diff --git a/include/asm-generic/fcntl.h b/include/asm-generic/fcntl.h
index a70b2d2bfc14..0fc16e3f0bfc 100644
--- a/include/asm-generic/fcntl.h
+++ b/include/asm-generic/fcntl.h
@@ -122,7 +122,7 @@
 
 struct f_owner_ex {
 	int	type;
-	pid_t	pid;
+	__kernel_pid_t	pid;
 };
 
 /* for F_[GET|SET]FL */
diff --git a/include/asm-generic/gpio.h b/include/asm-generic/gpio.h
index c7376bf80b06..8ca18e26d7e3 100644
--- a/include/asm-generic/gpio.h
+++ b/include/asm-generic/gpio.h
@@ -16,15 +16,27 @@
  * While the GPIO programming interface defines valid GPIO numbers
  * to be in the range 0..MAX_INT, this library restricts them to the
  * smaller range 0..ARCH_NR_GPIOS-1.
+ *
+ * ARCH_NR_GPIOS is somewhat arbitrary; it usually reflects the sum of
+ * builtin/SoC GPIOs plus a number of GPIOs on expanders; the latter is
+ * actually an estimate of a board-specific value.
  */
 
 #ifndef ARCH_NR_GPIOS
 #define ARCH_NR_GPIOS		256
 #endif
 
+/*
+ * "valid" GPIO numbers are nonnegative and may be passed to
+ * setup routines like gpio_request().  only some valid numbers
+ * can successfully be requested and used.
+ *
+ * Invalid GPIO numbers are useful for indicating no-such-GPIO in
+ * platform data and other tables.
+ */
+
 static inline int gpio_is_valid(int number)
 {
-	/* only some non-negative numbers are valid */
 	return ((unsigned)number) < ARCH_NR_GPIOS;
 }
 
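Illustrative note (not part of the patch): the comment added above is aimed at board code that stores optional GPIO numbers in platform data. A minimal sketch; the structure, field and label names are made up for the example:

    /* Platform data uses any negative number to mean "no GPIO wired up". */
    struct mydev_platform_data {
            int reset_gpio;         /* -1 when the reset line is not connected */
    };

    static int mydev_setup(struct mydev_platform_data *pdata)
    {
            if (!gpio_is_valid(pdata->reset_gpio))
                    return 0;       /* optional line absent, nothing to request */

            return gpio_request(pdata->reset_gpio, "mydev-reset");
    }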
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
index 62f59080e5cc..04d0a977cd43 100644
--- a/include/asm-generic/hardirq.h
+++ b/include/asm-generic/hardirq.h
@@ -3,13 +3,13 @@
 
 #include <linux/cache.h>
 #include <linux/threads.h>
-#include <linux/irq.h>
 
 typedef struct {
 	unsigned int __softirq_pending;
 } ____cacheline_aligned irq_cpustat_t;
 
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
+#include <linux/irq.h>
 
 #ifndef ack_bad_irq
 static inline void ack_bad_irq(unsigned int irq)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index 118601fce92d..3577ca11a0be 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -19,7 +19,9 @@
 #include <asm-generic/iomap.h>
 #endif
 
+#ifndef mmiowb
 #define mmiowb() do {} while (0)
+#endif
 
 /*****************************************************************************/
 /*
@@ -28,39 +30,51 @@
  * differently. On the simple architectures, we just read/write the
  * memory location directly.
  */
+#ifndef __raw_readb
 static inline u8 __raw_readb(const volatile void __iomem *addr)
 {
	return *(const volatile u8 __force *) addr;
 }
+#endif
 
+#ifndef __raw_readw
 static inline u16 __raw_readw(const volatile void __iomem *addr)
 {
	return *(const volatile u16 __force *) addr;
 }
+#endif
 
+#ifndef __raw_readl
 static inline u32 __raw_readl(const volatile void __iomem *addr)
 {
	return *(const volatile u32 __force *) addr;
 }
+#endif
 
 #define readb __raw_readb
 #define readw(addr) __le16_to_cpu(__raw_readw(addr))
 #define readl(addr) __le32_to_cpu(__raw_readl(addr))
 
+#ifndef __raw_writeb
 static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
 {
	*(volatile u8 __force *) addr = b;
 }
+#endif
 
+#ifndef __raw_writew
 static inline void __raw_writew(u16 b, volatile void __iomem *addr)
 {
	*(volatile u16 __force *) addr = b;
 }
+#endif
 
+#ifndef __raw_writel
 static inline void __raw_writel(u32 b, volatile void __iomem *addr)
 {
	*(volatile u32 __force *) addr = b;
 }
+#endif
 
 #define writeb __raw_writeb
 #define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
@@ -122,6 +136,7 @@ static inline void outl(u32 b, unsigned long addr)
 #define outw_p(x, addr)	outw((x), (addr))
 #define outl_p(x, addr)	outl((x), (addr))
 
+#ifndef insb
 static inline void insb(unsigned long addr, void *buffer, int count)
 {
	if (count) {
@@ -132,7 +147,9 @@ static inline void insb(unsigned long addr, void *buffer, int count)
		} while (--count);
	}
 }
+#endif
 
+#ifndef insw
 static inline void insw(unsigned long addr, void *buffer, int count)
 {
	if (count) {
@@ -143,7 +160,9 @@ static inline void insw(unsigned long addr, void *buffer, int count)
		} while (--count);
	}
 }
+#endif
 
+#ifndef insl
 static inline void insl(unsigned long addr, void *buffer, int count)
 {
	if (count) {
@@ -154,7 +173,9 @@ static inline void insl(unsigned long addr, void *buffer, int count)
		} while (--count);
	}
 }
+#endif
 
+#ifndef outsb
 static inline void outsb(unsigned long addr, const void *buffer, int count)
 {
	if (count) {
@@ -164,7 +185,9 @@ static inline void outsb(unsigned long addr, const void *buffer, int count)
		} while (--count);
	}
 }
+#endif
 
+#ifndef outsw
 static inline void outsw(unsigned long addr, const void *buffer, int count)
 {
	if (count) {
@@ -174,7 +197,9 @@ static inline void outsw(unsigned long addr, const void *buffer, int count)
		} while (--count);
	}
 }
+#endif
 
+#ifndef outsl
 static inline void outsl(unsigned long addr, const void *buffer, int count)
 {
	if (count) {
@@ -184,6 +209,7 @@ static inline void outsl(unsigned long addr, const void *buffer, int count)
		} while (--count);
	}
 }
+#endif
 
 #ifndef CONFIG_GENERIC_IOMAP
 #define ioread8(addr) readb(addr)
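Illustrative note (not part of the patch): wrapping each generic accessor in #ifndef lets an architecture override individual helpers and still inherit the rest. A hypothetical <asm/io.h> built on this header might look like the sketch below; the barrier comment is an assumption, not something this file provides:

    /* Provide our own __raw_readl, then pull in the generic fallbacks. */
    #define __raw_readl __raw_readl
    static inline u32 __raw_readl(const volatile void __iomem *addr)
    {
            u32 val = *(const volatile u32 __force *)addr;

            /* an arch-specific read barrier could go here */
            return val;
    }

    #include <asm-generic/io.h>     /* supplies __raw_readb, insb, outsl, ... */

The "#define __raw_readl __raw_readl" line is what makes the corresponding #ifndef in the generic header skip its own version.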
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
index 8554cb6a81b9..a3216655d657 100644
--- a/include/asm-generic/ioctls.h
+++ b/include/asm-generic/ioctls.h
@@ -62,7 +62,9 @@
 #define TCSETSW2	_IOW('T', 0x2C, struct termios2)
 #define TCSETSF2	_IOW('T', 0x2D, struct termios2)
 #define TIOCGRS485	0x542E
+#ifndef TIOCSRS485
 #define TIOCSRS485	0x542F
+#endif
 #define TIOCGPTN	_IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
 #define TIOCSPTLCK	_IOW('T', 0x31, int)  /* Lock/unlock Pty */
 #define TCGETX		0x5432 /* SYS5 TCGETX compatibility */
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
index 9aebf618275a..1f40d0024cf3 100644
--- a/include/asm-generic/irqflags.h
+++ b/include/asm-generic/irqflags.h
@@ -5,68 +5,62 @@
  * All architectures should implement at least the first two functions,
  * usually inline assembly will be the best way.
  */
-#ifndef RAW_IRQ_DISABLED
-#define RAW_IRQ_DISABLED 0
-#define RAW_IRQ_ENABLED 1
+#ifndef ARCH_IRQ_DISABLED
+#define ARCH_IRQ_DISABLED 0
+#define ARCH_IRQ_ENABLED 1
 #endif
 
 /* read interrupt enabled status */
-#ifndef __raw_local_save_flags
-unsigned long __raw_local_save_flags(void);
+#ifndef arch_local_save_flags
+unsigned long arch_local_save_flags(void);
 #endif
 
 /* set interrupt enabled status */
-#ifndef raw_local_irq_restore
-void raw_local_irq_restore(unsigned long flags);
+#ifndef arch_local_irq_restore
+void arch_local_irq_restore(unsigned long flags);
 #endif
 
 /* get status and disable interrupts */
-#ifndef __raw_local_irq_save
-static inline unsigned long __raw_local_irq_save(void)
+#ifndef arch_local_irq_save
+static inline unsigned long arch_local_irq_save(void)
 {
 	unsigned long flags;
-	flags = __raw_local_save_flags();
-	raw_local_irq_restore(RAW_IRQ_DISABLED);
+	flags = arch_local_save_flags();
+	arch_local_irq_restore(ARCH_IRQ_DISABLED);
 	return flags;
 }
 #endif
 
 /* test flags */
-#ifndef raw_irqs_disabled_flags
-static inline int raw_irqs_disabled_flags(unsigned long flags)
+#ifndef arch_irqs_disabled_flags
+static inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return flags == RAW_IRQ_DISABLED;
+	return flags == ARCH_IRQ_DISABLED;
 }
 #endif
 
 /* unconditionally enable interrupts */
-#ifndef raw_local_irq_enable
-static inline void raw_local_irq_enable(void)
+#ifndef arch_local_irq_enable
+static inline void arch_local_irq_enable(void)
 {
-	raw_local_irq_restore(RAW_IRQ_ENABLED);
+	arch_local_irq_restore(ARCH_IRQ_ENABLED);
 }
 #endif
 
 /* unconditionally disable interrupts */
-#ifndef raw_local_irq_disable
-static inline void raw_local_irq_disable(void)
+#ifndef arch_local_irq_disable
+static inline void arch_local_irq_disable(void)
 {
-	raw_local_irq_restore(RAW_IRQ_DISABLED);
+	arch_local_irq_restore(ARCH_IRQ_DISABLED);
 }
 #endif
 
 /* test hardware interrupt enable bit */
-#ifndef raw_irqs_disabled
-static inline int raw_irqs_disabled(void)
+#ifndef arch_irqs_disabled
+static inline int arch_irqs_disabled(void)
 {
-	return raw_irqs_disabled_flags(__raw_local_save_flags());
+	return arch_irqs_disabled_flags(arch_local_save_flags());
 }
 #endif
 
-#define raw_local_save_flags(flags)			\
-	do { (flags) = __raw_local_save_flags(); } while (0)
-
-#define raw_local_irq_save(flags)			\
-	do { (flags) = __raw_local_irq_save(); } while (0)
-
 #endif /* __ASM_GENERIC_IRQFLAGS_H */
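Illustrative note (not part of the patch): an architecture only has to supply the two primitives named at the top of the file; this header derives save/enable/disable/test from them. A hypothetical port might look like the sketch below; the my_arch_* register helpers are assumptions used only for the example:

    /* In the architecture's <asm/irqflags.h> */
    #define arch_local_save_flags arch_local_save_flags
    static inline unsigned long arch_local_save_flags(void)
    {
            return my_arch_read_irq_status();       /* hypothetical helper */
    }

    #define arch_local_irq_restore arch_local_irq_restore
    static inline void arch_local_irq_restore(unsigned long flags)
    {
            my_arch_write_irq_status(flags);        /* hypothetical helper */
    }

    #include <asm-generic/irqflags.h>   /* fills in arch_local_irq_save() etc. */

Defining each name as a macro of itself is what makes the matching #ifndef above skip the generic fallback.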
diff --git a/include/asm-generic/kdebug.h b/include/asm-generic/kdebug.h
index 11e57b6a85fc..d1814497bcdb 100644
--- a/include/asm-generic/kdebug.h
+++ b/include/asm-generic/kdebug.h
@@ -3,7 +3,7 @@
 
 enum die_val {
 	DIE_UNUSED,
-	DIE_OOPS=1
+	DIE_OOPS = 1,
 };
 
 #endif /* _ASM_GENERIC_KDEBUG_H */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index b5043a9890d8..d17784ea37ff 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -55,14 +55,18 @@ extern unsigned long __per_cpu_offset[NR_CPUS];
  */
 #define per_cpu(var, cpu) \
 	(*SHIFT_PERCPU_PTR(&(var), per_cpu_offset(cpu)))
-#define __get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&(var), my_cpu_offset))
-#define __raw_get_cpu_var(var) \
-	(*SHIFT_PERCPU_PTR(&(var), __my_cpu_offset))
 
-#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#ifndef __this_cpu_ptr
 #define __this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, __my_cpu_offset)
+#endif
+#ifdef CONFIG_DEBUG_PREEMPT
+#define this_cpu_ptr(ptr) SHIFT_PERCPU_PTR(ptr, my_cpu_offset)
+#else
+#define this_cpu_ptr(ptr) __this_cpu_ptr(ptr)
+#endif
 
+#define __get_cpu_var(var) (*this_cpu_ptr(&(var)))
+#define __raw_get_cpu_var(var) (*__this_cpu_ptr(&(var)))
 
 #ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void setup_per_cpu_areas(void);
@@ -70,11 +74,16 @@ extern void setup_per_cpu_areas(void);
 
 #else /* ! SMP */
 
-#define per_cpu(var, cpu)			(*((void)(cpu), &(var)))
-#define __get_cpu_var(var)			(var)
-#define __raw_get_cpu_var(var)			(var)
-#define this_cpu_ptr(ptr) per_cpu_ptr(ptr, 0)
-#define __this_cpu_ptr(ptr) this_cpu_ptr(ptr)
+#define VERIFY_PERCPU_PTR(__p) ({			\
+	__verify_pcpu_ptr((__p));			\
+	(typeof(*(__p)) __kernel __force *)(__p);	\
+})
+
+#define per_cpu(var, cpu)	(*((void)(cpu), VERIFY_PERCPU_PTR(&(var))))
+#define __get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
+#define __raw_get_cpu_var(var)	(*VERIFY_PERCPU_PTR(&(var)))
+#define this_cpu_ptr(ptr)	per_cpu_ptr(ptr, 0)
+#define __this_cpu_ptr(ptr)	this_cpu_ptr(ptr)
 
 #endif	/* SMP */
 
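Illustrative note (not part of the patch): after this change __get_cpu_var() is simply the dereferenced this_cpu_ptr() of the variable. A minimal usage sketch, assuming the caller already runs with preemption disabled; the per-CPU variable and its fields are made up:

    struct my_stats {
            unsigned long events;
    };
    static DEFINE_PER_CPU(struct my_stats, my_stats);

    static void my_stats_bump(void)         /* called with preemption off */
    {
            /* lvalue form: __get_cpu_var() expands to *this_cpu_ptr(&var) */
            __get_cpu_var(my_stats).events++;

            /* equivalent pointer form */
            this_cpu_ptr(&my_stats)->events++;
    }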
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index e2bd73e8f9c0..f4d4120e5128 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -129,6 +129,10 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addres
 #define move_pte(pte, prot, old_addr, new_addr)	(pte)
 #endif
 
+#ifndef flush_tlb_fix_spurious_fault
+#define flush_tlb_fix_spurious_fault(vma, address) flush_tlb_page(vma, address)
+#endif
+
 #ifndef pgprot_noncached
 #define pgprot_noncached(prot)	(prot)
 #endif
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
index df84e3b04555..d89dec864d42 100644
--- a/include/asm-generic/syscalls.h
+++ b/include/asm-generic/syscalls.h
@@ -23,8 +23,10 @@ asmlinkage long sys_vfork(struct pt_regs *regs);
 #endif
 
 #ifndef sys_execve
-asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
-			char __user * __user *envp, struct pt_regs *regs);
+asmlinkage long sys_execve(const char __user *filename,
+			   const char __user *const __user *argv,
+			   const char __user *const __user *envp,
+			   struct pt_regs *regs);
 #endif
 
 #ifndef sys_mmap2
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h
index efa403b5e121..4b0b9cbbfae5 100644
--- a/include/asm-generic/system.h
+++ b/include/asm-generic/system.h
@@ -21,6 +21,7 @@
 #include <linux/irqflags.h>
 
 #include <asm/cmpxchg-local.h>
+#include <asm/cmpxchg.h>
 
 struct task_struct;
 
@@ -136,25 +137,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 #define xchg(ptr, x) \
 	((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
 
-static inline unsigned long __cmpxchg(volatile unsigned long *m,
-				      unsigned long old, unsigned long new)
-{
-	unsigned long retval;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	retval = *m;
-	if (retval == old)
-		*m = new;
-	local_irq_restore(flags);
-	return retval;
-}
-
-#define cmpxchg(ptr, o, n) \
-	((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
-			(unsigned long)(o), \
-			(unsigned long)(n)))
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __KERNEL__ */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 8a92a170fb7d..f4229fb315e1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -220,6 +220,8 @@
 								\
 	BUG_TABLE						\
 								\
+	JUMP_TABLE						\
+								\
 	/* PCI quirks */					\
 	.pci_fixup        : AT(ADDR(.pci_fixup) - LOAD_OFFSET) {	\
 		VMLINUX_SYMBOL(__start_pci_fixups_early) = .;	\
@@ -563,6 +565,14 @@
 #define BUG_TABLE
 #endif
 
+#define JUMP_TABLE							\
+	. = ALIGN(8);							\
+	__jump_table : AT(ADDR(__jump_table) - LOAD_OFFSET) {		\
+		VMLINUX_SYMBOL(__start___jump_table) = .;		\
+		*(__jump_table)						\
+		VMLINUX_SYMBOL(__stop___jump_table) = .;		\
+	}
+
 #ifdef CONFIG_PM_TRACE
 #define TRACEDATA \
 	. = ALIGN(4);						\
@@ -677,7 +687,9 @@
 				- LOAD_OFFSET) {			\
 		VMLINUX_SYMBOL(__per_cpu_start) = .;			\
 		*(.data..percpu..first)					\
+		. = ALIGN(PAGE_SIZE);					\
 		*(.data..percpu..page_aligned)				\
+		*(.data..percpu..readmostly)				\
 		*(.data..percpu)					\
 		*(.data..percpu..shared_aligned)			\
 		VMLINUX_SYMBOL(__per_cpu_end) = .;			\
@@ -703,7 +715,9 @@
 	VMLINUX_SYMBOL(__per_cpu_load) = .;			\
 	VMLINUX_SYMBOL(__per_cpu_start) = .;			\
 	*(.data..percpu..first)					\
+	. = ALIGN(PAGE_SIZE);					\
 	*(.data..percpu..page_aligned)				\
+	*(.data..percpu..readmostly)				\
 	*(.data..percpu)					\
 	*(.data..percpu..shared_aligned)			\
 	VMLINUX_SYMBOL(__per_cpu_end) = .;			\
diff --git a/include/crypto/cryptd.h b/include/crypto/cryptd.h
index 1c96b255017c..ba98918bbd9b 100644
--- a/include/crypto/cryptd.h
+++ b/include/crypto/cryptd.h
@@ -1,5 +1,12 @@
 /*
  * Software async crypto daemon
+ *
+ * Added AEAD support to cryptd.
+ *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
+ *             Adrian Hoban <adrian.hoban@intel.com>
+ *             Gabriele Paoloni <gabriele.paoloni@intel.com>
+ *             Aidan O'Mahony (aidan.o.mahony@intel.com)
+ *    Copyright (c) 2010, Intel Corporation.
  */
 
 #ifndef _CRYPTO_CRYPT_H
@@ -42,4 +49,21 @@ struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm);
 struct shash_desc *cryptd_shash_desc(struct ahash_request *req);
 void cryptd_free_ahash(struct cryptd_ahash *tfm);
 
+struct cryptd_aead {
+	struct crypto_aead base;
+};
+
+static inline struct cryptd_aead *__cryptd_aead_cast(
+	struct crypto_aead *tfm)
+{
+	return (struct cryptd_aead *)tfm;
+}
+
+struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
+					  u32 type, u32 mask);
+
+struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm);
+
+void cryptd_free_aead(struct cryptd_aead *tfm);
+
 #endif
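Illustrative note (not part of the patch): a typical consumer allocates the cryptd wrapper, uses &tfm->base for asynchronous requests, and reaches the inner synchronous transform through cryptd_aead_child(). A hedged sketch only; the algorithm name and error-handling style are assumptions, and <linux/err.h> is needed for IS_ERR()/PTR_ERR():

    static int example_init_aead(struct cryptd_aead **out)
    {
            struct cryptd_aead *cryptd_tfm;

            cryptd_tfm = cryptd_alloc_aead("__driver-gcm-aes-example", 0, 0);
            if (IS_ERR(cryptd_tfm))
                    return PTR_ERR(cryptd_tfm);

            /* &cryptd_tfm->base: async handle for queued requests;
             * cryptd_aead_child(cryptd_tfm): underlying sync transform. */
            *out = cryptd_tfm;
            return 0;
    }

The wrapper is released with cryptd_free_aead() once no requests are outstanding.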
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 4086b8ebfafe..da2530e34b26 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -54,8 +54,8 @@
 
 /* Comment by Rik:
  *
- * For some background on GF(2^128) see for example: http://-
- * csrc.nist.gov/CryptoToolkit/modes/proposedmodes/gcm/gcm-revised-spec.pdf
+ * For some background on GF(2^128) see for example:
+ * http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
  *
  * The elements of GF(2^128) := GF(2)[X]/(X^128-X^7-X^2-X^1-1) can
  * be mapped to computer memory in a variety of ways. Let's examine
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 2a512bc0d4ab..4c9461a4f9e6 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -305,14 +305,16 @@ struct drm_ioctl_desc {
 	unsigned int cmd;
 	int flags;
 	drm_ioctl_t *func;
+	unsigned int cmd_drv;
 };
 
 /**
  * Creates a driver or general drm_ioctl_desc array entry for the given
  * ioctl, for use by drm_ioctl().
  */
-#define DRM_IOCTL_DEF(ioctl, _func, _flags) \
-	[DRM_IOCTL_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags}
+
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags)			\
+	[DRM_IOCTL_NR(DRM_##ioctl)] = {.cmd = DRM_##ioctl, .func = _func, .flags = _flags, .cmd_drv = DRM_IOCTL_##ioctl}
 
 struct drm_magic_entry {
 	struct list_head head;
@@ -610,7 +612,7 @@ struct drm_gem_object {
 	struct kref refcount;
 
 	/** Handle count of this object. Each handle also holds a reference */
-	struct kref handlecount;
+	atomic_t handle_count; /* number of handles on this object */
 
 	/** Related drm device */
 	struct drm_device *dev;
@@ -806,7 +808,6 @@ struct drm_driver {
 	 */
 	int (*gem_init_object) (struct drm_gem_object *obj);
 	void (*gem_free_object) (struct drm_gem_object *obj);
-	void (*gem_free_object_unlocked) (struct drm_gem_object *obj);
 
 	/* vga arb irq handler */
 	void (*vgaarb_irq)(struct drm_device *dev, bool state);
@@ -1173,6 +1174,7 @@ extern int drm_release(struct inode *inode, struct file *filp);
 extern int drm_mmap(struct file *filp, struct vm_area_struct *vma);
 extern int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma);
 extern void drm_vm_open_locked(struct vm_area_struct *vma);
+extern void drm_vm_close_locked(struct vm_area_struct *vma);
 extern resource_size_t drm_core_get_map_ofs(struct drm_local_map * map);
 extern resource_size_t drm_core_get_reg_ofs(struct drm_device *dev);
 extern unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
@@ -1453,12 +1455,11 @@ int drm_gem_init(struct drm_device *dev);
 void drm_gem_destroy(struct drm_device *dev);
 void drm_gem_object_release(struct drm_gem_object *obj);
 void drm_gem_object_free(struct kref *kref);
-void drm_gem_object_free_unlocked(struct kref *kref);
 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
 					    size_t size);
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size);
-void drm_gem_object_handle_free(struct kref *kref);
+void drm_gem_object_handle_free(struct drm_gem_object *obj);
 void drm_gem_vm_open(struct vm_area_struct *vma);
 void drm_gem_vm_close(struct vm_area_struct *vma);
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
@@ -1481,8 +1482,12 @@ drm_gem_object_unreference(struct drm_gem_object *obj)
 static inline void
 drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
 {
-	if (obj != NULL)
-		kref_put(&obj->refcount, drm_gem_object_free_unlocked);
+	if (obj != NULL) {
+		struct drm_device *dev = obj->dev;
+		mutex_lock(&dev->struct_mutex);
+		kref_put(&obj->refcount, drm_gem_object_free);
+		mutex_unlock(&dev->struct_mutex);
+	}
 }
 
 int drm_gem_handle_create(struct drm_file *file_priv,
@@ -1493,7 +1498,7 @@ static inline void
 drm_gem_object_handle_reference(struct drm_gem_object *obj)
 {
 	drm_gem_object_reference(obj);
-	kref_get(&obj->handlecount);
+	atomic_inc(&obj->handle_count);
 }
 
 static inline void
@@ -1502,12 +1507,15 @@ drm_gem_object_handle_unreference(struct drm_gem_object *obj)
 	if (obj == NULL)
 		return;
 
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
 	/*
 	 * Must bump handle count first as this may be the last
 	 * ref, in which case the object would disappear before we
 	 * checked for a name
 	 */
-	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
 	drm_gem_object_unreference(obj);
 }
 
@@ -1517,12 +1525,17 @@ drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
 	if (obj == NULL)
 		return;
 
+	if (atomic_read(&obj->handle_count) == 0)
+		return;
+
 	/*
 	 * Must bump handle count first as this may be the last
 	 * ref, in which case the object would disappear before we
 	 * checked for a name
 	 */
-	kref_put(&obj->handlecount, drm_gem_object_handle_free);
+
+	if (atomic_dec_and_test(&obj->handle_count))
+		drm_gem_object_handle_free(obj);
 	drm_gem_object_unreference_unlocked(obj);
 }
 
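Illustrative note (not part of the patch): with .cmd_drv added to struct drm_ioctl_desc, driver ioctl tables are built with DRM_IOCTL_DEF_DRV(), which token-pastes both the DRM_<name> index and the DRM_IOCTL_<name> encoding. A sketch for a hypothetical driver "foo"; the ioctl name and handler are made up, while DRM_AUTH is an existing flag:

    static struct drm_ioctl_desc foo_ioctls[] = {
            DRM_IOCTL_DEF_DRV(FOO_GETPARAM, foo_getparam_ioctl, DRM_AUTH),
            /* expands to:
             * [DRM_IOCTL_NR(DRM_FOO_GETPARAM)] = {
             *         .cmd     = DRM_FOO_GETPARAM,
             *         .func    = foo_getparam_ioctl,
             *         .flags   = DRM_AUTH,
             *         .cmd_drv = DRM_IOCTL_FOO_GETPARAM,
             * }
             */
    };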
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c9f3cc5949a8..3e5a51af757c 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -386,7 +386,15 @@ struct drm_connector_funcs {
 	void (*dpms)(struct drm_connector *connector, int mode);
 	void (*save)(struct drm_connector *connector);
 	void (*restore)(struct drm_connector *connector);
-	enum drm_connector_status (*detect)(struct drm_connector *connector);
+
+	/* Check to see if anything is attached to the connector.
+	 * @force is set to false whilst polling, true when checking the
+	 * connector due to user request. @force can be used by the driver
+	 * to avoid expensive, destructive operations during automated
+	 * probing.
+	 */
+	enum drm_connector_status (*detect)(struct drm_connector *connector,
+					    bool force);
 	int (*fill_modes)(struct drm_connector *connector, uint32_t max_width, uint32_t max_height);
 	int (*set_property)(struct drm_connector *connector, struct drm_property *property,
 			     uint64_t val);
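Illustrative note (not part of the patch): a driver's detect() hook can use the new force argument to stay cheap during periodic output polling and only run invasive probing (such as load detection) on explicit userspace requests. A sketch with hypothetical driver helpers:

    static enum drm_connector_status
    foo_connector_detect(struct drm_connector *connector, bool force)
    {
            if (foo_hpd_sense(connector))           /* cheap hot-plug pin check */
                    return connector_status_connected;

            if (!force)                             /* background poll: bail out */
                    return connector_status_unknown;

            /* invasive, user-requested probe */
            return foo_load_detect(connector);
    }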
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 3a9940ef728b..883c1d439899 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -85,7 +85,6 @@
 	{0x1002, 0x5460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5464, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
-	{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5548, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5549, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x554A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R423|RADEON_NEW_MEMMAP}, \
@@ -103,6 +102,7 @@
 	{0x1002, 0x564F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5652, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5653, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+	{0x1002, 0x5657, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV410|RADEON_NEW_MEMMAP}, \
 	{0x1002, 0x5834, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP}, \
 	{0x1002, 0x5835, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS300|RADEON_IS_IGP|RADEON_IS_MOBILITY}, \
 	{0x1002, 0x5954, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS480|RADEON_IS_IGP|RADEON_IS_MOBILITY|RADEON_IS_IGPGART}, \
diff --git a/include/drm/i830_drm.h b/include/drm/i830_drm.h
index 4b00d2dd4f68..61315c29b8f3 100644
--- a/include/drm/i830_drm.h
+++ b/include/drm/i830_drm.h
@@ -264,20 +264,20 @@ typedef struct _drm_i830_sarea {
 #define DRM_I830_GETPARAM 0x0c
 #define DRM_I830_SETPARAM 0x0d
 
-#define DRM_IOCTL_I830_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_INIT, drm_i830_init_t)
-#define DRM_IOCTL_I830_VERTEX		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_VERTEX, drm_i830_vertex_t)
-#define DRM_IOCTL_I830_CLEAR		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_CLEAR, drm_i830_clear_t)
-#define DRM_IOCTL_I830_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLUSH)
-#define DRM_IOCTL_I830_GETAGE		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_GETAGE)
-#define DRM_IOCTL_I830_GETBUF		DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETBUF, drm_i830_dma_t)
-#define DRM_IOCTL_I830_SWAP		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_SWAP)
-#define DRM_IOCTL_I830_COPY		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_COPY, drm_i830_copy_t)
-#define DRM_IOCTL_I830_DOCOPY		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_DOCOPY)
-#define DRM_IOCTL_I830_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_IOCTL_I830_FLIP)
-#define DRM_IOCTL_I830_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_EMIT, drm_i830_irq_emit_t)
-#define DRM_IOCTL_I830_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_IOCTL_I830_IRQ_WAIT, drm_i830_irq_wait_t)
-#define DRM_IOCTL_I830_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_GETPARAM, drm_i830_getparam_t)
-#define DRM_IOCTL_I830_SETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_IOCTL_I830_SETPARAM, drm_i830_setparam_t)
+#define DRM_IOCTL_I830_INIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_INIT, drm_i830_init_t)
+#define DRM_IOCTL_I830_VERTEX		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_VERTEX, drm_i830_vertex_t)
+#define DRM_IOCTL_I830_CLEAR		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_CLEAR, drm_i830_clear_t)
+#define DRM_IOCTL_I830_FLUSH		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLUSH)
+#define DRM_IOCTL_I830_GETAGE		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_GETAGE)
+#define DRM_IOCTL_I830_GETBUF		DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETBUF, drm_i830_dma_t)
+#define DRM_IOCTL_I830_SWAP		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_SWAP)
+#define DRM_IOCTL_I830_COPY		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_COPY, drm_i830_copy_t)
+#define DRM_IOCTL_I830_DOCOPY		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_DOCOPY)
+#define DRM_IOCTL_I830_FLIP		DRM_IO ( DRM_COMMAND_BASE + DRM_I830_FLIP)
+#define DRM_IOCTL_I830_IRQ_EMIT		DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_IRQ_EMIT, drm_i830_irq_emit_t)
+#define DRM_IOCTL_I830_IRQ_WAIT		DRM_IOW( DRM_COMMAND_BASE + DRM_I830_IRQ_WAIT, drm_i830_irq_wait_t)
+#define DRM_IOCTL_I830_GETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_GETPARAM, drm_i830_getparam_t)
+#define DRM_IOCTL_I830_SETPARAM		DRM_IOWR(DRM_COMMAND_BASE + DRM_I830_SETPARAM, drm_i830_setparam_t)
 
 typedef struct _drm_i830_clear {
 	int clear_color;
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 8f8b072c4c7b..e41c74facb6a 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -215,6 +215,7 @@ typedef struct _drm_i915_sarea {
 #define DRM_IOCTL_I915_SET_VBLANK_PIPE	DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_GET_VBLANK_PIPE	DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t)
 #define DRM_IOCTL_I915_VBLANK_SWAP	DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t)
+#define DRM_IOCTL_I915_HWS_ADDR		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_HWS_ADDR, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_INIT		DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
 #define DRM_IOCTL_I915_GEM_EXECBUFFER2	DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
diff --git a/include/drm/mga_drm.h b/include/drm/mga_drm.h
index 3ffbc4798afa..c16097f99be0 100644
--- a/include/drm/mga_drm.h
+++ b/include/drm/mga_drm.h
@@ -248,7 +248,7 @@ typedef struct _drm_mga_sarea {
 #define DRM_MGA_DMA_BOOTSTRAP	0x0c
 
 #define DRM_IOCTL_MGA_INIT	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_INIT, drm_mga_init_t)
-#define DRM_IOCTL_MGA_FLUSH	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, drm_lock_t)
+#define DRM_IOCTL_MGA_FLUSH	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_FLUSH, struct drm_lock)
 #define DRM_IOCTL_MGA_RESET	DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_RESET)
 #define DRM_IOCTL_MGA_SWAP	DRM_IO(  DRM_COMMAND_BASE + DRM_MGA_SWAP)
 #define DRM_IOCTL_MGA_CLEAR	DRM_IOW( DRM_COMMAND_BASE + DRM_MGA_CLEAR, drm_mga_clear_t)
diff --git a/include/drm/nouveau_drm.h b/include/drm/nouveau_drm.h
index fe917dee723a..01a714119506 100644
--- a/include/drm/nouveau_drm.h
+++ b/include/drm/nouveau_drm.h
@@ -197,4 +197,17 @@ struct drm_nouveau_sarea {
 #define DRM_NOUVEAU_GEM_CPU_FINI       0x43
 #define DRM_NOUVEAU_GEM_INFO           0x44
 
+#define DRM_IOCTL_NOUVEAU_GETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GETPARAM, struct drm_nouveau_getparam)
+#define DRM_IOCTL_NOUVEAU_SETPARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_SETPARAM, struct drm_nouveau_setparam)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_ALLOC      DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_ALLOC, struct drm_nouveau_channel_alloc)
+#define DRM_IOCTL_NOUVEAU_CHANNEL_FREE       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_CHANNEL_FREE, struct drm_nouveau_channel_free)
+#define DRM_IOCTL_NOUVEAU_GROBJ_ALLOC        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GROBJ_ALLOC, struct drm_nouveau_grobj_alloc)
+#define DRM_IOCTL_NOUVEAU_NOTIFIEROBJ_ALLOC  DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_NOTIFIEROBJ_ALLOC, struct drm_nouveau_notifierobj_alloc)
+#define DRM_IOCTL_NOUVEAU_GPUOBJ_FREE        DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GPUOBJ_FREE, struct drm_nouveau_gpuobj_free)
+#define DRM_IOCTL_NOUVEAU_GEM_NEW            DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_NEW, struct drm_nouveau_gem_new)
+#define DRM_IOCTL_NOUVEAU_GEM_PUSHBUF        DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_PUSHBUF, struct drm_nouveau_gem_pushbuf)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_PREP       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_PREP, struct drm_nouveau_gem_cpu_prep)
+#define DRM_IOCTL_NOUVEAU_GEM_CPU_FINI       DRM_IOW (DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_CPU_FINI, struct drm_nouveau_gem_cpu_fini)
+#define DRM_IOCTL_NOUVEAU_GEM_INFO           DRM_IOWR(DRM_COMMAND_BASE + DRM_NOUVEAU_GEM_INFO, struct drm_nouveau_gem_info)
+
 #endif /* __NOUVEAU_DRM_H__ */
diff --git a/include/drm/radeon_drm.h b/include/drm/radeon_drm.h
index 0acaf8f91437..10f8b53bdd40 100644
--- a/include/drm/radeon_drm.h
+++ b/include/drm/radeon_drm.h
@@ -547,8 +547,8 @@ typedef struct {
 #define DRM_IOCTL_RADEON_GEM_WAIT_IDLE	DRM_IOW(DRM_COMMAND_BASE + DRM_RADEON_GEM_WAIT_IDLE, struct drm_radeon_gem_wait_idle)
 #define DRM_IOCTL_RADEON_CS		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_CS, struct drm_radeon_cs)
 #define DRM_IOCTL_RADEON_INFO		DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_INFO, struct drm_radeon_info)
-#define DRM_IOCTL_RADEON_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
-#define DRM_IOCTL_RADEON_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
+#define DRM_IOCTL_RADEON_GEM_SET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_SET_TILING, struct drm_radeon_gem_set_tiling)
+#define DRM_IOCTL_RADEON_GEM_GET_TILING	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_GET_TILING, struct drm_radeon_gem_get_tiling)
 #define DRM_IOCTL_RADEON_GEM_BUSY	DRM_IOWR(DRM_COMMAND_BASE + DRM_RADEON_GEM_BUSY, struct drm_radeon_gem_busy)
 
 typedef struct drm_radeon_init {
diff --git a/include/drm/savage_drm.h b/include/drm/savage_drm.h index 8a576ef01821..4863cf6bf96f 100644 --- a/include/drm/savage_drm.h +++ b/include/drm/savage_drm.h | |||
@@ -63,10 +63,10 @@ typedef struct _drm_savage_sarea { | |||
63 | #define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 | 63 | #define DRM_SAVAGE_BCI_EVENT_EMIT 0x02 |
64 | #define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 | 64 | #define DRM_SAVAGE_BCI_EVENT_WAIT 0x03 |
65 | 65 | ||
66 | #define DRM_IOCTL_SAVAGE_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) | 66 | #define DRM_IOCTL_SAVAGE_BCI_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_INIT, drm_savage_init_t) |
67 | #define DRM_IOCTL_SAVAGE_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) | 67 | #define DRM_IOCTL_SAVAGE_BCI_CMDBUF DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_CMDBUF, drm_savage_cmdbuf_t) |
68 | #define DRM_IOCTL_SAVAGE_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) | 68 | #define DRM_IOCTL_SAVAGE_BCI_EVENT_EMIT DRM_IOWR(DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_EMIT, drm_savage_event_emit_t) |
69 | #define DRM_IOCTL_SAVAGE_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) | 69 | #define DRM_IOCTL_SAVAGE_BCI_EVENT_WAIT DRM_IOW( DRM_COMMAND_BASE + DRM_SAVAGE_BCI_EVENT_WAIT, drm_savage_event_wait_t) |
70 | 70 | ||
71 | #define SAVAGE_DMA_PCI 1 | 71 | #define SAVAGE_DMA_PCI 1 |
72 | #define SAVAGE_DMA_AGP 3 | 72 | #define SAVAGE_DMA_AGP 3 |
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 267a86c74e2e..2040e6c4f172 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h | |||
@@ -246,9 +246,11 @@ struct ttm_buffer_object { | |||
246 | 246 | ||
247 | atomic_t reserved; | 247 | atomic_t reserved; |
248 | 248 | ||
249 | |||
250 | /** | 249 | /** |
251 | * Members protected by the bo::lock | 250 | * Members protected by the bo::lock |
251 | * In addition, setting sync_obj to anything other | ||
252 | * than NULL requires bo::reserved to be held. This allows | ||
253 | * checking sync_obj for NULL while reserved without holding bo::lock. | ||
252 | */ | 254 | */ |
253 | 255 | ||
254 | void *sync_obj_arg; | 256 | void *sync_obj_arg; |
diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 626b629429ff..831c4634162c 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild | |||
@@ -118,7 +118,6 @@ header-y += eventpoll.h | |||
118 | header-y += ext2_fs.h | 118 | header-y += ext2_fs.h |
119 | header-y += fadvise.h | 119 | header-y += fadvise.h |
120 | header-y += falloc.h | 120 | header-y += falloc.h |
121 | header-y += fanotify.h | ||
122 | header-y += fb.h | 121 | header-y += fb.h |
123 | header-y += fcntl.h | 122 | header-y += fcntl.h |
124 | header-y += fd.h | 123 | header-y += fd.h |
@@ -302,6 +301,7 @@ header-y += quota.h | |||
302 | header-y += radeonfb.h | 301 | header-y += radeonfb.h |
303 | header-y += random.h | 302 | header-y += random.h |
304 | header-y += raw.h | 303 | header-y += raw.h |
304 | header-y += rds.h | ||
305 | header-y += reboot.h | 305 | header-y += reboot.h |
306 | header-y += reiserfs_fs.h | 306 | header-y += reiserfs_fs.h |
307 | header-y += reiserfs_xattr.h | 307 | header-y += reiserfs_xattr.h |
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index ccf94dc5acdf..c227757feb06 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -304,8 +304,8 @@ acpi_status acpi_run_osc(acpi_handle handle, struct acpi_osc_context *context); | |||
304 | OSC_PCI_EXPRESS_PME_CONTROL | \ | 304 | OSC_PCI_EXPRESS_PME_CONTROL | \ |
305 | OSC_PCI_EXPRESS_AER_CONTROL | \ | 305 | OSC_PCI_EXPRESS_AER_CONTROL | \ |
306 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) | 306 | OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL) |
307 | 307 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, | |
308 | extern acpi_status acpi_pci_osc_control_set(acpi_handle handle, u32 flags); | 308 | u32 *mask, u32 req); |
309 | extern void acpi_early_init(void); | 309 | extern void acpi_early_init(void); |
310 | 310 | ||
311 | #else /* !CONFIG_ACPI */ | 311 | #else /* !CONFIG_ACPI */ |
diff --git a/include/linux/acpi_pmtmr.h b/include/linux/acpi_pmtmr.h index 7e3d2859be50..1d0ef1ae8036 100644 --- a/include/linux/acpi_pmtmr.h +++ b/include/linux/acpi_pmtmr.h | |||
@@ -25,8 +25,6 @@ static inline u32 acpi_pm_read_early(void) | |||
25 | return acpi_pm_read_verified() & ACPI_PM_MASK; | 25 | return acpi_pm_read_verified() & ACPI_PM_MASK; |
26 | } | 26 | } |
27 | 27 | ||
28 | extern void pmtimer_wait(unsigned); | ||
29 | |||
30 | #else | 28 | #else |
31 | 29 | ||
32 | static inline u32 acpi_pm_read_early(void) | 30 | static inline u32 acpi_pm_read_early(void) |
diff --git a/include/linux/altera_uart.h b/include/linux/altera_uart.h index 8d441064a30d..a10a90791976 100644 --- a/include/linux/altera_uart.h +++ b/include/linux/altera_uart.h | |||
@@ -5,10 +5,15 @@ | |||
5 | #ifndef __ALTUART_H | 5 | #ifndef __ALTUART_H |
6 | #define __ALTUART_H | 6 | #define __ALTUART_H |
7 | 7 | ||
8 | #include <linux/init.h> | ||
9 | |||
8 | struct altera_uart_platform_uart { | 10 | struct altera_uart_platform_uart { |
9 | unsigned long mapbase; /* Physical address base */ | 11 | unsigned long mapbase; /* Physical address base */ |
10 | unsigned int irq; /* Interrupt vector */ | 12 | unsigned int irq; /* Interrupt vector */ |
11 | unsigned int uartclk; /* UART clock rate */ | 13 | unsigned int uartclk; /* UART clock rate */ |
14 | unsigned int bus_shift; /* Bus shift (address stride) */ | ||
12 | }; | 15 | }; |
13 | 16 | ||
17 | int __init early_altera_uart_setup(struct altera_uart_platform_uart *platp); | ||
18 | |||
14 | #endif /* __ALTUART_H */ | 19 | #endif /* __ALTUART_H */ |
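For orientation, a minimal board-file sketch using the new bus_shift field and the early_altera_uart_setup() hook declared above. The base address, IRQ, clock rate and bus width below are assumptions made up for the example, not values from this patch.

	#include <linux/init.h>
	#include <linux/altera_uart.h>

	static struct altera_uart_platform_uart board_uart = {
		.mapbase   = 0xf8001000,	/* assumed physical base */
		.irq       = 5,			/* assumed interrupt vector */
		.uartclk   = 50000000,		/* assumed 50 MHz reference clock */
		.bus_shift = 2,			/* registers strided on a 32-bit bus */
	};

	static int __init board_earlycon_init(void)
	{
		/* Register the port early so it can serve as an early console. */
		return early_altera_uart_setup(&board_uart);
	}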
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index b0c174012436..c6454cca0447 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/resource.h> | 20 | #include <linux/resource.h> |
21 | 21 | ||
22 | #define AMBA_NR_IRQS 2 | 22 | #define AMBA_NR_IRQS 2 |
23 | #define AMBA_CID 0xb105f00d | ||
23 | 24 | ||
24 | struct clk; | 25 | struct clk; |
25 | 26 | ||
@@ -70,9 +71,15 @@ void amba_release_regions(struct amba_device *); | |||
70 | #define amba_pclk_disable(d) \ | 71 | #define amba_pclk_disable(d) \ |
71 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) | 72 | do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) |
72 | 73 | ||
73 | #define amba_config(d) (((d)->periphid >> 24) & 0xff) | 74 | /* Some drivers don't use the struct amba_device */ |
74 | #define amba_rev(d) (((d)->periphid >> 20) & 0x0f) | 75 | #define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) |
75 | #define amba_manf(d) (((d)->periphid >> 12) & 0xff) | 76 | #define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) |
76 | #define amba_part(d) ((d)->periphid & 0xfff) | 77 | #define AMBA_MANF_BITS(a) (((a) >> 12) & 0xff) |
78 | #define AMBA_PART_BITS(a) ((a) & 0xfff) | ||
79 | |||
80 | #define amba_config(d) AMBA_CONFIG_BITS((d)->periphid) | ||
81 | #define amba_rev(d) AMBA_REV_BITS((d)->periphid) | ||
82 | #define amba_manf(d) AMBA_MANF_BITS((d)->periphid) | ||
83 | #define amba_part(d) AMBA_PART_BITS((d)->periphid) | ||
77 | 84 | ||
78 | #endif | 85 | #endif |
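A minimal sketch of how the new AMBA_*_BITS macros can be used on a raw peripheral ID when no struct amba_device is at hand; the print format is an arbitrary choice for the example.

	#include <linux/kernel.h>
	#include <linux/amba/bus.h>

	static void decode_periphid(unsigned int periphid)
	{
		unsigned int part = AMBA_PART_BITS(periphid);	/* bits [11:0]  */
		unsigned int manf = AMBA_MANF_BITS(periphid);	/* bits [19:12] */
		unsigned int rev  = AMBA_REV_BITS(periphid);	/* bits [23:20] */
		unsigned int cfg  = AMBA_CONFIG_BITS(periphid);	/* bits [31:24] */

		pr_info("part %03x manf %02x rev %u cfg %02x\n", part, manf, rev, cfg);
	}

The existing amba_config()/amba_rev()/amba_manf()/amba_part() helpers remain thin wrappers around these macros for drivers that do have a struct amba_device.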
diff --git a/include/linux/amba/clcd.h b/include/linux/amba/clcd.h index ca16c3801a1e..be33b3affc8a 100644 --- a/include/linux/amba/clcd.h +++ b/include/linux/amba/clcd.h | |||
@@ -150,6 +150,7 @@ struct clcd_fb { | |||
150 | u16 off_cntl; | 150 | u16 off_cntl; |
151 | u32 clcd_cntl; | 151 | u32 clcd_cntl; |
152 | u32 cmap[16]; | 152 | u32 cmap[16]; |
153 | bool clk_enabled; | ||
153 | }; | 154 | }; |
154 | 155 | ||
155 | static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) | 156 | static inline void clcdfb_decode(struct clcd_fb *fb, struct clcd_regs *regs) |
diff --git a/include/linux/amba/mmci.h b/include/linux/amba/mmci.h index ca84ce70d5d5..f4ee9acc9721 100644 --- a/include/linux/amba/mmci.h +++ b/include/linux/amba/mmci.h | |||
@@ -24,6 +24,7 @@ | |||
24 | * whether a card is present in the MMC slot or not | 24 | * whether a card is present in the MMC slot or not |
25 | * @gpio_wp: read this GPIO pin to see if the card is write protected | 25 | * @gpio_wp: read this GPIO pin to see if the card is write protected |
26 | * @gpio_cd: read this GPIO pin to detect card insertion | 26 | * @gpio_cd: read this GPIO pin to detect card insertion |
27 | * @cd_invert: true if the gpio_cd pin value is active low | ||
27 | * @capabilities: the capabilities of the block as implemented in | 28 | * @capabilities: the capabilities of the block as implemented in |
28 | * this platform, signify anything MMC_CAP_* from mmc/host.h | 29 | * this platform, signify anything MMC_CAP_* from mmc/host.h |
29 | */ | 30 | */ |
@@ -35,6 +36,7 @@ struct mmci_platform_data { | |||
35 | unsigned int (*status)(struct device *); | 36 | unsigned int (*status)(struct device *); |
36 | int gpio_wp; | 37 | int gpio_wp; |
37 | int gpio_cd; | 38 | int gpio_cd; |
39 | bool cd_invert; | ||
38 | unsigned long capabilities; | 40 | unsigned long capabilities; |
39 | }; | 41 | }; |
40 | 42 | ||
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h index abf26cc47a2b..4ce98f54186b 100644 --- a/include/linux/amba/pl022.h +++ b/include/linux/amba/pl022.h | |||
@@ -228,6 +228,7 @@ enum ssp_chip_select { | |||
228 | }; | 228 | }; |
229 | 229 | ||
230 | 230 | ||
231 | struct dma_chan; | ||
231 | /** | 232 | /** |
232 | * struct pl022_ssp_master - device.platform_data for SPI controller devices. | 233 | * struct pl022_ssp_master - device.platform_data for SPI controller devices. |
233 | * @num_chipselect: chipselects are used to distinguish individual | 234 | * @num_chipselect: chipselects are used to distinguish individual |
@@ -235,11 +236,16 @@ enum ssp_chip_select { | |||
235 | * each slave has a chipselect signal, but it's common that not | 236 | * each slave has a chipselect signal, but it's common that not |
236 | * every chipselect is connected to a slave. | 237 | * every chipselect is connected to a slave. |
237 | * @enable_dma: if true enables DMA driven transfers. | 238 | * @enable_dma: if true enables DMA driven transfers. |
239 | * @dma_rx_param: parameter to locate an RX DMA channel. | ||
240 | * @dma_tx_param: parameter to locate a TX DMA channel. | ||
238 | */ | 241 | */ |
239 | struct pl022_ssp_controller { | 242 | struct pl022_ssp_controller { |
240 | u16 bus_id; | 243 | u16 bus_id; |
241 | u8 num_chipselect; | 244 | u8 num_chipselect; |
242 | u8 enable_dma:1; | 245 | u8 enable_dma:1; |
246 | bool (*dma_filter)(struct dma_chan *chan, void *filter_param); | ||
247 | void *dma_rx_param; | ||
248 | void *dma_tx_param; | ||
243 | }; | 249 | }; |
244 | 250 | ||
245 | /** | 251 | /** |
@@ -270,20 +276,13 @@ struct pl022_ssp_controller { | |||
270 | * @dma_config: DMA configuration for SSP controller and peripheral | 276 | * @dma_config: DMA configuration for SSP controller and peripheral |
271 | */ | 277 | */ |
272 | struct pl022_config_chip { | 278 | struct pl022_config_chip { |
273 | struct device *dev; | ||
274 | enum ssp_loopback lbm; | ||
275 | enum ssp_interface iface; | 279 | enum ssp_interface iface; |
276 | enum ssp_hierarchy hierarchy; | 280 | enum ssp_hierarchy hierarchy; |
277 | bool slave_tx_disable; | 281 | bool slave_tx_disable; |
278 | struct ssp_clock_params clk_freq; | 282 | struct ssp_clock_params clk_freq; |
279 | enum ssp_rx_endian endian_rx; | ||
280 | enum ssp_tx_endian endian_tx; | ||
281 | enum ssp_data_size data_size; | ||
282 | enum ssp_mode com_mode; | 283 | enum ssp_mode com_mode; |
283 | enum ssp_rx_level_trig rx_lev_trig; | 284 | enum ssp_rx_level_trig rx_lev_trig; |
284 | enum ssp_tx_level_trig tx_lev_trig; | 285 | enum ssp_tx_level_trig tx_lev_trig; |
285 | enum ssp_spi_clk_phase clk_phase; | ||
286 | enum ssp_spi_clk_pol clk_pol; | ||
287 | enum ssp_microwire_ctrl_len ctrl_len; | 286 | enum ssp_microwire_ctrl_len ctrl_len; |
288 | enum ssp_microwire_wait_state wait_state; | 287 | enum ssp_microwire_wait_state wait_state; |
289 | enum ssp_duplex duplex; | 288 | enum ssp_duplex duplex; |
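A hedged sketch of platform data wired up for the new DMA fields of struct pl022_ssp_controller. The filter callback and the channel cookies are platform specific; the matching criterion below (comparing against dma_chan::private) is only an assumption for illustration.

	#include <linux/dmaengine.h>
	#include <linux/amba/pl022.h>

	/* Platform-specific channel match; the criterion is assumed. */
	static bool board_ssp_dma_filter(struct dma_chan *chan, void *filter_param)
	{
		return chan->private == filter_param;
	}

	static struct pl022_ssp_controller board_ssp_plat = {
		.bus_id		= 0,
		.num_chipselect	= 4,
		.enable_dma	= 1,
		.dma_filter	= board_ssp_dma_filter,
		.dma_rx_param	= (void *)0x1,	/* assumed RX channel cookie */
		.dma_tx_param	= (void *)0x2,	/* assumed TX channel cookie */
	};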
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h index e1b634b635f2..6021588ba0a8 100644 --- a/include/linux/amba/serial.h +++ b/include/linux/amba/serial.h | |||
@@ -32,7 +32,9 @@ | |||
32 | #define UART01x_RSR 0x04 /* Receive status register (Read). */ | 32 | #define UART01x_RSR 0x04 /* Receive status register (Read). */ |
33 | #define UART01x_ECR 0x04 /* Error clear register (Write). */ | 33 | #define UART01x_ECR 0x04 /* Error clear register (Write). */ |
34 | #define UART010_LCRH 0x08 /* Line control register, high byte. */ | 34 | #define UART010_LCRH 0x08 /* Line control register, high byte. */ |
35 | #define ST_UART011_DMAWM 0x08 /* DMA watermark configure register. */ | ||
35 | #define UART010_LCRM 0x0C /* Line control register, middle byte. */ | 36 | #define UART010_LCRM 0x0C /* Line control register, middle byte. */ |
37 | #define ST_UART011_TIMEOUT 0x0C /* Timeout period register. */ | ||
36 | #define UART010_LCRL 0x10 /* Line control register, low byte. */ | 38 | #define UART010_LCRL 0x10 /* Line control register, low byte. */ |
37 | #define UART010_CR 0x14 /* Control register. */ | 39 | #define UART010_CR 0x14 /* Control register. */ |
38 | #define UART01x_FR 0x18 /* Flag register (Read only). */ | 40 | #define UART01x_FR 0x18 /* Flag register (Read only). */ |
@@ -51,6 +53,15 @@ | |||
51 | #define UART011_MIS 0x40 /* Masked interrupt status. */ | 53 | #define UART011_MIS 0x40 /* Masked interrupt status. */ |
52 | #define UART011_ICR 0x44 /* Interrupt clear register. */ | 54 | #define UART011_ICR 0x44 /* Interrupt clear register. */ |
53 | #define UART011_DMACR 0x48 /* DMA control register. */ | 55 | #define UART011_DMACR 0x48 /* DMA control register. */ |
56 | #define ST_UART011_XFCR 0x50 /* XON/XOFF control register. */ | ||
57 | #define ST_UART011_XON1 0x54 /* XON1 register. */ | ||
58 | #define ST_UART011_XON2 0x58 /* XON2 register. */ | ||
59 | #define ST_UART011_XOFF1 0x5C /* XOFF1 register. */ | ||
60 | #define ST_UART011_XOFF2 0x60 /* XOFF2 register. */ | ||
61 | #define ST_UART011_ITCR 0x80 /* Integration test control register. */ | ||
62 | #define ST_UART011_ITIP 0x84 /* Integration test input register. */ | ||
63 | #define ST_UART011_ABCR 0x100 /* Autobaud control register. */ | ||
64 | #define ST_UART011_ABIMSC 0x15C /* Autobaud interrupt mask/clear register. */ | ||
54 | 65 | ||
55 | #define UART011_DR_OE (1 << 11) | 66 | #define UART011_DR_OE (1 << 11) |
56 | #define UART011_DR_BE (1 << 10) | 67 | #define UART011_DR_BE (1 << 10) |
diff --git a/include/linux/ata.h b/include/linux/ata.h index fe6e681a9d74..0c4929fa34d3 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -89,6 +89,7 @@ enum { | |||
89 | ATA_ID_SPG = 98, | 89 | ATA_ID_SPG = 98, |
90 | ATA_ID_LBA_CAPACITY_2 = 100, | 90 | ATA_ID_LBA_CAPACITY_2 = 100, |
91 | ATA_ID_SECTOR_SIZE = 106, | 91 | ATA_ID_SECTOR_SIZE = 106, |
92 | ATA_ID_LOGICAL_SECTOR_SIZE = 117, /* and 118 */ | ||
92 | ATA_ID_LAST_LUN = 126, | 93 | ATA_ID_LAST_LUN = 126, |
93 | ATA_ID_DLF = 128, | 94 | ATA_ID_DLF = 128, |
94 | ATA_ID_CSFO = 129, | 95 | ATA_ID_CSFO = 129, |
@@ -640,16 +641,49 @@ static inline int ata_id_flush_ext_enabled(const u16 *id) | |||
640 | return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; | 641 | return (id[ATA_ID_CFS_ENABLE_2] & 0x2400) == 0x2400; |
641 | } | 642 | } |
642 | 643 | ||
643 | static inline int ata_id_has_large_logical_sectors(const u16 *id) | 644 | static inline u32 ata_id_logical_sector_size(const u16 *id) |
644 | { | 645 | { |
645 | if ((id[ATA_ID_SECTOR_SIZE] & 0xc000) != 0x4000) | 646 | /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. |
646 | return 0; | 647 | * IDENTIFY DEVICE data, word 117-118. |
647 | return id[ATA_ID_SECTOR_SIZE] & (1 << 13); | 648 | * 0xd000 ignores bit 13 (logical:physical > 1) |
649 | */ | ||
650 | if ((id[ATA_ID_SECTOR_SIZE] & 0xd000) == 0x5000) | ||
651 | return (((id[ATA_ID_LOGICAL_SECTOR_SIZE+1] << 16) | ||
652 | + id[ATA_ID_LOGICAL_SECTOR_SIZE]) * sizeof(u16)); | ||
653 | return ATA_SECT_SIZE; | ||
654 | } | ||
655 | |||
656 | static inline u8 ata_id_log2_per_physical_sector(const u16 *id) | ||
657 | { | ||
658 | /* T13/1699-D Revision 6a, Sep 6, 2008. Page 128. | ||
659 | * IDENTIFY DEVICE data, word 106. | ||
660 | * 0xe000 ignores bit 12 (logical sector > 512 bytes) | ||
661 | */ | ||
662 | if ((id[ATA_ID_SECTOR_SIZE] & 0xe000) == 0x6000) | ||
663 | return (id[ATA_ID_SECTOR_SIZE] & 0xf); | ||
664 | return 0; | ||
648 | } | 665 | } |
649 | 666 | ||
650 | static inline u16 ata_id_logical_per_physical_sectors(const u16 *id) | 667 | /* Offset of logical sectors relative to physical sectors. |
668 | * | ||
669 | * If device has more than one logical sector per physical sector | ||
670 | * (aka 512 byte emulation), vendors might offset the "sector 0" address | ||
671 | * so sector 63 is "naturally aligned" - e.g. FAT partition table. | ||
672 | * This avoids Read/Mod/Write penalties when using FAT partition table | ||
673 | * and updating "well aligned" (FS perspective) physical sectors on every | ||
674 | * transaction. | ||
675 | */ | ||
676 | static inline u16 ata_id_logical_sector_offset(const u16 *id, | ||
677 | u8 log2_per_phys) | ||
651 | { | 678 | { |
652 | return 1 << (id[ATA_ID_SECTOR_SIZE] & 0xf); | 679 | u16 word_209 = id[209]; |
680 | |||
681 | if ((log2_per_phys > 1) && (word_209 & 0xc000) == 0x4000) { | ||
682 | u16 first = word_209 & 0x3fff; | ||
683 | if (first > 0) | ||
684 | return (1 << log2_per_phys) - first; | ||
685 | } | ||
686 | return 0; | ||
653 | } | 687 | } |
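A small sketch combining the three helpers above to report sector geometry from an IDENTIFY DEVICE buffer; the print format is an arbitrary example, not part of this patch.

	#include <linux/kernel.h>
	#include <linux/ata.h>

	static void show_sector_geometry(const u16 *id)
	{
		u32 logical = ata_id_logical_sector_size(id);
		u8 log2_per_phys = ata_id_log2_per_physical_sector(id);
		u16 offset = ata_id_logical_sector_offset(id, log2_per_phys);

		/* 1u << log2_per_phys is the number of logical sectors
		 * per physical sector. */
		pr_info("logical %u bytes, %u per physical, offset %u\n",
			logical, 1u << log2_per_phys, (unsigned int)offset);
	}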
654 | 688 | ||
655 | static inline int ata_id_has_lba48(const u16 *id) | 689 | static inline int ata_id_has_lba48(const u16 *id) |
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h index f6481daf6e52..a8e4e832cdbb 100644 --- a/include/linux/atmdev.h +++ b/include/linux/atmdev.h | |||
@@ -449,7 +449,7 @@ void vcc_insert_socket(struct sock *sk); | |||
449 | 449 | ||
450 | static inline int atm_guess_pdu2truesize(int size) | 450 | static inline int atm_guess_pdu2truesize(int size) |
451 | { | 451 | { |
452 | return (SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info)); | 452 | return SKB_DATA_ALIGN(size) + sizeof(struct skb_shared_info); |
453 | } | 453 | } |
454 | 454 | ||
455 | 455 | ||
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h index c809e286d213..a065612fc928 100644 --- a/include/linux/binfmts.h +++ b/include/linux/binfmts.h | |||
@@ -50,8 +50,8 @@ struct linux_binprm{ | |||
50 | int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ | 50 | int unsafe; /* how unsafe this exec is (mask of LSM_UNSAFE_*) */ |
51 | unsigned int per_clear; /* bits to clear in current->personality */ | 51 | unsigned int per_clear; /* bits to clear in current->personality */ |
52 | int argc, envc; | 52 | int argc, envc; |
53 | char * filename; /* Name of binary as seen by procps */ | 53 | const char * filename; /* Name of binary as seen by procps */ |
54 | char * interp; /* Name of the binary really executed. Most | 54 | const char * interp; /* Name of the binary really executed. Most |
55 | of the time same as filename, but could be | 55 | of the time same as filename, but could be |
56 | different for binfmt_{misc,script} */ | 56 | different for binfmt_{misc,script} */ |
57 | unsigned interp_flags; | 57 | unsigned interp_flags; |
@@ -126,7 +126,8 @@ extern int setup_arg_pages(struct linux_binprm * bprm, | |||
126 | unsigned long stack_top, | 126 | unsigned long stack_top, |
127 | int executable_stack); | 127 | int executable_stack); |
128 | extern int bprm_mm_init(struct linux_binprm *bprm); | 128 | extern int bprm_mm_init(struct linux_binprm *bprm); |
129 | extern int copy_strings_kernel(int argc,char ** argv,struct linux_binprm *bprm); | 129 | extern int copy_strings_kernel(int argc, const char *const *argv, |
130 | struct linux_binprm *bprm); | ||
130 | extern int prepare_bprm_creds(struct linux_binprm *bprm); | 131 | extern int prepare_bprm_creds(struct linux_binprm *bprm); |
131 | extern void install_exec_creds(struct linux_binprm *bprm); | 132 | extern void install_exec_creds(struct linux_binprm *bprm); |
132 | extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); | 133 | extern void do_coredump(long signr, int exit_code, struct pt_regs *regs); |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 5274103434ad..ba679992d39b 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -346,8 +346,15 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | |||
346 | } | 346 | } |
347 | 347 | ||
348 | #else | 348 | #else |
349 | #define bvec_kmap_irq(bvec, flags) (page_address((bvec)->bv_page) + (bvec)->bv_offset) | 349 | static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags) |
350 | #define bvec_kunmap_irq(buf, flags) do { *(flags) = 0; } while (0) | 350 | { |
351 | return page_address(bvec->bv_page) + bvec->bv_offset; | ||
352 | } | ||
353 | |||
354 | static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags) | ||
355 | { | ||
356 | *flags = 0; | ||
357 | } | ||
351 | #endif | 358 | #endif |
352 | 359 | ||
353 | static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, | 360 | static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, |
@@ -496,6 +503,10 @@ static inline struct bio *bio_list_get(struct bio_list *bl) | |||
496 | #define bip_for_each_vec(bvl, bip, i) \ | 503 | #define bip_for_each_vec(bvl, bip, i) \ |
497 | __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) | 504 | __bip_for_each_vec(bvl, bip, i, (bip)->bip_idx) |
498 | 505 | ||
506 | #define bio_for_each_integrity_vec(_bvl, _bio, _iter) \ | ||
507 | for_each_bio(_bio) \ | ||
508 | bip_for_each_vec(_bvl, _bio->bi_integrity, _iter) | ||
509 | |||
499 | #define bio_integrity(bio) (bio->bi_integrity != NULL) | 510 | #define bio_integrity(bio) (bio->bi_integrity != NULL) |
500 | 511 | ||
501 | extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); | 512 | extern struct bio_integrity_payload *bio_integrity_alloc_bioset(struct bio *, gfp_t, unsigned int, struct bio_set *); |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index fc68053378ce..827cc95711ef 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -136,28 +136,6 @@ static inline unsigned long __ffs64(u64 word) | |||
136 | } | 136 | } |
137 | 137 | ||
138 | #ifdef __KERNEL__ | 138 | #ifdef __KERNEL__ |
139 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | ||
140 | |||
141 | /** | ||
142 | * find_first_bit - find the first set bit in a memory region | ||
143 | * @addr: The address to start the search at | ||
144 | * @size: The maximum size to search | ||
145 | * | ||
146 | * Returns the bit number of the first set bit. | ||
147 | */ | ||
148 | extern unsigned long find_first_bit(const unsigned long *addr, | ||
149 | unsigned long size); | ||
150 | |||
151 | /** | ||
152 | * find_first_zero_bit - find the first cleared bit in a memory region | ||
153 | * @addr: The address to start the search at | ||
154 | * @size: The maximum size to search | ||
155 | * | ||
156 | * Returns the bit number of the first cleared bit. | ||
157 | */ | ||
158 | extern unsigned long find_first_zero_bit(const unsigned long *addr, | ||
159 | unsigned long size); | ||
160 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | ||
161 | 139 | ||
162 | #ifdef CONFIG_GENERIC_FIND_LAST_BIT | 140 | #ifdef CONFIG_GENERIC_FIND_LAST_BIT |
163 | /** | 141 | /** |
@@ -171,28 +149,5 @@ extern unsigned long find_last_bit(const unsigned long *addr, | |||
171 | unsigned long size); | 149 | unsigned long size); |
172 | #endif /* CONFIG_GENERIC_FIND_LAST_BIT */ | 150 | #endif /* CONFIG_GENERIC_FIND_LAST_BIT */ |
173 | 151 | ||
174 | #ifdef CONFIG_GENERIC_FIND_NEXT_BIT | ||
175 | |||
176 | /** | ||
177 | * find_next_bit - find the next set bit in a memory region | ||
178 | * @addr: The address to base the search on | ||
179 | * @offset: The bitnumber to start searching at | ||
180 | * @size: The bitmap size in bits | ||
181 | */ | ||
182 | extern unsigned long find_next_bit(const unsigned long *addr, | ||
183 | unsigned long size, unsigned long offset); | ||
184 | |||
185 | /** | ||
186 | * find_next_zero_bit - find the next cleared bit in a memory region | ||
187 | * @addr: The address to base the search on | ||
188 | * @offset: The bitnumber to start searching at | ||
189 | * @size: The bitmap size in bits | ||
190 | */ | ||
191 | |||
192 | extern unsigned long find_next_zero_bit(const unsigned long *addr, | ||
193 | unsigned long size, | ||
194 | unsigned long offset); | ||
195 | |||
196 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ | ||
197 | #endif /* __KERNEL__ */ | 152 | #endif /* __KERNEL__ */ |
198 | #endif | 153 | #endif |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index ca83a97c9715..0437ab6bb54c 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -97,6 +97,7 @@ struct bio { | |||
97 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ | 97 | #define BIO_NULL_MAPPED 9 /* contains invalid user pages */ |
98 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ | 98 | #define BIO_FS_INTEGRITY 10 /* fs owns integrity data, not block layer */ |
99 | #define BIO_QUIET 11 /* Make BIO Quiet */ | 99 | #define BIO_QUIET 11 /* Make BIO Quiet */ |
100 | #define BIO_MAPPED_INTEGRITY 12 /* integrity metadata has been remapped */ | ||
100 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) | 101 | #define bio_flagged(bio, flag) ((bio)->bi_flags & (1 << (flag))) |
101 | 102 | ||
102 | /* | 103 | /* |
@@ -130,6 +131,8 @@ enum rq_flag_bits { | |||
130 | /* bio only flags */ | 131 | /* bio only flags */ |
131 | __REQ_UNPLUG, /* unplug the queue immediately after submission */ | 132 | __REQ_UNPLUG, /* unplug the queue immediately after submission */ |
132 | __REQ_RAHEAD, /* read ahead, can fail anytime */ | 133 | __REQ_RAHEAD, /* read ahead, can fail anytime */ |
134 | __REQ_THROTTLED, /* This bio has already been subjected to | ||
135 | * throttling rules. Don't do it again. */ | ||
133 | 136 | ||
134 | /* request only flags */ | 137 | /* request only flags */ |
135 | __REQ_SORTED, /* elevator knows about this request */ | 138 | __REQ_SORTED, /* elevator knows about this request */ |
@@ -143,10 +146,8 @@ enum rq_flag_bits { | |||
143 | __REQ_FAILED, /* set if the request failed */ | 146 | __REQ_FAILED, /* set if the request failed */ |
144 | __REQ_QUIET, /* don't worry about errors */ | 147 | __REQ_QUIET, /* don't worry about errors */ |
145 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ | 148 | __REQ_PREEMPT, /* set for "ide_preempt" requests */ |
146 | __REQ_ORDERED_COLOR, /* is before or after barrier */ | ||
147 | __REQ_ALLOCED, /* request came from our alloc pool */ | 149 | __REQ_ALLOCED, /* request came from our alloc pool */ |
148 | __REQ_COPY_USER, /* contains copies of user pages */ | 150 | __REQ_COPY_USER, /* contains copies of user pages */ |
149 | __REQ_INTEGRITY, /* integrity metadata has been remapped */ | ||
150 | __REQ_FLUSH, /* request for cache flush */ | 151 | __REQ_FLUSH, /* request for cache flush */ |
151 | __REQ_IO_STAT, /* account I/O stat */ | 152 | __REQ_IO_STAT, /* account I/O stat */ |
152 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ | 153 | __REQ_MIXED_MERGE, /* merge of different types, fail separately */ |
@@ -168,10 +169,12 @@ enum rq_flag_bits { | |||
168 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | 169 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) |
169 | #define REQ_COMMON_MASK \ | 170 | #define REQ_COMMON_MASK \ |
170 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ | 171 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \ |
171 | REQ_META| REQ_DISCARD | REQ_NOIDLE) | 172 | REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) |
173 | #define REQ_CLONE_MASK REQ_COMMON_MASK | ||
172 | 174 | ||
173 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) | 175 | #define REQ_UNPLUG (1 << __REQ_UNPLUG) |
174 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) | 176 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) |
177 | #define REQ_THROTTLED (1 << __REQ_THROTTLED) | ||
175 | 178 | ||
176 | #define REQ_SORTED (1 << __REQ_SORTED) | 179 | #define REQ_SORTED (1 << __REQ_SORTED) |
177 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) | 180 | #define REQ_SOFTBARRIER (1 << __REQ_SOFTBARRIER) |
@@ -184,10 +187,8 @@ enum rq_flag_bits { | |||
184 | #define REQ_FAILED (1 << __REQ_FAILED) | 187 | #define REQ_FAILED (1 << __REQ_FAILED) |
185 | #define REQ_QUIET (1 << __REQ_QUIET) | 188 | #define REQ_QUIET (1 << __REQ_QUIET) |
186 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) | 189 | #define REQ_PREEMPT (1 << __REQ_PREEMPT) |
187 | #define REQ_ORDERED_COLOR (1 << __REQ_ORDERED_COLOR) | ||
188 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) | 190 | #define REQ_ALLOCED (1 << __REQ_ALLOCED) |
189 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) | 191 | #define REQ_COPY_USER (1 << __REQ_COPY_USER) |
190 | #define REQ_INTEGRITY (1 << __REQ_INTEGRITY) | ||
191 | #define REQ_FLUSH (1 << __REQ_FLUSH) | 192 | #define REQ_FLUSH (1 << __REQ_FLUSH) |
192 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) | 193 | #define REQ_IO_STAT (1 << __REQ_IO_STAT) |
193 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) | 194 | #define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE) |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 2c54906f678f..646b462d04df 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -124,6 +124,9 @@ struct request { | |||
124 | * physical address coalescing is performed. | 124 | * physical address coalescing is performed. |
125 | */ | 125 | */ |
126 | unsigned short nr_phys_segments; | 126 | unsigned short nr_phys_segments; |
127 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | ||
128 | unsigned short nr_integrity_segments; | ||
129 | #endif | ||
127 | 130 | ||
128 | unsigned short ioprio; | 131 | unsigned short ioprio; |
129 | 132 | ||
@@ -243,6 +246,7 @@ struct queue_limits { | |||
243 | 246 | ||
244 | unsigned short logical_block_size; | 247 | unsigned short logical_block_size; |
245 | unsigned short max_segments; | 248 | unsigned short max_segments; |
249 | unsigned short max_integrity_segments; | ||
246 | 250 | ||
247 | unsigned char misaligned; | 251 | unsigned char misaligned; |
248 | unsigned char discard_misaligned; | 252 | unsigned char discard_misaligned; |
@@ -355,18 +359,25 @@ struct request_queue | |||
355 | struct blk_trace *blk_trace; | 359 | struct blk_trace *blk_trace; |
356 | #endif | 360 | #endif |
357 | /* | 361 | /* |
358 | * reserved for flush operations | 362 | * for flush operations |
359 | */ | 363 | */ |
360 | unsigned int ordered, next_ordered, ordseq; | 364 | unsigned int flush_flags; |
361 | int orderr, ordcolor; | 365 | unsigned int flush_seq; |
362 | struct request pre_flush_rq, bar_rq, post_flush_rq; | 366 | int flush_err; |
363 | struct request *orig_bar_rq; | 367 | struct request flush_rq; |
368 | struct request *orig_flush_rq; | ||
369 | struct list_head pending_flushes; | ||
364 | 370 | ||
365 | struct mutex sysfs_lock; | 371 | struct mutex sysfs_lock; |
366 | 372 | ||
367 | #if defined(CONFIG_BLK_DEV_BSG) | 373 | #if defined(CONFIG_BLK_DEV_BSG) |
368 | struct bsg_class_device bsg_dev; | 374 | struct bsg_class_device bsg_dev; |
369 | #endif | 375 | #endif |
376 | |||
377 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
378 | /* Throttle data */ | ||
379 | struct throtl_data *td; | ||
380 | #endif | ||
370 | }; | 381 | }; |
371 | 382 | ||
372 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ | 383 | #define QUEUE_FLAG_CLUSTER 0 /* cluster several segments into 1 */ |
@@ -462,56 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q) | |||
462 | __clear_bit(flag, &q->queue_flags); | 473 | __clear_bit(flag, &q->queue_flags); |
463 | } | 474 | } |
464 | 475 | ||
465 | enum { | ||
466 | /* | ||
467 | * Hardbarrier is supported with one of the following methods. | ||
468 | * | ||
469 | * NONE : hardbarrier unsupported | ||
470 | * DRAIN : ordering by draining is enough | ||
471 | * DRAIN_FLUSH : ordering by draining w/ pre and post flushes | ||
472 | * DRAIN_FUA : ordering by draining w/ pre flush and FUA write | ||
473 | * TAG : ordering by tag is enough | ||
474 | * TAG_FLUSH : ordering by tag w/ pre and post flushes | ||
475 | * TAG_FUA : ordering by tag w/ pre flush and FUA write | ||
476 | */ | ||
477 | QUEUE_ORDERED_BY_DRAIN = 0x01, | ||
478 | QUEUE_ORDERED_BY_TAG = 0x02, | ||
479 | QUEUE_ORDERED_DO_PREFLUSH = 0x10, | ||
480 | QUEUE_ORDERED_DO_BAR = 0x20, | ||
481 | QUEUE_ORDERED_DO_POSTFLUSH = 0x40, | ||
482 | QUEUE_ORDERED_DO_FUA = 0x80, | ||
483 | |||
484 | QUEUE_ORDERED_NONE = 0x00, | ||
485 | |||
486 | QUEUE_ORDERED_DRAIN = QUEUE_ORDERED_BY_DRAIN | | ||
487 | QUEUE_ORDERED_DO_BAR, | ||
488 | QUEUE_ORDERED_DRAIN_FLUSH = QUEUE_ORDERED_DRAIN | | ||
489 | QUEUE_ORDERED_DO_PREFLUSH | | ||
490 | QUEUE_ORDERED_DO_POSTFLUSH, | ||
491 | QUEUE_ORDERED_DRAIN_FUA = QUEUE_ORDERED_DRAIN | | ||
492 | QUEUE_ORDERED_DO_PREFLUSH | | ||
493 | QUEUE_ORDERED_DO_FUA, | ||
494 | |||
495 | QUEUE_ORDERED_TAG = QUEUE_ORDERED_BY_TAG | | ||
496 | QUEUE_ORDERED_DO_BAR, | ||
497 | QUEUE_ORDERED_TAG_FLUSH = QUEUE_ORDERED_TAG | | ||
498 | QUEUE_ORDERED_DO_PREFLUSH | | ||
499 | QUEUE_ORDERED_DO_POSTFLUSH, | ||
500 | QUEUE_ORDERED_TAG_FUA = QUEUE_ORDERED_TAG | | ||
501 | QUEUE_ORDERED_DO_PREFLUSH | | ||
502 | QUEUE_ORDERED_DO_FUA, | ||
503 | |||
504 | /* | ||
505 | * Ordered operation sequence | ||
506 | */ | ||
507 | QUEUE_ORDSEQ_STARTED = 0x01, /* flushing in progress */ | ||
508 | QUEUE_ORDSEQ_DRAIN = 0x02, /* waiting for the queue to be drained */ | ||
509 | QUEUE_ORDSEQ_PREFLUSH = 0x04, /* pre-flushing in progress */ | ||
510 | QUEUE_ORDSEQ_BAR = 0x08, /* original barrier req in progress */ | ||
511 | QUEUE_ORDSEQ_POSTFLUSH = 0x10, /* post-flushing in progress */ | ||
512 | QUEUE_ORDSEQ_DONE = 0x20, | ||
513 | }; | ||
514 | |||
515 | #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) | 476 | #define blk_queue_plugged(q) test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags) |
516 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) | 477 | #define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) |
517 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) | 478 | #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) |
@@ -521,7 +482,6 @@ enum { | |||
521 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) | 482 | #define blk_queue_nonrot(q) test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags) |
522 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) | 483 | #define blk_queue_io_stat(q) test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags) |
523 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) | 484 | #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags) |
524 | #define blk_queue_flushing(q) ((q)->ordseq) | ||
525 | #define blk_queue_stackable(q) \ | 485 | #define blk_queue_stackable(q) \ |
526 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) | 486 | test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags) |
527 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) | 487 | #define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags) |
@@ -592,7 +552,8 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync) | |||
592 | * it already be started by driver. | 552 | * it already be started by driver. |
593 | */ | 553 | */ |
594 | #define RQ_NOMERGE_FLAGS \ | 554 | #define RQ_NOMERGE_FLAGS \ |
595 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER) | 555 | (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \ |
556 | REQ_FLUSH | REQ_FUA) | ||
596 | #define rq_mergeable(rq) \ | 557 | #define rq_mergeable(rq) \ |
597 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ | 558 | (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \ |
598 | (((rq)->cmd_flags & REQ_DISCARD) || \ | 559 | (((rq)->cmd_flags & REQ_DISCARD) || \ |
@@ -851,7 +812,7 @@ extern void blk_queue_max_segment_size(struct request_queue *, unsigned int); | |||
851 | extern void blk_queue_max_discard_sectors(struct request_queue *q, | 812 | extern void blk_queue_max_discard_sectors(struct request_queue *q, |
852 | unsigned int max_discard_sectors); | 813 | unsigned int max_discard_sectors); |
853 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); | 814 | extern void blk_queue_logical_block_size(struct request_queue *, unsigned short); |
854 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned short); | 815 | extern void blk_queue_physical_block_size(struct request_queue *, unsigned int); |
855 | extern void blk_queue_alignment_offset(struct request_queue *q, | 816 | extern void blk_queue_alignment_offset(struct request_queue *q, |
856 | unsigned int alignment); | 817 | unsigned int alignment); |
857 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); | 818 | extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min); |
@@ -881,12 +842,8 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int); | |||
881 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); | 842 | extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *); |
882 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); | 843 | extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *); |
883 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 844 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
845 | extern void blk_queue_flush(struct request_queue *q, unsigned int flush); | ||
884 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); | 846 | extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev); |
885 | extern int blk_queue_ordered(struct request_queue *, unsigned); | ||
886 | extern bool blk_do_ordered(struct request_queue *, struct request **); | ||
887 | extern unsigned blk_ordered_cur_seq(struct request_queue *); | ||
888 | extern unsigned blk_ordered_req_seq(struct request *); | ||
889 | extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int); | ||
890 | 847 | ||
891 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); | 848 | extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *); |
892 | extern void blk_dump_rq_flags(struct request *, char *); | 849 | extern void blk_dump_rq_flags(struct request *, char *); |
@@ -919,27 +876,20 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt, | |||
919 | return NULL; | 876 | return NULL; |
920 | return bqt->tag_index[tag]; | 877 | return bqt->tag_index[tag]; |
921 | } | 878 | } |
922 | enum{ | 879 | |
923 | BLKDEV_WAIT, /* wait for completion */ | 880 | #define BLKDEV_DISCARD_SECURE 0x01 /* secure discard */ |
924 | BLKDEV_BARRIER, /* issue request with barrier */ | 881 | |
925 | BLKDEV_SECURE, /* secure discard */ | 882 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *); |
926 | }; | ||
927 | #define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT) | ||
928 | #define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER) | ||
929 | #define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE) | ||
930 | extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *, | ||
931 | unsigned long); | ||
932 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, | 883 | extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector, |
933 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 884 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); |
934 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, | 885 | extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector, |
935 | sector_t nr_sects, gfp_t gfp_mask, unsigned long flags); | 886 | sector_t nr_sects, gfp_t gfp_mask); |
936 | static inline int sb_issue_discard(struct super_block *sb, | 887 | static inline int sb_issue_discard(struct super_block *sb, sector_t block, |
937 | sector_t block, sector_t nr_blocks) | 888 | sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags) |
938 | { | 889 | { |
939 | block <<= (sb->s_blocksize_bits - 9); | 890 | return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9), |
940 | nr_blocks <<= (sb->s_blocksize_bits - 9); | 891 | nr_blocks << (sb->s_blocksize_bits - 9), |
941 | return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS, | 892 | gfp_mask, flags); |
942 | BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER); | ||
943 | } | 893 | } |
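For orientation (not part of the patch): the shift converts filesystem blocks to 512-byte sectors, so with 4 KiB blocks s_blocksize_bits is 12 and the shift is 12 - 9 = 3; a call such as sb_issue_discard(sb, 100, 16, GFP_NOFS, 0) would therefore discard blocks 100-115, i.e. sectors 800-927.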
944 | 894 | ||
945 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); | 895 | extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm); |
@@ -1004,7 +954,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q) | |||
1004 | return q->limits.physical_block_size; | 954 | return q->limits.physical_block_size; |
1005 | } | 955 | } |
1006 | 956 | ||
1007 | static inline int bdev_physical_block_size(struct block_device *bdev) | 957 | static inline unsigned int bdev_physical_block_size(struct block_device *bdev) |
1008 | { | 958 | { |
1009 | return queue_physical_block_size(bdev_get_queue(bdev)); | 959 | return queue_physical_block_size(bdev_get_queue(bdev)); |
1010 | } | 960 | } |
@@ -1093,11 +1043,11 @@ static inline int queue_dma_alignment(struct request_queue *q) | |||
1093 | return q ? q->dma_alignment : 511; | 1043 | return q ? q->dma_alignment : 511; |
1094 | } | 1044 | } |
1095 | 1045 | ||
1096 | static inline int blk_rq_aligned(struct request_queue *q, void *addr, | 1046 | static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr, |
1097 | unsigned int len) | 1047 | unsigned int len) |
1098 | { | 1048 | { |
1099 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; | 1049 | unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask; |
1100 | return !((unsigned long)addr & alignment) && !(len & alignment); | 1050 | return !(addr & alignment) && !(len & alignment); |
1101 | } | 1051 | } |
1102 | 1052 | ||
1103 | /* assumes size > 256 */ | 1053 | /* assumes size > 256 */ |
@@ -1127,6 +1077,7 @@ static inline void put_dev_sector(Sector p) | |||
1127 | 1077 | ||
1128 | struct work_struct; | 1078 | struct work_struct; |
1129 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); | 1079 | int kblockd_schedule_work(struct request_queue *q, struct work_struct *work); |
1080 | int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay); | ||
1130 | 1081 | ||
1131 | #ifdef CONFIG_BLK_CGROUP | 1082 | #ifdef CONFIG_BLK_CGROUP |
1132 | /* | 1083 | /* |
@@ -1170,6 +1121,24 @@ static inline uint64_t rq_io_start_time_ns(struct request *req) | |||
1170 | } | 1121 | } |
1171 | #endif | 1122 | #endif |
1172 | 1123 | ||
1124 | #ifdef CONFIG_BLK_DEV_THROTTLING | ||
1125 | extern int blk_throtl_init(struct request_queue *q); | ||
1126 | extern void blk_throtl_exit(struct request_queue *q); | ||
1127 | extern int blk_throtl_bio(struct request_queue *q, struct bio **bio); | ||
1128 | extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay); | ||
1129 | extern void throtl_shutdown_timer_wq(struct request_queue *q); | ||
1130 | #else /* CONFIG_BLK_DEV_THROTTLING */ | ||
1131 | static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio) | ||
1132 | { | ||
1133 | return 0; | ||
1134 | } | ||
1135 | |||
1136 | static inline int blk_throtl_init(struct request_queue *q) { return 0; } | ||
1137 | static inline int blk_throtl_exit(struct request_queue *q) { return 0; } | ||
1138 | static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {} | ||
1139 | static inline void throtl_shutdown_timer_wq(struct request_queue *q) {} | ||
1140 | #endif /* CONFIG_BLK_DEV_THROTTLING */ | ||
1141 | |||
1173 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ | 1142 | #define MODULE_ALIAS_BLOCKDEV(major,minor) \ |
1174 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) | 1143 | MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor)) |
1175 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ | 1144 | #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \ |
@@ -1213,8 +1182,13 @@ struct blk_integrity { | |||
1213 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | 1182 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); |
1214 | extern void blk_integrity_unregister(struct gendisk *); | 1183 | extern void blk_integrity_unregister(struct gendisk *); |
1215 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 1184 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
1216 | extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *); | 1185 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
1217 | extern int blk_rq_count_integrity_sg(struct request *); | 1186 | struct scatterlist *); |
1187 | extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *); | ||
1188 | extern int blk_integrity_merge_rq(struct request_queue *, struct request *, | ||
1189 | struct request *); | ||
1190 | extern int blk_integrity_merge_bio(struct request_queue *, struct request *, | ||
1191 | struct bio *); | ||
1218 | 1192 | ||
1219 | static inline | 1193 | static inline |
1220 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | 1194 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) |
@@ -1235,16 +1209,32 @@ static inline int blk_integrity_rq(struct request *rq) | |||
1235 | return bio_integrity(rq->bio); | 1209 | return bio_integrity(rq->bio); |
1236 | } | 1210 | } |
1237 | 1211 | ||
1212 | static inline void blk_queue_max_integrity_segments(struct request_queue *q, | ||
1213 | unsigned int segs) | ||
1214 | { | ||
1215 | q->limits.max_integrity_segments = segs; | ||
1216 | } | ||
1217 | |||
1218 | static inline unsigned short | ||
1219 | queue_max_integrity_segments(struct request_queue *q) | ||
1220 | { | ||
1221 | return q->limits.max_integrity_segments; | ||
1222 | } | ||
1223 | |||
1238 | #else /* CONFIG_BLK_DEV_INTEGRITY */ | 1224 | #else /* CONFIG_BLK_DEV_INTEGRITY */ |
1239 | 1225 | ||
1240 | #define blk_integrity_rq(rq) (0) | 1226 | #define blk_integrity_rq(rq) (0) |
1241 | #define blk_rq_count_integrity_sg(a) (0) | 1227 | #define blk_rq_count_integrity_sg(a, b) (0) |
1242 | #define blk_rq_map_integrity_sg(a, b) (0) | 1228 | #define blk_rq_map_integrity_sg(a, b, c) (0) |
1243 | #define bdev_get_integrity(a) (0) | 1229 | #define bdev_get_integrity(a) (0) |
1244 | #define blk_get_integrity(a) (0) | 1230 | #define blk_get_integrity(a) (0) |
1245 | #define blk_integrity_compare(a, b) (0) | 1231 | #define blk_integrity_compare(a, b) (0) |
1246 | #define blk_integrity_register(a, b) (0) | 1232 | #define blk_integrity_register(a, b) (0) |
1247 | #define blk_integrity_unregister(a) do { } while (0); | 1233 | #define blk_integrity_unregister(a) do { } while (0); |
1234 | #define blk_queue_max_integrity_segments(a, b) do { } while (0); | ||
1235 | #define queue_max_integrity_segments(a) (0) | ||
1236 | #define blk_integrity_merge_rq(a, b, c) (0) | ||
1237 | #define blk_integrity_merge_bio(a, b, c) (0) | ||
1248 | 1238 | ||
1249 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 1239 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
1250 | 1240 | ||
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 43e649a72529..dd1b25b2641c 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -32,8 +32,6 @@ enum bh_state_bits { | |||
32 | BH_Delay, /* Buffer is not yet allocated on disk */ | 32 | BH_Delay, /* Buffer is not yet allocated on disk */ |
33 | BH_Boundary, /* Block is followed by a discontiguity */ | 33 | BH_Boundary, /* Block is followed by a discontiguity */ |
34 | BH_Write_EIO, /* I/O error on write */ | 34 | BH_Write_EIO, /* I/O error on write */ |
35 | BH_Ordered, /* ordered write */ | ||
36 | BH_Eopnotsupp, /* operation not supported (barrier) */ | ||
37 | BH_Unwritten, /* Buffer is allocated on disk but not written */ | 35 | BH_Unwritten, /* Buffer is allocated on disk but not written */ |
38 | BH_Quiet, /* Buffer Error Printks to be quiet */ | 36 | BH_Quiet, /* Buffer Error Printks to be quiet */ |
39 | 37 | ||
@@ -125,8 +123,6 @@ BUFFER_FNS(Async_Write, async_write) | |||
125 | BUFFER_FNS(Delay, delay) | 123 | BUFFER_FNS(Delay, delay) |
126 | BUFFER_FNS(Boundary, boundary) | 124 | BUFFER_FNS(Boundary, boundary) |
127 | BUFFER_FNS(Write_EIO, write_io_error) | 125 | BUFFER_FNS(Write_EIO, write_io_error) |
128 | BUFFER_FNS(Ordered, ordered) | ||
129 | BUFFER_FNS(Eopnotsupp, eopnotsupp) | ||
130 | BUFFER_FNS(Unwritten, unwritten) | 126 | BUFFER_FNS(Unwritten, unwritten) |
131 | 127 | ||
132 | #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) | 128 | #define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK) |
@@ -183,6 +179,8 @@ void unlock_buffer(struct buffer_head *bh); | |||
183 | void __lock_buffer(struct buffer_head *bh); | 179 | void __lock_buffer(struct buffer_head *bh); |
184 | void ll_rw_block(int, int, struct buffer_head * bh[]); | 180 | void ll_rw_block(int, int, struct buffer_head * bh[]); |
185 | int sync_dirty_buffer(struct buffer_head *bh); | 181 | int sync_dirty_buffer(struct buffer_head *bh); |
182 | int __sync_dirty_buffer(struct buffer_head *bh, int rw); | ||
183 | void write_dirty_buffer(struct buffer_head *bh, int rw); | ||
186 | int submit_bh(int, struct buffer_head *); | 184 | int submit_bh(int, struct buffer_head *); |
187 | void write_boundary_block(struct block_device *bdev, | 185 | void write_boundary_block(struct block_device *bdev, |
188 | sector_t bblock, unsigned blocksize); | 186 | sector_t bblock, unsigned blocksize); |
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h index dba28268e651..8e20540043f5 100644 --- a/include/linux/can/platform/mcp251x.h +++ b/include/linux/can/platform/mcp251x.h | |||
@@ -12,7 +12,6 @@ | |||
12 | /** | 12 | /** |
13 | * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data | 13 | * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data |
14 | * @oscillator_frequency: - oscillator frequency in Hz | 14 | * @oscillator_frequency: - oscillator frequency in Hz |
15 | * @model: - actual type of chip | ||
16 | * @board_specific_setup: - called before probing the chip (power,reset) | 15 | * @board_specific_setup: - called before probing the chip (power,reset) |
17 | * @transceiver_enable: - called to power on/off the transceiver | 16 | * @transceiver_enable: - called to power on/off the transceiver |
18 | * @power_enable: - called to power on/off the mcp *and* the | 17 | * @power_enable: - called to power on/off the mcp *and* the |
@@ -25,9 +24,6 @@ | |||
25 | 24 | ||
26 | struct mcp251x_platform_data { | 25 | struct mcp251x_platform_data { |
27 | unsigned long oscillator_frequency; | 26 | unsigned long oscillator_frequency; |
28 | int model; | ||
29 | #define CAN_MCP251X_MCP2510 0x2510 | ||
30 | #define CAN_MCP251X_MCP2515 0x2515 | ||
31 | int (*board_specific_setup)(struct spi_device *spi); | 27 | int (*board_specific_setup)(struct spi_device *spi); |
32 | int (*transceiver_enable)(int enable); | 28 | int (*transceiver_enable)(int enable); |
33 | int (*power_enable) (int enable); | 29 | int (*power_enable) (int enable); |
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h new file mode 100644 index 000000000000..7fff521d7eb5 --- /dev/null +++ b/include/linux/ceph/auth.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _FS_CEPH_AUTH_H | ||
2 | #define _FS_CEPH_AUTH_H | ||
3 | |||
4 | #include <linux/ceph/types.h> | ||
5 | #include <linux/ceph/buffer.h> | ||
6 | |||
7 | /* | ||
8 | * Abstract interface for communicating with the authentication module. | ||
9 | * There is some handshake that takes place between us and the monitor | ||
10 | * to acquire the necessary keys. These are used to generate an | ||
11 | * 'authorizer' that we use when connecting to a service (mds, osd). | ||
12 | */ | ||
13 | |||
14 | struct ceph_auth_client; | ||
15 | struct ceph_authorizer; | ||
16 | |||
17 | struct ceph_auth_client_ops { | ||
18 | const char *name; | ||
19 | |||
20 | /* | ||
21 | * true if we are authenticated and can connect to | ||
22 | * services. | ||
23 | */ | ||
24 | int (*is_authenticated)(struct ceph_auth_client *ac); | ||
25 | |||
26 | /* | ||
27 | * true if we should (re)authenticate, e.g., when our tickets | ||
28 | * are getting old and crusty. | ||
29 | */ | ||
30 | int (*should_authenticate)(struct ceph_auth_client *ac); | ||
31 | |||
32 | /* | ||
33 | * build requests and process replies during monitor | ||
34 | * handshake. if handle_reply returns -EAGAIN, we build | ||
35 | * another request. | ||
36 | */ | ||
37 | int (*build_request)(struct ceph_auth_client *ac, void *buf, void *end); | ||
38 | int (*handle_reply)(struct ceph_auth_client *ac, int result, | ||
39 | void *buf, void *end); | ||
40 | |||
41 | /* | ||
42 | * Create authorizer for connecting to a service, and verify | ||
43 | * the response to authenticate the service. | ||
44 | */ | ||
45 | int (*create_authorizer)(struct ceph_auth_client *ac, int peer_type, | ||
46 | struct ceph_authorizer **a, | ||
47 | void **buf, size_t *len, | ||
48 | void **reply_buf, size_t *reply_len); | ||
49 | int (*verify_authorizer_reply)(struct ceph_auth_client *ac, | ||
50 | struct ceph_authorizer *a, size_t len); | ||
51 | void (*destroy_authorizer)(struct ceph_auth_client *ac, | ||
52 | struct ceph_authorizer *a); | ||
53 | void (*invalidate_authorizer)(struct ceph_auth_client *ac, | ||
54 | int peer_type); | ||
55 | |||
56 | /* reset when we (re)connect to a monitor */ | ||
57 | void (*reset)(struct ceph_auth_client *ac); | ||
58 | |||
59 | void (*destroy)(struct ceph_auth_client *ac); | ||
60 | }; | ||
61 | |||
62 | struct ceph_auth_client { | ||
63 | u32 protocol; /* CEPH_AUTH_* */ | ||
64 | void *private; /* for use by protocol implementation */ | ||
65 | const struct ceph_auth_client_ops *ops; /* null iff protocol==0 */ | ||
66 | |||
67 | bool negotiating; /* true if negotiating protocol */ | ||
68 | const char *name; /* entity name */ | ||
69 | u64 global_id; /* our unique id in system */ | ||
70 | const char *secret; /* our secret key */ | ||
71 | unsigned want_keys; /* which services we want */ | ||
72 | }; | ||
73 | |||
74 | extern struct ceph_auth_client *ceph_auth_init(const char *name, | ||
75 | const char *secret); | ||
76 | extern void ceph_auth_destroy(struct ceph_auth_client *ac); | ||
77 | |||
78 | extern void ceph_auth_reset(struct ceph_auth_client *ac); | ||
79 | |||
80 | extern int ceph_auth_build_hello(struct ceph_auth_client *ac, | ||
81 | void *buf, size_t len); | ||
82 | extern int ceph_handle_auth_reply(struct ceph_auth_client *ac, | ||
83 | void *buf, size_t len, | ||
84 | void *reply_buf, size_t reply_len); | ||
85 | extern int ceph_entity_name_encode(const char *name, void **p, void *end); | ||
86 | |||
87 | extern int ceph_build_auth(struct ceph_auth_client *ac, | ||
88 | void *msg_buf, size_t msg_len); | ||
89 | |||
90 | extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac); | ||
91 | |||
92 | #endif | ||
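To make the handshake described in the header comment concrete, here is a rough sketch of the monitor exchange. The send_to_monitor()/recv_from_monitor() transport helpers are assumptions, as is the convention that ceph_handle_auth_reply() returns a negative errno on error, 0 when authentication is complete, and otherwise the length of the next request it prepared into the request buffer; only the ceph_auth_* declarations come from this header.

	#include <linux/errno.h>
	#include <linux/ceph/auth.h>

	/* Assumed transport helpers, not part of this header. */
	extern int send_to_monitor(const void *buf, size_t len);
	extern int recv_from_monitor(void *buf, size_t max);

	static int do_auth_handshake(struct ceph_auth_client *ac,
				     void *req, size_t req_max,
				     void *reply, size_t reply_max)
	{
		int len, got;

		/* Initial hello; assumed to return the request length. */
		len = ceph_auth_build_hello(ac, req, req_max);
		while (len > 0) {
			send_to_monitor(req, len);
			got = recv_from_monitor(reply, reply_max);
			if (got < 0)
				return got;
			/* Feed the monitor's reply back in; a positive return
			 * is assumed to mean another request was built into
			 * req[] and should be sent. */
			len = ceph_handle_auth_reply(ac, reply, got, req, req_max);
		}
		if (len < 0)
			return len;
		return ceph_auth_is_authenticated(ac) ? 0 : -EACCES;
	}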
diff --git a/include/linux/ceph/buffer.h b/include/linux/ceph/buffer.h new file mode 100644 index 000000000000..58d19014068f --- /dev/null +++ b/include/linux/ceph/buffer.h | |||
@@ -0,0 +1,39 @@ | |||
1 | #ifndef __FS_CEPH_BUFFER_H | ||
2 | #define __FS_CEPH_BUFFER_H | ||
3 | |||
4 | #include <linux/kref.h> | ||
5 | #include <linux/mm.h> | ||
6 | #include <linux/vmalloc.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/uio.h> | ||
9 | |||
10 | /* | ||
11 | * a simple reference counted buffer. | ||
12 | * | ||
13 | * use kmalloc for small sizes (<= one page), vmalloc for larger | ||
14 | * sizes. | ||
15 | */ | ||
16 | struct ceph_buffer { | ||
17 | struct kref kref; | ||
18 | struct kvec vec; | ||
19 | size_t alloc_len; | ||
20 | bool is_vmalloc; | ||
21 | }; | ||
22 | |||
23 | extern struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp); | ||
24 | extern void ceph_buffer_release(struct kref *kref); | ||
25 | |||
26 | static inline struct ceph_buffer *ceph_buffer_get(struct ceph_buffer *b) | ||
27 | { | ||
28 | kref_get(&b->kref); | ||
29 | return b; | ||
30 | } | ||
31 | |||
32 | static inline void ceph_buffer_put(struct ceph_buffer *b) | ||
33 | { | ||
34 | kref_put(&b->kref, ceph_buffer_release); | ||
35 | } | ||
36 | |||
37 | extern int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end); | ||
38 | |||
39 | #endif | ||
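The header only states the allocation policy (kmalloc up to roughly a page, vmalloc beyond); the allocator itself lives in net/ceph/buffer.c. One plausible shape of that policy, which may differ from the real ceph_buffer_new() in detail:

struct ceph_buffer *example_buffer_new(size_t len, gfp_t gfp)
{
	struct ceph_buffer *b = kmalloc(sizeof(*b), gfp);

	if (!b)
		return NULL;
	/* try kmalloc first; fall back to vmalloc for large buffers */
	b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
	if (b->vec.iov_base) {
		b->is_vmalloc = false;
	} else {
		b->vec.iov_base = __vmalloc(len, gfp, PAGE_KERNEL);
		if (!b->vec.iov_base) {
			kfree(b);
			return NULL;
		}
		b->is_vmalloc = true;
	}
	kref_init(&b->kref);
	b->alloc_len = len;
	b->vec.iov_len = len;
	return b;
}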
diff --git a/include/linux/ceph/ceph_debug.h b/include/linux/ceph/ceph_debug.h new file mode 100644 index 000000000000..aa2e19182d99 --- /dev/null +++ b/include/linux/ceph/ceph_debug.h | |||
@@ -0,0 +1,38 @@ | |||
1 | #ifndef _FS_CEPH_DEBUG_H | ||
2 | #define _FS_CEPH_DEBUG_H | ||
3 | |||
4 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt | ||
5 | |||
6 | #ifdef CONFIG_CEPH_LIB_PRETTYDEBUG | ||
7 | |||
8 | /* | ||
9 | * wrap pr_debug to include a filename:lineno prefix on each line. | ||
10 | * this incurs some overhead (kernel size and execution time) due to | ||
11 | * the extra function call at each call site. | ||
12 | */ | ||
13 | |||
14 | # if defined(DEBUG) || defined(CONFIG_DYNAMIC_DEBUG) | ||
15 | extern const char *ceph_file_part(const char *s, int len); | ||
16 | # define dout(fmt, ...) \ | ||
17 | pr_debug("%.*s %12.12s:%-4d : " fmt, \ | ||
18 | 8 - (int)sizeof(KBUILD_MODNAME), " ", \ | ||
19 | ceph_file_part(__FILE__, sizeof(__FILE__)), \ | ||
20 | __LINE__, ##__VA_ARGS__) | ||
21 | # else | ||
22 | /* faux printk call just to see any compiler warnings. */ | ||
23 | # define dout(fmt, ...) do { \ | ||
24 | if (0) \ | ||
25 | printk(KERN_DEBUG fmt, ##__VA_ARGS__); \ | ||
26 | } while (0) | ||
27 | # endif | ||
28 | |||
29 | #else | ||
30 | |||
31 | /* | ||
32 | * or, just wrap pr_debug | ||
33 | */ | ||
34 | # define dout(fmt, ...) pr_debug(" " fmt, ##__VA_ARGS__) | ||
35 | |||
36 | #endif | ||
37 | |||
38 | #endif | ||
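To make the two dout() variants concrete, an illustrative call and the shape of its output (the call site and argument are made up):

/*
 *	dout("connect to osd%d\n", o);
 *
 * with CONFIG_CEPH_LIB_PRETTYDEBUG and dynamic debug enabled, this is
 * printed with the module name (added by pr_fmt above), a trimmed
 * "file.c:line" prefix produced by ceph_file_part(), and then the message;
 * without the option it collapses into a plain pr_debug() call with the
 * same arguments.
 */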
diff --git a/include/linux/ceph/ceph_frag.h b/include/linux/ceph/ceph_frag.h new file mode 100644 index 000000000000..5babb8e95352 --- /dev/null +++ b/include/linux/ceph/ceph_frag.h | |||
@@ -0,0 +1,109 @@ | |||
1 | #ifndef FS_CEPH_FRAG_H | ||
2 | #define FS_CEPH_FRAG_H | ||
3 | |||
4 | /* | ||
5 | * "Frags" are a way to describe a subset of a 32-bit number space, | ||
6 | * using a mask and a value to match against that mask. Any given frag | ||
7 | * (subset of the number space) can be partitioned into 2^n sub-frags. | ||
8 | * | ||
9 | * Frags are encoded into a 32-bit word: | ||
10 | * 8 upper bits = "bits" | ||
11 | * 24 lower bits = "value" | ||
12 | * (We could go to 5+27 bits, but who cares.) | ||
13 | * | ||
14 | * We use the _most_ significant bits of the 24 bit value. This makes | ||
15 | * values logically sort. | ||
16 | * | ||
17 | * Unfortunately, because the "bits" field is still in the high bits, we | ||
18 | * can't sort encoded frags numerically. However, it does allow you | ||
19 | * to feed encoded frags as values into frag_contains_value. | ||
20 | */ | ||
21 | static inline __u32 ceph_frag_make(__u32 b, __u32 v) | ||
22 | { | ||
23 | return (b << 24) | | ||
24 | (v & (0xffffffu << (24-b)) & 0xffffffu); | ||
25 | } | ||
26 | static inline __u32 ceph_frag_bits(__u32 f) | ||
27 | { | ||
28 | return f >> 24; | ||
29 | } | ||
30 | static inline __u32 ceph_frag_value(__u32 f) | ||
31 | { | ||
32 | return f & 0xffffffu; | ||
33 | } | ||
34 | static inline __u32 ceph_frag_mask(__u32 f) | ||
35 | { | ||
36 | return (0xffffffu << (24-ceph_frag_bits(f))) & 0xffffffu; | ||
37 | } | ||
38 | static inline __u32 ceph_frag_mask_shift(__u32 f) | ||
39 | { | ||
40 | return 24 - ceph_frag_bits(f); | ||
41 | } | ||
42 | |||
43 | static inline int ceph_frag_contains_value(__u32 f, __u32 v) | ||
44 | { | ||
45 | return (v & ceph_frag_mask(f)) == ceph_frag_value(f); | ||
46 | } | ||
47 | static inline int ceph_frag_contains_frag(__u32 f, __u32 sub) | ||
48 | { | ||
49 | /* is sub as specific as us, and contained by us? */ | ||
50 | return ceph_frag_bits(sub) >= ceph_frag_bits(f) && | ||
51 | (ceph_frag_value(sub) & ceph_frag_mask(f)) == ceph_frag_value(f); | ||
52 | } | ||
53 | |||
54 | static inline __u32 ceph_frag_parent(__u32 f) | ||
55 | { | ||
56 | return ceph_frag_make(ceph_frag_bits(f) - 1, | ||
57 | ceph_frag_value(f) & (ceph_frag_mask(f) << 1)); | ||
58 | } | ||
59 | static inline int ceph_frag_is_left_child(__u32 f) | ||
60 | { | ||
61 | return ceph_frag_bits(f) > 0 && | ||
62 | (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 0; | ||
63 | } | ||
64 | static inline int ceph_frag_is_right_child(__u32 f) | ||
65 | { | ||
66 | return ceph_frag_bits(f) > 0 && | ||
67 | (ceph_frag_value(f) & (0x1000000 >> ceph_frag_bits(f))) == 1; | ||
68 | } | ||
69 | static inline __u32 ceph_frag_sibling(__u32 f) | ||
70 | { | ||
71 | return ceph_frag_make(ceph_frag_bits(f), | ||
72 | ceph_frag_value(f) ^ (0x1000000 >> ceph_frag_bits(f))); | ||
73 | } | ||
74 | static inline __u32 ceph_frag_left_child(__u32 f) | ||
75 | { | ||
76 | return ceph_frag_make(ceph_frag_bits(f)+1, ceph_frag_value(f)); | ||
77 | } | ||
78 | static inline __u32 ceph_frag_right_child(__u32 f) | ||
79 | { | ||
80 | return ceph_frag_make(ceph_frag_bits(f)+1, | ||
81 | ceph_frag_value(f) | (0x1000000 >> (1+ceph_frag_bits(f)))); | ||
82 | } | ||
83 | static inline __u32 ceph_frag_make_child(__u32 f, int by, int i) | ||
84 | { | ||
85 | int newbits = ceph_frag_bits(f) + by; | ||
86 | return ceph_frag_make(newbits, | ||
87 | ceph_frag_value(f) | (i << (24 - newbits))); | ||
88 | } | ||
89 | static inline int ceph_frag_is_leftmost(__u32 f) | ||
90 | { | ||
91 | return ceph_frag_value(f) == 0; | ||
92 | } | ||
93 | static inline int ceph_frag_is_rightmost(__u32 f) | ||
94 | { | ||
95 | return ceph_frag_value(f) == ceph_frag_mask(f); | ||
96 | } | ||
97 | static inline __u32 ceph_frag_next(__u32 f) | ||
98 | { | ||
99 | return ceph_frag_make(ceph_frag_bits(f), | ||
100 | ceph_frag_value(f) + (0x1000000 >> ceph_frag_bits(f))); | ||
101 | } | ||
102 | |||
103 | /* | ||
104 | * comparator to sort frags logically, as when traversing the | ||
105 | * number space in ascending order... | ||
106 | */ | ||
107 | int ceph_frag_compare(__u32 a, __u32 b); | ||
108 | |||
109 | #endif | ||
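The 8-bit/24-bit split is easiest to see with a worked value. The checks below follow directly from the inline helpers above (BUG_ON is used only to make the expected values explicit; this self-test is illustrative, not part of the patch):

static inline void ceph_frag_example_selftest(void)
{
	__u32 f = ceph_frag_make(1, 0x800000);	/* upper half of the 24-bit space */

	BUG_ON(f != 0x01800000);		/* 8 bits of "bits", 24 of "value" */
	BUG_ON(ceph_frag_bits(f) != 1);
	BUG_ON(ceph_frag_value(f) != 0x800000);
	BUG_ON(ceph_frag_mask(f) != 0x800000);
	BUG_ON(!ceph_frag_contains_value(f, 0xabcdef));	/* top bit set */
	BUG_ON(ceph_frag_left_child(f) != 0x02800000);
	BUG_ON(ceph_frag_right_child(f) != 0x02c00000);
}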
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h new file mode 100644 index 000000000000..c3c74aef289d --- /dev/null +++ b/include/linux/ceph/ceph_fs.h | |||
@@ -0,0 +1,729 @@ | |||
1 | /* | ||
2 | * ceph_fs.h - Ceph constants and data types to share between kernel and | ||
3 | * user space. | ||
4 | * | ||
5 | * Most types in this file are defined as little-endian, and are | ||
6 | * primarily intended to describe data structures that pass over the | ||
7 | * wire or that are stored on disk. | ||
8 | * | ||
9 | * LGPL2 | ||
10 | */ | ||
11 | |||
12 | #ifndef CEPH_FS_H | ||
13 | #define CEPH_FS_H | ||
14 | |||
15 | #include "msgr.h" | ||
16 | #include "rados.h" | ||
17 | |||
18 | /* | ||
19 | * subprotocol versions. when specific message types or high-level | ||
20 | * protocols change, bump the affected components. we rev | ||
21 | * internal cluster protocols separately from the public, | ||
22 | * client-facing protocol. | ||
23 | */ | ||
24 | #define CEPH_OSD_PROTOCOL 8 /* cluster internal */ | ||
25 | #define CEPH_MDS_PROTOCOL 12 /* cluster internal */ | ||
26 | #define CEPH_MON_PROTOCOL 5 /* cluster internal */ | ||
27 | #define CEPH_OSDC_PROTOCOL 24 /* server/client */ | ||
28 | #define CEPH_MDSC_PROTOCOL 32 /* server/client */ | ||
29 | #define CEPH_MONC_PROTOCOL 15 /* server/client */ | ||
30 | |||
31 | |||
32 | #define CEPH_INO_ROOT 1 | ||
33 | #define CEPH_INO_CEPH 2 /* hidden .ceph dir */ | ||
34 | |||
35 | /* arbitrary limit on max # of monitors (cluster of 3 is typical) */ | ||
36 | #define CEPH_MAX_MON 31 | ||
37 | |||
38 | |||
39 | /* | ||
40 | * feature bits | ||
41 | */ | ||
42 | #define CEPH_FEATURE_UID (1<<0) | ||
43 | #define CEPH_FEATURE_NOSRCADDR (1<<1) | ||
44 | #define CEPH_FEATURE_MONCLOCKCHECK (1<<2) | ||
45 | #define CEPH_FEATURE_FLOCK (1<<3) | ||
46 | |||
47 | |||
48 | /* | ||
49 | * ceph_file_layout - describe data layout for a file/inode | ||
50 | */ | ||
51 | struct ceph_file_layout { | ||
52 | /* file -> object mapping */ | ||
53 | __le32 fl_stripe_unit; /* stripe unit, in bytes. must be multiple | ||
54 | of page size. */ | ||
55 | __le32 fl_stripe_count; /* over this many objects */ | ||
56 | __le32 fl_object_size; /* until objects are this big, then move to | ||
57 | new objects */ | ||
58 | __le32 fl_cas_hash; /* 0 = none; 1 = sha256 */ | ||
59 | |||
60 | /* pg -> disk layout */ | ||
61 | __le32 fl_object_stripe_unit; /* for per-object parity, if any */ | ||
62 | |||
63 | /* object -> pg layout */ | ||
64 | __le32 fl_pg_preferred; /* preferred primary for pg (-1 for none) */ | ||
65 | __le32 fl_pg_pool; /* namespace, crush ruleset, rep level */ | ||
66 | } __attribute__ ((packed)); | ||
67 | |||
68 | #define CEPH_MIN_STRIPE_UNIT 65536 | ||
69 | |||
70 | int ceph_file_layout_is_valid(const struct ceph_file_layout *layout); | ||
71 | |||
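The field comments above pin down the main constraints on a layout: the stripe unit is a byte count that must be page-aligned, data is striped over fl_stripe_count objects, and each object grows to fl_object_size before a new one is started. A hedged sketch of the kind of checks ceph_file_layout_is_valid() can be expected to perform; the actual implementation may differ:

static int example_layout_is_valid(const struct ceph_file_layout *layout)
{
	__u32 su = le32_to_cpu(layout->fl_stripe_unit);
	__u32 sc = le32_to_cpu(layout->fl_stripe_count);
	__u32 os = le32_to_cpu(layout->fl_object_size);

	if (!su || (su & (CEPH_MIN_STRIPE_UNIT - 1)))
		return 0;	/* stripe unit: non-zero, 64k aligned */
	if (!os || (os & (CEPH_MIN_STRIPE_UNIT - 1)))
		return 0;	/* object size: non-zero, 64k aligned */
	if (os < su || os % su)
		return 0;	/* objects hold a whole number of stripe units */
	if (!sc)
		return 0;	/* need at least one object to stripe over */
	return 1;
}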
72 | |||
73 | /* crypto algorithms */ | ||
74 | #define CEPH_CRYPTO_NONE 0x0 | ||
75 | #define CEPH_CRYPTO_AES 0x1 | ||
76 | |||
77 | #define CEPH_AES_IV "cephsageyudagreg" | ||
78 | |||
79 | /* security/authentication protocols */ | ||
80 | #define CEPH_AUTH_UNKNOWN 0x0 | ||
81 | #define CEPH_AUTH_NONE 0x1 | ||
82 | #define CEPH_AUTH_CEPHX 0x2 | ||
83 | |||
84 | #define CEPH_AUTH_UID_DEFAULT ((__u64) -1) | ||
85 | |||
86 | |||
87 | /********************************************* | ||
88 | * message layer | ||
89 | */ | ||
90 | |||
91 | /* | ||
92 | * message types | ||
93 | */ | ||
94 | |||
95 | /* misc */ | ||
96 | #define CEPH_MSG_SHUTDOWN 1 | ||
97 | #define CEPH_MSG_PING 2 | ||
98 | |||
99 | /* client <-> monitor */ | ||
100 | #define CEPH_MSG_MON_MAP 4 | ||
101 | #define CEPH_MSG_MON_GET_MAP 5 | ||
102 | #define CEPH_MSG_STATFS 13 | ||
103 | #define CEPH_MSG_STATFS_REPLY 14 | ||
104 | #define CEPH_MSG_MON_SUBSCRIBE 15 | ||
105 | #define CEPH_MSG_MON_SUBSCRIBE_ACK 16 | ||
106 | #define CEPH_MSG_AUTH 17 | ||
107 | #define CEPH_MSG_AUTH_REPLY 18 | ||
108 | |||
109 | /* client <-> mds */ | ||
110 | #define CEPH_MSG_MDS_MAP 21 | ||
111 | |||
112 | #define CEPH_MSG_CLIENT_SESSION 22 | ||
113 | #define CEPH_MSG_CLIENT_RECONNECT 23 | ||
114 | |||
115 | #define CEPH_MSG_CLIENT_REQUEST 24 | ||
116 | #define CEPH_MSG_CLIENT_REQUEST_FORWARD 25 | ||
117 | #define CEPH_MSG_CLIENT_REPLY 26 | ||
118 | #define CEPH_MSG_CLIENT_CAPS 0x310 | ||
119 | #define CEPH_MSG_CLIENT_LEASE 0x311 | ||
120 | #define CEPH_MSG_CLIENT_SNAP 0x312 | ||
121 | #define CEPH_MSG_CLIENT_CAPRELEASE 0x313 | ||
122 | |||
123 | /* pool ops */ | ||
124 | #define CEPH_MSG_POOLOP_REPLY 48 | ||
125 | #define CEPH_MSG_POOLOP 49 | ||
126 | |||
127 | |||
128 | /* osd */ | ||
129 | #define CEPH_MSG_OSD_MAP 41 | ||
130 | #define CEPH_MSG_OSD_OP 42 | ||
131 | #define CEPH_MSG_OSD_OPREPLY 43 | ||
132 | |||
133 | /* pool operations */ | ||
134 | enum { | ||
135 | POOL_OP_CREATE = 0x01, | ||
136 | POOL_OP_DELETE = 0x02, | ||
137 | POOL_OP_AUID_CHANGE = 0x03, | ||
138 | POOL_OP_CREATE_SNAP = 0x11, | ||
139 | POOL_OP_DELETE_SNAP = 0x12, | ||
140 | POOL_OP_CREATE_UNMANAGED_SNAP = 0x21, | ||
141 | POOL_OP_DELETE_UNMANAGED_SNAP = 0x22, | ||
142 | }; | ||
143 | |||
144 | struct ceph_mon_request_header { | ||
145 | __le64 have_version; | ||
146 | __le16 session_mon; | ||
147 | __le64 session_mon_tid; | ||
148 | } __attribute__ ((packed)); | ||
149 | |||
150 | struct ceph_mon_statfs { | ||
151 | struct ceph_mon_request_header monhdr; | ||
152 | struct ceph_fsid fsid; | ||
153 | } __attribute__ ((packed)); | ||
154 | |||
155 | struct ceph_statfs { | ||
156 | __le64 kb, kb_used, kb_avail; | ||
157 | __le64 num_objects; | ||
158 | } __attribute__ ((packed)); | ||
159 | |||
160 | struct ceph_mon_statfs_reply { | ||
161 | struct ceph_fsid fsid; | ||
162 | __le64 version; | ||
163 | struct ceph_statfs st; | ||
164 | } __attribute__ ((packed)); | ||
165 | |||
166 | const char *ceph_pool_op_name(int op); | ||
167 | |||
168 | struct ceph_mon_poolop { | ||
169 | struct ceph_mon_request_header monhdr; | ||
170 | struct ceph_fsid fsid; | ||
171 | __le32 pool; | ||
172 | __le32 op; | ||
173 | __le64 auid; | ||
174 | __le64 snapid; | ||
175 | __le32 name_len; | ||
176 | } __attribute__ ((packed)); | ||
177 | |||
178 | struct ceph_mon_poolop_reply { | ||
179 | struct ceph_mon_request_header monhdr; | ||
180 | struct ceph_fsid fsid; | ||
181 | __le32 reply_code; | ||
182 | __le32 epoch; | ||
183 | char has_data; | ||
184 | char data[0]; | ||
185 | } __attribute__ ((packed)); | ||
186 | |||
187 | struct ceph_mon_unmanaged_snap { | ||
188 | __le64 snapid; | ||
189 | } __attribute__ ((packed)); | ||
190 | |||
191 | struct ceph_osd_getmap { | ||
192 | struct ceph_mon_request_header monhdr; | ||
193 | struct ceph_fsid fsid; | ||
194 | __le32 start; | ||
195 | } __attribute__ ((packed)); | ||
196 | |||
197 | struct ceph_mds_getmap { | ||
198 | struct ceph_mon_request_header monhdr; | ||
199 | struct ceph_fsid fsid; | ||
200 | } __attribute__ ((packed)); | ||
201 | |||
202 | struct ceph_client_mount { | ||
203 | struct ceph_mon_request_header monhdr; | ||
204 | } __attribute__ ((packed)); | ||
205 | |||
206 | struct ceph_mon_subscribe_item { | ||
207 | __le64 have_version; __le64 have; | ||
208 | __u8 onetime; | ||
209 | } __attribute__ ((packed)); | ||
210 | |||
211 | struct ceph_mon_subscribe_ack { | ||
212 | __le32 duration; /* seconds */ | ||
213 | struct ceph_fsid fsid; | ||
214 | } __attribute__ ((packed)); | ||
215 | |||
216 | /* | ||
217 | * mds states | ||
218 | * > 0 -> in | ||
219 | * <= 0 -> out | ||
220 | */ | ||
221 | #define CEPH_MDS_STATE_DNE 0 /* down, does not exist. */ | ||
222 | #define CEPH_MDS_STATE_STOPPED -1 /* down, once existed, but no subtrees. | ||
223 | empty log. */ | ||
224 | #define CEPH_MDS_STATE_BOOT -4 /* up, boot announcement. */ | ||
225 | #define CEPH_MDS_STATE_STANDBY -5 /* up, idle. waiting for assignment. */ | ||
226 | #define CEPH_MDS_STATE_CREATING -6 /* up, creating MDS instance. */ | ||
227 | #define CEPH_MDS_STATE_STARTING -7 /* up, starting previously stopped mds */ | ||
228 | #define CEPH_MDS_STATE_STANDBY_REPLAY -8 /* up, tailing active node's journal */ | ||
229 | |||
230 | #define CEPH_MDS_STATE_REPLAY 8 /* up, replaying journal. */ | ||
231 | #define CEPH_MDS_STATE_RESOLVE 9 /* up, disambiguating distributed | ||
232 | operations (import, rename, etc.) */ | ||
233 | #define CEPH_MDS_STATE_RECONNECT 10 /* up, reconnect to clients */ | ||
234 | #define CEPH_MDS_STATE_REJOIN 11 /* up, rejoining distributed cache */ | ||
235 | #define CEPH_MDS_STATE_CLIENTREPLAY 12 /* up, replaying client operations */ | ||
236 | #define CEPH_MDS_STATE_ACTIVE 13 /* up, active */ | ||
237 | #define CEPH_MDS_STATE_STOPPING 14 /* up, but exporting metadata */ | ||
238 | |||
239 | extern const char *ceph_mds_state_name(int s); | ||
240 | |||
241 | |||
242 | /* | ||
243 | * metadata lock types. | ||
244 | * - these are bitmasks.. we can compose them | ||
245 | * - they also define the lock ordering by the MDS | ||
246 | * - a few of these are internal to the mds | ||
247 | */ | ||
248 | #define CEPH_LOCK_DVERSION 1 | ||
249 | #define CEPH_LOCK_DN 2 | ||
250 | #define CEPH_LOCK_ISNAP 16 | ||
251 | #define CEPH_LOCK_IVERSION 32 /* mds internal */ | ||
252 | #define CEPH_LOCK_IFILE 64 | ||
253 | #define CEPH_LOCK_IAUTH 128 | ||
254 | #define CEPH_LOCK_ILINK 256 | ||
255 | #define CEPH_LOCK_IDFT 512 /* dir frag tree */ | ||
256 | #define CEPH_LOCK_INEST 1024 /* mds internal */ | ||
257 | #define CEPH_LOCK_IXATTR 2048 | ||
258 | #define CEPH_LOCK_IFLOCK 4096 /* advisory file locks */ | ||
259 | #define CEPH_LOCK_INO 8192 /* immutable inode bits; not a lock */ | ||
260 | |||
261 | /* client_session ops */ | ||
262 | enum { | ||
263 | CEPH_SESSION_REQUEST_OPEN, | ||
264 | CEPH_SESSION_OPEN, | ||
265 | CEPH_SESSION_REQUEST_CLOSE, | ||
266 | CEPH_SESSION_CLOSE, | ||
267 | CEPH_SESSION_REQUEST_RENEWCAPS, | ||
268 | CEPH_SESSION_RENEWCAPS, | ||
269 | CEPH_SESSION_STALE, | ||
270 | CEPH_SESSION_RECALL_STATE, | ||
271 | }; | ||
272 | |||
273 | extern const char *ceph_session_op_name(int op); | ||
274 | |||
275 | struct ceph_mds_session_head { | ||
276 | __le32 op; | ||
277 | __le64 seq; | ||
278 | struct ceph_timespec stamp; | ||
279 | __le32 max_caps, max_leases; | ||
280 | } __attribute__ ((packed)); | ||
281 | |||
282 | /* client_request */ | ||
283 | /* | ||
284 | * metadata ops. | ||
285 | * & 0x001000 -> write op | ||
286 | * & 0x010000 -> follow symlink (e.g. stat(), not lstat()). | ||
287 | * & 0x100000 -> use weird ino/path trace | ||
288 | */ | ||
289 | #define CEPH_MDS_OP_WRITE 0x001000 | ||
290 | enum { | ||
291 | CEPH_MDS_OP_LOOKUP = 0x00100, | ||
292 | CEPH_MDS_OP_GETATTR = 0x00101, | ||
293 | CEPH_MDS_OP_LOOKUPHASH = 0x00102, | ||
294 | CEPH_MDS_OP_LOOKUPPARENT = 0x00103, | ||
295 | |||
296 | CEPH_MDS_OP_SETXATTR = 0x01105, | ||
297 | CEPH_MDS_OP_RMXATTR = 0x01106, | ||
298 | CEPH_MDS_OP_SETLAYOUT = 0x01107, | ||
299 | CEPH_MDS_OP_SETATTR = 0x01108, | ||
300 | CEPH_MDS_OP_SETFILELOCK= 0x01109, | ||
301 | CEPH_MDS_OP_GETFILELOCK= 0x00110, | ||
302 | CEPH_MDS_OP_SETDIRLAYOUT=0x0110a, | ||
303 | |||
304 | CEPH_MDS_OP_MKNOD = 0x01201, | ||
305 | CEPH_MDS_OP_LINK = 0x01202, | ||
306 | CEPH_MDS_OP_UNLINK = 0x01203, | ||
307 | CEPH_MDS_OP_RENAME = 0x01204, | ||
308 | CEPH_MDS_OP_MKDIR = 0x01220, | ||
309 | CEPH_MDS_OP_RMDIR = 0x01221, | ||
310 | CEPH_MDS_OP_SYMLINK = 0x01222, | ||
311 | |||
312 | CEPH_MDS_OP_CREATE = 0x01301, | ||
313 | CEPH_MDS_OP_OPEN = 0x00302, | ||
314 | CEPH_MDS_OP_READDIR = 0x00305, | ||
315 | |||
316 | CEPH_MDS_OP_LOOKUPSNAP = 0x00400, | ||
317 | CEPH_MDS_OP_MKSNAP = 0x01400, | ||
318 | CEPH_MDS_OP_RMSNAP = 0x01401, | ||
319 | CEPH_MDS_OP_LSSNAP = 0x00402, | ||
320 | }; | ||
321 | |||
322 | extern const char *ceph_mds_op_name(int op); | ||
323 | |||
324 | |||
325 | #define CEPH_SETATTR_MODE 1 | ||
326 | #define CEPH_SETATTR_UID 2 | ||
327 | #define CEPH_SETATTR_GID 4 | ||
328 | #define CEPH_SETATTR_MTIME 8 | ||
329 | #define CEPH_SETATTR_ATIME 16 | ||
330 | #define CEPH_SETATTR_SIZE 32 | ||
331 | #define CEPH_SETATTR_CTIME 64 | ||
332 | |||
333 | union ceph_mds_request_args { | ||
334 | struct { | ||
335 | __le32 mask; /* CEPH_CAP_* */ | ||
336 | } __attribute__ ((packed)) getattr; | ||
337 | struct { | ||
338 | __le32 mode; | ||
339 | __le32 uid; | ||
340 | __le32 gid; | ||
341 | struct ceph_timespec mtime; | ||
342 | struct ceph_timespec atime; | ||
343 | __le64 size, old_size; /* old_size needed by truncate */ | ||
344 | __le32 mask; /* CEPH_SETATTR_* */ | ||
345 | } __attribute__ ((packed)) setattr; | ||
346 | struct { | ||
347 | __le32 frag; /* which dir fragment */ | ||
348 | __le32 max_entries; /* how many dentries to grab */ | ||
349 | __le32 max_bytes; | ||
350 | } __attribute__ ((packed)) readdir; | ||
351 | struct { | ||
352 | __le32 mode; | ||
353 | __le32 rdev; | ||
354 | } __attribute__ ((packed)) mknod; | ||
355 | struct { | ||
356 | __le32 mode; | ||
357 | } __attribute__ ((packed)) mkdir; | ||
358 | struct { | ||
359 | __le32 flags; | ||
360 | __le32 mode; | ||
361 | __le32 stripe_unit; /* layout for newly created file */ | ||
362 | __le32 stripe_count; /* ... */ | ||
363 | __le32 object_size; | ||
364 | __le32 file_replication; | ||
365 | __le32 preferred; | ||
366 | } __attribute__ ((packed)) open; | ||
367 | struct { | ||
368 | __le32 flags; | ||
369 | } __attribute__ ((packed)) setxattr; | ||
370 | struct { | ||
371 | struct ceph_file_layout layout; | ||
372 | } __attribute__ ((packed)) setlayout; | ||
373 | struct { | ||
374 | __u8 rule; /* currently fcntl or flock */ | ||
375 | __u8 type; /* shared, exclusive, remove*/ | ||
376 | __le64 pid; /* process id requesting the lock */ | ||
377 | __le64 pid_namespace; | ||
378 | __le64 start; /* initial location to lock */ | ||
379 | __le64 length; /* num bytes to lock from start */ | ||
380 | __u8 wait; /* will caller wait for lock to become available? */ | ||
381 | } __attribute__ ((packed)) filelock_change; | ||
382 | } __attribute__ ((packed)); | ||
383 | |||
384 | #define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */ | ||
385 | #define CEPH_MDS_FLAG_WANT_DENTRY 2 /* want dentry in reply */ | ||
386 | |||
387 | struct ceph_mds_request_head { | ||
388 | __le64 oldest_client_tid; | ||
389 | __le32 mdsmap_epoch; /* on client */ | ||
390 | __le32 flags; /* CEPH_MDS_FLAG_* */ | ||
391 | __u8 num_retry, num_fwd; /* count retry, fwd attempts */ | ||
392 | __le16 num_releases; /* # include cap/lease release records */ | ||
393 | __le32 op; /* mds op code */ | ||
394 | __le32 caller_uid, caller_gid; | ||
395 | __le64 ino; /* use this ino for openc, mkdir, mknod, | ||
396 | etc. (if replaying) */ | ||
397 | union ceph_mds_request_args args; | ||
398 | } __attribute__ ((packed)); | ||
399 | |||
400 | /* cap/lease release record */ | ||
401 | struct ceph_mds_request_release { | ||
402 | __le64 ino, cap_id; /* ino and unique cap id */ | ||
403 | __le32 caps, wanted; /* new issued, wanted */ | ||
404 | __le32 seq, issue_seq, mseq; | ||
405 | __le32 dname_seq; /* if releasing a dentry lease, a */ | ||
406 | __le32 dname_len; /* string follows. */ | ||
407 | } __attribute__ ((packed)); | ||
408 | |||
409 | /* client reply */ | ||
410 | struct ceph_mds_reply_head { | ||
411 | __le32 op; | ||
412 | __le32 result; | ||
413 | __le32 mdsmap_epoch; | ||
414 | __u8 safe; /* true if committed to disk */ | ||
415 | __u8 is_dentry, is_target; /* true if dentry, target inode records | ||
416 | are included with reply */ | ||
417 | } __attribute__ ((packed)); | ||
418 | |||
419 | /* one for each node split */ | ||
420 | struct ceph_frag_tree_split { | ||
421 | __le32 frag; /* this frag splits... */ | ||
422 | __le32 by; /* ...by this many bits */ | ||
423 | } __attribute__ ((packed)); | ||
424 | |||
425 | struct ceph_frag_tree_head { | ||
426 | __le32 nsplits; /* num ceph_frag_tree_split records */ | ||
427 | struct ceph_frag_tree_split splits[]; | ||
428 | } __attribute__ ((packed)); | ||
429 | |||
430 | /* capability issue, for bundling with mds reply */ | ||
431 | struct ceph_mds_reply_cap { | ||
432 | __le32 caps, wanted; /* caps issued, wanted */ | ||
433 | __le64 cap_id; | ||
434 | __le32 seq, mseq; | ||
435 | __le64 realm; /* snap realm */ | ||
436 | __u8 flags; /* CEPH_CAP_FLAG_* */ | ||
437 | } __attribute__ ((packed)); | ||
438 | |||
439 | #define CEPH_CAP_FLAG_AUTH 1 /* cap is issued by auth mds */ | ||
440 | |||
441 | /* inode record, for bundling with mds reply */ | ||
442 | struct ceph_mds_reply_inode { | ||
443 | __le64 ino; | ||
444 | __le64 snapid; | ||
445 | __le32 rdev; | ||
446 | __le64 version; /* inode version */ | ||
447 | __le64 xattr_version; /* version for xattr blob */ | ||
448 | struct ceph_mds_reply_cap cap; /* caps issued for this inode */ | ||
449 | struct ceph_file_layout layout; | ||
450 | struct ceph_timespec ctime, mtime, atime; | ||
451 | __le32 time_warp_seq; | ||
452 | __le64 size, max_size, truncate_size; | ||
453 | __le32 truncate_seq; | ||
454 | __le32 mode, uid, gid; | ||
455 | __le32 nlink; | ||
456 | __le64 files, subdirs, rbytes, rfiles, rsubdirs; /* dir stats */ | ||
457 | struct ceph_timespec rctime; | ||
458 | struct ceph_frag_tree_head fragtree; /* (must be at end of struct) */ | ||
459 | } __attribute__ ((packed)); | ||
460 | /* followed by frag array, then symlink string, then xattr blob */ | ||
461 | |||
462 | /* reply_lease follows dname, and reply_inode */ | ||
463 | struct ceph_mds_reply_lease { | ||
464 | __le16 mask; /* lease type(s) */ | ||
465 | __le32 duration_ms; /* lease duration */ | ||
466 | __le32 seq; | ||
467 | } __attribute__ ((packed)); | ||
468 | |||
469 | struct ceph_mds_reply_dirfrag { | ||
470 | __le32 frag; /* fragment */ | ||
471 | __le32 auth; /* auth mds, if this is a delegation point */ | ||
472 | __le32 ndist; /* number of mds' this is replicated on */ | ||
473 | __le32 dist[]; | ||
474 | } __attribute__ ((packed)); | ||
475 | |||
476 | #define CEPH_LOCK_FCNTL 1 | ||
477 | #define CEPH_LOCK_FLOCK 2 | ||
478 | |||
479 | #define CEPH_LOCK_SHARED 1 | ||
480 | #define CEPH_LOCK_EXCL 2 | ||
481 | #define CEPH_LOCK_UNLOCK 4 | ||
482 | |||
483 | struct ceph_filelock { | ||
484 | __le64 start;/* file offset to start lock at */ | ||
485 | __le64 length; /* num bytes to lock; 0 for all following start */ | ||
486 | __le64 client; /* which client holds the lock */ | ||
487 | __le64 pid; /* process id holding the lock on the client */ | ||
488 | __le64 pid_namespace; | ||
489 | __u8 type; /* shared lock, exclusive lock, or unlock */ | ||
490 | } __attribute__ ((packed)); | ||
491 | |||
492 | |||
493 | /* file access modes */ | ||
494 | #define CEPH_FILE_MODE_PIN 0 | ||
495 | #define CEPH_FILE_MODE_RD 1 | ||
496 | #define CEPH_FILE_MODE_WR 2 | ||
497 | #define CEPH_FILE_MODE_RDWR 3 /* RD | WR */ | ||
498 | #define CEPH_FILE_MODE_LAZY 4 /* lazy io */ | ||
499 | #define CEPH_FILE_MODE_NUM 8 /* bc these are bit fields.. mostly */ | ||
500 | |||
501 | int ceph_flags_to_mode(int flags); | ||
502 | |||
503 | |||
504 | /* capability bits */ | ||
505 | #define CEPH_CAP_PIN 1 /* no specific capabilities beyond the pin */ | ||
506 | |||
507 | /* generic cap bits */ | ||
508 | #define CEPH_CAP_GSHARED 1 /* client can read */ | ||
509 | #define CEPH_CAP_GEXCL 2 /* client can read and update */ | ||
510 | #define CEPH_CAP_GCACHE 4 /* (file) client can cache reads */ | ||
511 | #define CEPH_CAP_GRD 8 /* (file) client can read */ | ||
512 | #define CEPH_CAP_GWR 16 /* (file) client can write */ | ||
513 | #define CEPH_CAP_GBUFFER 32 /* (file) client can buffer writes */ | ||
514 | #define CEPH_CAP_GWREXTEND 64 /* (file) client can extend EOF */ | ||
515 | #define CEPH_CAP_GLAZYIO 128 /* (file) client can perform lazy io */ | ||
516 | |||
517 | /* per-lock shift */ | ||
518 | #define CEPH_CAP_SAUTH 2 | ||
519 | #define CEPH_CAP_SLINK 4 | ||
520 | #define CEPH_CAP_SXATTR 6 | ||
521 | #define CEPH_CAP_SFILE 8 | ||
522 | #define CEPH_CAP_SFLOCK 20 | ||
523 | |||
524 | #define CEPH_CAP_BITS 22 | ||
525 | |||
526 | /* composed values */ | ||
527 | #define CEPH_CAP_AUTH_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SAUTH) | ||
528 | #define CEPH_CAP_AUTH_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SAUTH) | ||
529 | #define CEPH_CAP_LINK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SLINK) | ||
530 | #define CEPH_CAP_LINK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SLINK) | ||
531 | #define CEPH_CAP_XATTR_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SXATTR) | ||
532 | #define CEPH_CAP_XATTR_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SXATTR) | ||
533 | #define CEPH_CAP_FILE(x) (x << CEPH_CAP_SFILE) | ||
534 | #define CEPH_CAP_FILE_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFILE) | ||
535 | #define CEPH_CAP_FILE_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFILE) | ||
536 | #define CEPH_CAP_FILE_CACHE (CEPH_CAP_GCACHE << CEPH_CAP_SFILE) | ||
537 | #define CEPH_CAP_FILE_RD (CEPH_CAP_GRD << CEPH_CAP_SFILE) | ||
538 | #define CEPH_CAP_FILE_WR (CEPH_CAP_GWR << CEPH_CAP_SFILE) | ||
539 | #define CEPH_CAP_FILE_BUFFER (CEPH_CAP_GBUFFER << CEPH_CAP_SFILE) | ||
540 | #define CEPH_CAP_FILE_WREXTEND (CEPH_CAP_GWREXTEND << CEPH_CAP_SFILE) | ||
541 | #define CEPH_CAP_FILE_LAZYIO (CEPH_CAP_GLAZYIO << CEPH_CAP_SFILE) | ||
542 | #define CEPH_CAP_FLOCK_SHARED (CEPH_CAP_GSHARED << CEPH_CAP_SFLOCK) | ||
543 | #define CEPH_CAP_FLOCK_EXCL (CEPH_CAP_GEXCL << CEPH_CAP_SFLOCK) | ||
544 | |||
545 | |||
546 | /* cap masks (for getattr) */ | ||
547 | #define CEPH_STAT_CAP_INODE CEPH_CAP_PIN | ||
548 | #define CEPH_STAT_CAP_TYPE CEPH_CAP_PIN /* mode >> 12 */ | ||
549 | #define CEPH_STAT_CAP_SYMLINK CEPH_CAP_PIN | ||
550 | #define CEPH_STAT_CAP_UID CEPH_CAP_AUTH_SHARED | ||
551 | #define CEPH_STAT_CAP_GID CEPH_CAP_AUTH_SHARED | ||
552 | #define CEPH_STAT_CAP_MODE CEPH_CAP_AUTH_SHARED | ||
553 | #define CEPH_STAT_CAP_NLINK CEPH_CAP_LINK_SHARED | ||
554 | #define CEPH_STAT_CAP_LAYOUT CEPH_CAP_FILE_SHARED | ||
555 | #define CEPH_STAT_CAP_MTIME CEPH_CAP_FILE_SHARED | ||
556 | #define CEPH_STAT_CAP_SIZE CEPH_CAP_FILE_SHARED | ||
557 | #define CEPH_STAT_CAP_ATIME CEPH_CAP_FILE_SHARED /* fixme */ | ||
558 | #define CEPH_STAT_CAP_XATTR CEPH_CAP_XATTR_SHARED | ||
559 | #define CEPH_STAT_CAP_INODE_ALL (CEPH_CAP_PIN | \ | ||
560 | CEPH_CAP_AUTH_SHARED | \ | ||
561 | CEPH_CAP_LINK_SHARED | \ | ||
562 | CEPH_CAP_FILE_SHARED | \ | ||
563 | CEPH_CAP_XATTR_SHARED) | ||
564 | |||
565 | #define CEPH_CAP_ANY_SHARED (CEPH_CAP_AUTH_SHARED | \ | ||
566 | CEPH_CAP_LINK_SHARED | \ | ||
567 | CEPH_CAP_XATTR_SHARED | \ | ||
568 | CEPH_CAP_FILE_SHARED) | ||
569 | #define CEPH_CAP_ANY_RD (CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_RD | \ | ||
570 | CEPH_CAP_FILE_CACHE) | ||
571 | |||
572 | #define CEPH_CAP_ANY_EXCL (CEPH_CAP_AUTH_EXCL | \ | ||
573 | CEPH_CAP_LINK_EXCL | \ | ||
574 | CEPH_CAP_XATTR_EXCL | \ | ||
575 | CEPH_CAP_FILE_EXCL) | ||
576 | #define CEPH_CAP_ANY_FILE_WR (CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER | \ | ||
577 | CEPH_CAP_FILE_EXCL) | ||
578 | #define CEPH_CAP_ANY_WR (CEPH_CAP_ANY_EXCL | CEPH_CAP_ANY_FILE_WR) | ||
579 | #define CEPH_CAP_ANY (CEPH_CAP_ANY_RD | CEPH_CAP_ANY_EXCL | \ | ||
580 | CEPH_CAP_ANY_FILE_WR | CEPH_CAP_FILE_LAZYIO | \ | ||
581 | CEPH_CAP_PIN) | ||
582 | |||
583 | #define CEPH_CAP_LOCKS (CEPH_LOCK_IFILE | CEPH_LOCK_IAUTH | CEPH_LOCK_ILINK | \ | ||
584 | CEPH_LOCK_IXATTR) | ||
585 | |||
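Since the composed capability names are just the generic bits shifted into the per-lock fields, a few expanded values make the scheme concrete:

/*
 * CEPH_CAP_AUTH_SHARED = 1  << 2  = 0x000004
 * CEPH_CAP_FILE_SHARED = 1  << 8  = 0x000100
 * CEPH_CAP_FILE_RD     = 8  << 8  = 0x000800
 * CEPH_CAP_FILE_WR     = 16 << 8  = 0x001000
 * CEPH_CAP_FLOCK_EXCL  = 2  << 20 = 0x200000
 *
 * The FLOCK field starts at bit 20 and holds two generic bits, which is
 * why CEPH_CAP_BITS is 22.
 */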
586 | int ceph_caps_for_mode(int mode); | ||
587 | |||
588 | enum { | ||
589 | CEPH_CAP_OP_GRANT, /* mds->client grant */ | ||
590 | CEPH_CAP_OP_REVOKE, /* mds->client revoke */ | ||
591 | CEPH_CAP_OP_TRUNC, /* mds->client trunc notify */ | ||
592 | CEPH_CAP_OP_EXPORT, /* mds has exported the cap */ | ||
593 | CEPH_CAP_OP_IMPORT, /* mds has imported the cap */ | ||
594 | CEPH_CAP_OP_UPDATE, /* client->mds update */ | ||
595 | CEPH_CAP_OP_DROP, /* client->mds drop cap bits */ | ||
596 | CEPH_CAP_OP_FLUSH, /* client->mds cap writeback */ | ||
597 | CEPH_CAP_OP_FLUSH_ACK, /* mds->client flushed */ | ||
598 | CEPH_CAP_OP_FLUSHSNAP, /* client->mds flush snapped metadata */ | ||
599 | CEPH_CAP_OP_FLUSHSNAP_ACK, /* mds->client flushed snapped metadata */ | ||
600 | CEPH_CAP_OP_RELEASE, /* client->mds release (clean) cap */ | ||
601 | CEPH_CAP_OP_RENEW, /* client->mds renewal request */ | ||
602 | }; | ||
603 | |||
604 | extern const char *ceph_cap_op_name(int op); | ||
605 | |||
606 | /* | ||
607 | * caps message, used for capability callbacks, acks, requests, etc. | ||
608 | */ | ||
609 | struct ceph_mds_caps { | ||
610 | __le32 op; /* CEPH_CAP_OP_* */ | ||
611 | __le64 ino, realm; | ||
612 | __le64 cap_id; | ||
613 | __le32 seq, issue_seq; | ||
614 | __le32 caps, wanted, dirty; /* latest issued/wanted/dirty */ | ||
615 | __le32 migrate_seq; | ||
616 | __le64 snap_follows; | ||
617 | __le32 snap_trace_len; | ||
618 | |||
619 | /* authlock */ | ||
620 | __le32 uid, gid, mode; | ||
621 | |||
622 | /* linklock */ | ||
623 | __le32 nlink; | ||
624 | |||
625 | /* xattrlock */ | ||
626 | __le32 xattr_len; | ||
627 | __le64 xattr_version; | ||
628 | |||
629 | /* filelock */ | ||
630 | __le64 size, max_size, truncate_size; | ||
631 | __le32 truncate_seq; | ||
632 | struct ceph_timespec mtime, atime, ctime; | ||
633 | struct ceph_file_layout layout; | ||
634 | __le32 time_warp_seq; | ||
635 | } __attribute__ ((packed)); | ||
636 | |||
637 | /* cap release msg head */ | ||
638 | struct ceph_mds_cap_release { | ||
639 | __le32 num; /* number of cap_items that follow */ | ||
640 | } __attribute__ ((packed)); | ||
641 | |||
642 | struct ceph_mds_cap_item { | ||
643 | __le64 ino; | ||
644 | __le64 cap_id; | ||
645 | __le32 migrate_seq, seq; | ||
646 | } __attribute__ ((packed)); | ||
647 | |||
648 | #define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */ | ||
649 | #define CEPH_MDS_LEASE_RELEASE 2 /* client -> mds */ | ||
650 | #define CEPH_MDS_LEASE_RENEW 3 /* client <-> mds */ | ||
651 | #define CEPH_MDS_LEASE_REVOKE_ACK 4 /* client -> mds */ | ||
652 | |||
653 | extern const char *ceph_lease_op_name(int o); | ||
654 | |||
655 | /* lease msg header */ | ||
656 | struct ceph_mds_lease { | ||
657 | __u8 action; /* CEPH_MDS_LEASE_* */ | ||
658 | __le16 mask; /* which lease */ | ||
659 | __le64 ino; | ||
660 | __le64 first, last; /* snap range */ | ||
661 | __le32 seq; | ||
662 | __le32 duration_ms; /* duration of renewal */ | ||
663 | } __attribute__ ((packed)); | ||
664 | /* followed by a __le32+string for dname */ | ||
665 | |||
666 | /* client reconnect */ | ||
667 | struct ceph_mds_cap_reconnect { | ||
668 | __le64 cap_id; | ||
669 | __le32 wanted; | ||
670 | __le32 issued; | ||
671 | __le64 snaprealm; | ||
672 | __le64 pathbase; /* base ino for our path to this ino */ | ||
673 | __le32 flock_len; /* size of flock state blob, if any */ | ||
674 | } __attribute__ ((packed)); | ||
675 | /* followed by flock blob */ | ||
676 | |||
677 | struct ceph_mds_cap_reconnect_v1 { | ||
678 | __le64 cap_id; | ||
679 | __le32 wanted; | ||
680 | __le32 issued; | ||
681 | __le64 size; | ||
682 | struct ceph_timespec mtime, atime; | ||
683 | __le64 snaprealm; | ||
684 | __le64 pathbase; /* base ino for our path to this ino */ | ||
685 | } __attribute__ ((packed)); | ||
686 | |||
687 | struct ceph_mds_snaprealm_reconnect { | ||
688 | __le64 ino; /* snap realm base */ | ||
689 | __le64 seq; /* snap seq for this snap realm */ | ||
690 | __le64 parent; /* parent realm */ | ||
691 | } __attribute__ ((packed)); | ||
692 | |||
693 | /* | ||
694 | * snaps | ||
695 | */ | ||
696 | enum { | ||
697 | CEPH_SNAP_OP_UPDATE, /* CREATE or DESTROY */ | ||
698 | CEPH_SNAP_OP_CREATE, | ||
699 | CEPH_SNAP_OP_DESTROY, | ||
700 | CEPH_SNAP_OP_SPLIT, | ||
701 | }; | ||
702 | |||
703 | extern const char *ceph_snap_op_name(int o); | ||
704 | |||
705 | /* snap msg header */ | ||
706 | struct ceph_mds_snap_head { | ||
707 | __le32 op; /* CEPH_SNAP_OP_* */ | ||
708 | __le64 split; /* ino to split off, if any */ | ||
709 | __le32 num_split_inos; /* # inos belonging to new child realm */ | ||
710 | __le32 num_split_realms; /* # child realms under new child realm */ | ||
711 | __le32 trace_len; /* size of snap trace blob */ | ||
712 | } __attribute__ ((packed)); | ||
713 | /* followed by split ino list, then split realms, then the trace blob */ | ||
714 | |||
715 | /* | ||
716 | * encode info about a snaprealm, as viewed by a client | ||
717 | */ | ||
718 | struct ceph_mds_snap_realm { | ||
719 | __le64 ino; /* ino */ | ||
720 | __le64 created; /* snap: when created */ | ||
721 | __le64 parent; /* ino: parent realm */ | ||
722 | __le64 parent_since; /* snap: same parent since */ | ||
723 | __le64 seq; /* snap: version */ | ||
724 | __le32 num_snaps; | ||
725 | __le32 num_prior_parent_snaps; | ||
726 | } __attribute__ ((packed)); | ||
727 | /* followed by my snap list, then prior parent snap list */ | ||
728 | |||
729 | #endif | ||
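Several of the structures above are followed on the wire by variable-length data; for example, ceph_mds_snap_head is followed by the split ino list, the split realms, and the trace blob. A hedged sketch of walking that tail with the ceph_decode_* helpers this patch adds in include/linux/ceph/decode.h; error handling is minimal and the function is illustrative only:

static int example_walk_snap_msg(void *p, void *end)
{
	struct ceph_mds_snap_head *h;
	u32 num_split_inos, num_split_realms, trace_len;

	if (p + sizeof(*h) > end)
		return -EINVAL;
	h = p;
	num_split_inos = le32_to_cpu(h->num_split_inos);
	num_split_realms = le32_to_cpu(h->num_split_realms);
	trace_len = le32_to_cpu(h->trace_len);
	p += sizeof(*h);

	ceph_decode_need(&p, end, num_split_inos * sizeof(__le64) +
			 num_split_realms * sizeof(__le64) + trace_len, bad);
	p += num_split_inos * sizeof(__le64);	/* split ino list */
	p += num_split_realms * sizeof(__le64);	/* split realm list */
	p += trace_len;				/* snap trace blob */
	return 0;
bad:
	return -EINVAL;
}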
diff --git a/include/linux/ceph/ceph_hash.h b/include/linux/ceph/ceph_hash.h new file mode 100644 index 000000000000..d099c3f90236 --- /dev/null +++ b/include/linux/ceph/ceph_hash.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef FS_CEPH_HASH_H | ||
2 | #define FS_CEPH_HASH_H | ||
3 | |||
4 | #define CEPH_STR_HASH_LINUX 0x1 /* linux dcache hash */ | ||
5 | #define CEPH_STR_HASH_RJENKINS 0x2 /* robert jenkins' */ | ||
6 | |||
7 | extern unsigned ceph_str_hash_linux(const char *s, unsigned len); | ||
8 | extern unsigned ceph_str_hash_rjenkins(const char *s, unsigned len); | ||
9 | |||
10 | extern unsigned ceph_str_hash(int type, const char *s, unsigned len); | ||
11 | extern const char *ceph_str_hash_name(int type); | ||
12 | |||
13 | #endif | ||
diff --git a/include/linux/ceph/debugfs.h b/include/linux/ceph/debugfs.h new file mode 100644 index 000000000000..2a79702e092b --- /dev/null +++ b/include/linux/ceph/debugfs.h | |||
@@ -0,0 +1,33 @@ | |||
1 | #ifndef _FS_CEPH_DEBUGFS_H | ||
2 | #define _FS_CEPH_DEBUGFS_H | ||
3 | |||
4 | #include "ceph_debug.h" | ||
5 | #include "types.h" | ||
6 | |||
7 | #define CEPH_DEFINE_SHOW_FUNC(name) \ | ||
8 | static int name##_open(struct inode *inode, struct file *file) \ | ||
9 | { \ | ||
10 | struct seq_file *sf; \ | ||
11 | int ret; \ | ||
12 | \ | ||
13 | ret = single_open(file, name, NULL); \ | ||
14 | sf = file->private_data; \ | ||
15 | sf->private = inode->i_private; \ | ||
16 | return ret; \ | ||
17 | } \ | ||
18 | \ | ||
19 | static const struct file_operations name##_fops = { \ | ||
20 | .open = name##_open, \ | ||
21 | .read = seq_read, \ | ||
22 | .llseek = seq_lseek, \ | ||
23 | .release = single_release, \ | ||
24 | }; | ||
25 | |||
26 | /* debugfs.c */ | ||
27 | extern int ceph_debugfs_init(void); | ||
28 | extern void ceph_debugfs_cleanup(void); | ||
29 | extern int ceph_debugfs_client_init(struct ceph_client *client); | ||
30 | extern void ceph_debugfs_client_cleanup(struct ceph_client *client); | ||
31 | |||
32 | #endif | ||
33 | |||
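CEPH_DEFINE_SHOW_FUNC wires an ordinary seq_file "show" routine into a single_open()-based file_operations. A usage sketch with hypothetical names (example_show, example_show_fops); the data pointer handed to debugfs_create_file() is what the generated open() copies into seq_file->private:

static int example_show(struct seq_file *s, void *p)
{
	struct ceph_client *client = s->private;

	seq_printf(s, "fsid %pU\n", &client->fsid);
	return 0;
}

CEPH_DEFINE_SHOW_FUNC(example_show)

/* later, e.g. from ceph_debugfs_client_init():
 *	debugfs_create_file("example", 0400, client->debugfs_dir,
 *			    client, &example_show_fops);
 */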
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h new file mode 100644 index 000000000000..c5b6939fb32a --- /dev/null +++ b/include/linux/ceph/decode.h | |||
@@ -0,0 +1,201 @@ | |||
1 | #ifndef __CEPH_DECODE_H | ||
2 | #define __CEPH_DECODE_H | ||
3 | |||
4 | #include <asm/unaligned.h> | ||
5 | #include <linux/time.h> | ||
6 | |||
7 | #include "types.h" | ||
8 | |||
9 | /* | ||
10 | * in all cases, | ||
11 | * void **p pointer to position pointer | ||
12 | * void *end pointer to end of buffer (last byte + 1) | ||
13 | */ | ||
14 | |||
15 | static inline u64 ceph_decode_64(void **p) | ||
16 | { | ||
17 | u64 v = get_unaligned_le64(*p); | ||
18 | *p += sizeof(u64); | ||
19 | return v; | ||
20 | } | ||
21 | static inline u32 ceph_decode_32(void **p) | ||
22 | { | ||
23 | u32 v = get_unaligned_le32(*p); | ||
24 | *p += sizeof(u32); | ||
25 | return v; | ||
26 | } | ||
27 | static inline u16 ceph_decode_16(void **p) | ||
28 | { | ||
29 | u16 v = get_unaligned_le16(*p); | ||
30 | *p += sizeof(u16); | ||
31 | return v; | ||
32 | } | ||
33 | static inline u8 ceph_decode_8(void **p) | ||
34 | { | ||
35 | u8 v = *(u8 *)*p; | ||
36 | (*p)++; | ||
37 | return v; | ||
38 | } | ||
39 | static inline void ceph_decode_copy(void **p, void *pv, size_t n) | ||
40 | { | ||
41 | memcpy(pv, *p, n); | ||
42 | *p += n; | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * bounds check input. | ||
47 | */ | ||
48 | #define ceph_decode_need(p, end, n, bad) \ | ||
49 | do { \ | ||
50 | if (unlikely(*(p) + (n) > (end))) \ | ||
51 | goto bad; \ | ||
52 | } while (0) | ||
53 | |||
54 | #define ceph_decode_64_safe(p, end, v, bad) \ | ||
55 | do { \ | ||
56 | ceph_decode_need(p, end, sizeof(u64), bad); \ | ||
57 | v = ceph_decode_64(p); \ | ||
58 | } while (0) | ||
59 | #define ceph_decode_32_safe(p, end, v, bad) \ | ||
60 | do { \ | ||
61 | ceph_decode_need(p, end, sizeof(u32), bad); \ | ||
62 | v = ceph_decode_32(p); \ | ||
63 | } while (0) | ||
64 | #define ceph_decode_16_safe(p, end, v, bad) \ | ||
65 | do { \ | ||
66 | ceph_decode_need(p, end, sizeof(u16), bad); \ | ||
67 | v = ceph_decode_16(p); \ | ||
68 | } while (0) | ||
69 | #define ceph_decode_8_safe(p, end, v, bad) \ | ||
70 | do { \ | ||
71 | ceph_decode_need(p, end, sizeof(u8), bad); \ | ||
72 | v = ceph_decode_8(p); \ | ||
73 | } while (0) | ||
74 | |||
75 | #define ceph_decode_copy_safe(p, end, pv, n, bad) \ | ||
76 | do { \ | ||
77 | ceph_decode_need(p, end, n, bad); \ | ||
78 | ceph_decode_copy(p, pv, n); \ | ||
79 | } while (0) | ||
80 | |||
81 | /* | ||
82 | * struct ceph_timespec <-> struct timespec | ||
83 | */ | ||
84 | static inline void ceph_decode_timespec(struct timespec *ts, | ||
85 | const struct ceph_timespec *tv) | ||
86 | { | ||
87 | ts->tv_sec = le32_to_cpu(tv->tv_sec); | ||
88 | ts->tv_nsec = le32_to_cpu(tv->tv_nsec); | ||
89 | } | ||
90 | static inline void ceph_encode_timespec(struct ceph_timespec *tv, | ||
91 | const struct timespec *ts) | ||
92 | { | ||
93 | tv->tv_sec = cpu_to_le32(ts->tv_sec); | ||
94 | tv->tv_nsec = cpu_to_le32(ts->tv_nsec); | ||
95 | } | ||
96 | |||
97 | /* | ||
98 | * sockaddr_storage <-> ceph_sockaddr | ||
99 | */ | ||
100 | static inline void ceph_encode_addr(struct ceph_entity_addr *a) | ||
101 | { | ||
102 | __be16 ss_family = htons(a->in_addr.ss_family); | ||
103 | a->in_addr.ss_family = *(__u16 *)&ss_family; | ||
104 | } | ||
105 | static inline void ceph_decode_addr(struct ceph_entity_addr *a) | ||
106 | { | ||
107 | __be16 ss_family = *(__be16 *)&a->in_addr.ss_family; | ||
108 | a->in_addr.ss_family = ntohs(ss_family); | ||
109 | WARN_ON(a->in_addr.ss_family == 512); | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * encoders | ||
114 | */ | ||
115 | static inline void ceph_encode_64(void **p, u64 v) | ||
116 | { | ||
117 | put_unaligned_le64(v, (__le64 *)*p); | ||
118 | *p += sizeof(u64); | ||
119 | } | ||
120 | static inline void ceph_encode_32(void **p, u32 v) | ||
121 | { | ||
122 | put_unaligned_le32(v, (__le32 *)*p); | ||
123 | *p += sizeof(u32); | ||
124 | } | ||
125 | static inline void ceph_encode_16(void **p, u16 v) | ||
126 | { | ||
127 | put_unaligned_le16(v, (__le16 *)*p); | ||
128 | *p += sizeof(u16); | ||
129 | } | ||
130 | static inline void ceph_encode_8(void **p, u8 v) | ||
131 | { | ||
132 | *(u8 *)*p = v; | ||
133 | (*p)++; | ||
134 | } | ||
135 | static inline void ceph_encode_copy(void **p, const void *s, int len) | ||
136 | { | ||
137 | memcpy(*p, s, len); | ||
138 | *p += len; | ||
139 | } | ||
140 | |||
141 | /* | ||
142 | * filepath, string encoders | ||
143 | */ | ||
144 | static inline void ceph_encode_filepath(void **p, void *end, | ||
145 | u64 ino, const char *path) | ||
146 | { | ||
147 | u32 len = path ? strlen(path) : 0; | ||
148 | BUG_ON(*p + sizeof(ino) + sizeof(len) + len > end); | ||
149 | ceph_encode_8(p, 1); | ||
150 | ceph_encode_64(p, ino); | ||
151 | ceph_encode_32(p, len); | ||
152 | if (len) | ||
153 | memcpy(*p, path, len); | ||
154 | *p += len; | ||
155 | } | ||
156 | |||
157 | static inline void ceph_encode_string(void **p, void *end, | ||
158 | const char *s, u32 len) | ||
159 | { | ||
160 | BUG_ON(*p + sizeof(len) + len > end); | ||
161 | ceph_encode_32(p, len); | ||
162 | if (len) | ||
163 | memcpy(*p, s, len); | ||
164 | *p += len; | ||
165 | } | ||
166 | |||
167 | #define ceph_encode_need(p, end, n, bad) \ | ||
168 | do { \ | ||
169 | if (unlikely(*(p) + (n) > (end))) \ | ||
170 | goto bad; \ | ||
171 | } while (0) | ||
172 | |||
173 | #define ceph_encode_64_safe(p, end, v, bad) \ | ||
174 | do { \ | ||
175 | ceph_encode_need(p, end, sizeof(u64), bad); \ | ||
176 | ceph_encode_64(p, v); \ | ||
177 | } while (0) | ||
178 | #define ceph_encode_32_safe(p, end, v, bad) \ | ||
179 | do { \ | ||
180 | ceph_encode_need(p, end, sizeof(u32), bad); \ | ||
181 | ceph_encode_32(p, v); \ | ||
182 | } while (0) | ||
183 | #define ceph_encode_16_safe(p, end, v, bad) \ | ||
184 | do { \ | ||
185 | ceph_encode_need(p, end, sizeof(u16), bad); \ | ||
186 | ceph_encode_16(p, v); \ | ||
187 | } while (0) | ||
188 | |||
189 | #define ceph_encode_copy_safe(p, end, pv, n, bad) \ | ||
190 | do { \ | ||
191 | ceph_encode_need(p, end, n, bad); \ | ||
192 | ceph_encode_copy(p, pv, n); \ | ||
193 | } while (0) | ||
194 | #define ceph_encode_string_safe(p, end, s, n, bad) \ | ||
195 | do { \ | ||
196 | ceph_encode_need(p, end, n, bad); \ | ||
197 | ceph_encode_string(p, end, s, n); \ | ||
198 | } while (0) | ||
199 | |||
200 | |||
201 | #endif | ||
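The *_safe variants exist so a caller can decode a run of fields with a single error label instead of an explicit length check before every read. A small hypothetical example, decoding a length-prefixed blob followed by a u64:

static int example_decode(void **p, void *end, u64 *val)
{
	u32 len;

	ceph_decode_32_safe(p, end, len, bad);	/* blob length */
	ceph_decode_need(p, end, len, bad);	/* whole blob present? */
	*p += len;				/* skip the blob payload */
	ceph_decode_64_safe(p, end, *val, bad);
	return 0;

bad:
	return -ERANGE;				/* input ended early */
}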
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h new file mode 100644 index 000000000000..f22b2e941686 --- /dev/null +++ b/include/linux/ceph/libceph.h | |||
@@ -0,0 +1,249 @@ | |||
1 | #ifndef _FS_CEPH_LIBCEPH_H | ||
2 | #define _FS_CEPH_LIBCEPH_H | ||
3 | |||
4 | #include "ceph_debug.h" | ||
5 | |||
6 | #include <asm/unaligned.h> | ||
7 | #include <linux/backing-dev.h> | ||
8 | #include <linux/completion.h> | ||
9 | #include <linux/exportfs.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/mempool.h> | ||
12 | #include <linux/pagemap.h> | ||
13 | #include <linux/wait.h> | ||
14 | #include <linux/writeback.h> | ||
15 | #include <linux/slab.h> | ||
16 | |||
17 | #include "types.h" | ||
18 | #include "messenger.h" | ||
19 | #include "msgpool.h" | ||
20 | #include "mon_client.h" | ||
21 | #include "osd_client.h" | ||
22 | #include "ceph_fs.h" | ||
23 | |||
24 | /* | ||
25 | * Supported features | ||
26 | */ | ||
27 | #define CEPH_FEATURE_SUPPORTED_DEFAULT CEPH_FEATURE_NOSRCADDR | ||
28 | #define CEPH_FEATURE_REQUIRED_DEFAULT CEPH_FEATURE_NOSRCADDR | ||
29 | |||
30 | /* | ||
31 | * mount options | ||
32 | */ | ||
33 | #define CEPH_OPT_FSID (1<<0) | ||
34 | #define CEPH_OPT_NOSHARE (1<<1) /* don't share client with other sbs */ | ||
35 | #define CEPH_OPT_MYIP (1<<2) /* specified my ip */ | ||
36 | #define CEPH_OPT_NOCRC (1<<3) /* no data crc on writes */ | ||
37 | |||
38 | #define CEPH_OPT_DEFAULT (0) | ||
39 | |||
40 | #define ceph_set_opt(client, opt) \ | ||
41 | (client)->options->flags |= CEPH_OPT_##opt; | ||
42 | #define ceph_test_opt(client, opt) \ | ||
43 | (!!((client)->options->flags & CEPH_OPT_##opt)) | ||
44 | |||
45 | struct ceph_options { | ||
46 | int flags; | ||
47 | struct ceph_fsid fsid; | ||
48 | struct ceph_entity_addr my_addr; | ||
49 | int mount_timeout; | ||
50 | int osd_idle_ttl; | ||
51 | int osd_timeout; | ||
52 | int osd_keepalive_timeout; | ||
53 | |||
54 | /* | ||
55 | * any type that can't be simply compared or doesn't need | ||
56 | * to be compared should go beyond this point, | ||
57 | * ceph_compare_options() should be updated accordingly | ||
58 | */ | ||
59 | |||
60 | struct ceph_entity_addr *mon_addr; /* should be the first | ||
61 | pointer type of args */ | ||
62 | int num_mon; | ||
63 | char *name; | ||
64 | char *secret; | ||
65 | }; | ||
66 | |||
67 | /* | ||
68 | * defaults | ||
69 | */ | ||
70 | #define CEPH_MOUNT_TIMEOUT_DEFAULT 60 | ||
71 | #define CEPH_OSD_TIMEOUT_DEFAULT 60 /* seconds */ | ||
72 | #define CEPH_OSD_KEEPALIVE_DEFAULT 5 | ||
73 | #define CEPH_OSD_IDLE_TTL_DEFAULT 60 | ||
74 | #define CEPH_MOUNT_RSIZE_DEFAULT (512*1024) /* readahead */ | ||
75 | |||
76 | #define CEPH_MSG_MAX_FRONT_LEN (16*1024*1024) | ||
77 | #define CEPH_MSG_MAX_DATA_LEN (16*1024*1024) | ||
78 | |||
79 | #define CEPH_AUTH_NAME_DEFAULT "guest" | ||
80 | |||
81 | /* | ||
82 | * Delay telling the MDS we no longer want caps, in case we reopen | ||
83 | * the file. Delay a minimum amount of time, even if we send a cap | ||
84 | * message for some other reason. Otherwise, take the opportunity to | ||
85 | * update the mds to avoid sending another message later. | ||
86 | */ | ||
87 | #define CEPH_CAPS_WANTED_DELAY_MIN_DEFAULT 5 /* cap release delay */ | ||
88 | #define CEPH_CAPS_WANTED_DELAY_MAX_DEFAULT 60 /* cap release delay */ | ||
89 | |||
90 | #define CEPH_CAP_RELEASE_SAFETY_DEFAULT (CEPH_CAPS_PER_RELEASE * 4) | ||
91 | |||
92 | /* mount state */ | ||
93 | enum { | ||
94 | CEPH_MOUNT_MOUNTING, | ||
95 | CEPH_MOUNT_MOUNTED, | ||
96 | CEPH_MOUNT_UNMOUNTING, | ||
97 | CEPH_MOUNT_UNMOUNTED, | ||
98 | CEPH_MOUNT_SHUTDOWN, | ||
99 | }; | ||
100 | |||
101 | /* | ||
102 | * subtract jiffies | ||
103 | */ | ||
104 | static inline unsigned long time_sub(unsigned long a, unsigned long b) | ||
105 | { | ||
106 | BUG_ON(time_after(b, a)); | ||
107 | return (long)a - (long)b; | ||
108 | } | ||
109 | |||
110 | struct ceph_mds_client; | ||
111 | |||
112 | /* | ||
113 | * per client state | ||
114 | * | ||
115 | * possibly shared by multiple mount points, if they are | ||
116 | * mounting the same ceph filesystem/cluster. | ||
117 | */ | ||
118 | struct ceph_client { | ||
119 | struct ceph_fsid fsid; | ||
120 | bool have_fsid; | ||
121 | |||
122 | void *private; | ||
123 | |||
124 | struct ceph_options *options; | ||
125 | |||
126 | struct mutex mount_mutex; /* serialize mount attempts */ | ||
127 | wait_queue_head_t auth_wq; | ||
128 | int auth_err; | ||
129 | |||
130 | int (*extra_mon_dispatch)(struct ceph_client *, struct ceph_msg *); | ||
131 | |||
132 | u32 supported_features; | ||
133 | u32 required_features; | ||
134 | |||
135 | struct ceph_messenger *msgr; /* messenger instance */ | ||
136 | struct ceph_mon_client monc; | ||
137 | struct ceph_osd_client osdc; | ||
138 | |||
139 | #ifdef CONFIG_DEBUG_FS | ||
140 | struct dentry *debugfs_dir; | ||
141 | struct dentry *debugfs_monmap; | ||
142 | struct dentry *debugfs_osdmap; | ||
143 | #endif | ||
144 | }; | ||
145 | |||
146 | |||
147 | |||
148 | /* | ||
149 | * snapshots | ||
150 | */ | ||
151 | |||
152 | /* | ||
153 | * A "snap context" is the set of existing snapshots when we | ||
154 | * write data. It is used by the OSD to guide its COW behavior. | ||
155 | * | ||
156 | * The ceph_snap_context is refcounted, and attached to each dirty | ||
157 | * page, indicating which context the dirty data belonged to when it was | ||
158 | * dirtied. | ||
159 | */ | ||
160 | struct ceph_snap_context { | ||
161 | atomic_t nref; | ||
162 | u64 seq; | ||
163 | int num_snaps; | ||
164 | u64 snaps[]; | ||
165 | }; | ||
166 | |||
167 | static inline struct ceph_snap_context * | ||
168 | ceph_get_snap_context(struct ceph_snap_context *sc) | ||
169 | { | ||
170 | /* | ||
171 | printk("get_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
172 | atomic_read(&sc->nref)+1); | ||
173 | */ | ||
174 | if (sc) | ||
175 | atomic_inc(&sc->nref); | ||
176 | return sc; | ||
177 | } | ||
178 | |||
179 | static inline void ceph_put_snap_context(struct ceph_snap_context *sc) | ||
180 | { | ||
181 | if (!sc) | ||
182 | return; | ||
183 | /* | ||
184 | printk("put_snap_context %p %d -> %d\n", sc, atomic_read(&sc->nref), | ||
185 | atomic_read(&sc->nref)-1); | ||
186 | */ | ||
187 | if (atomic_dec_and_test(&sc->nref)) { | ||
188 | /*printk(" deleting snap_context %p\n", sc);*/ | ||
189 | kfree(sc); | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * calculate the number of pages a given length and offset map onto, | ||
195 | * if we align the data. | ||
196 | */ | ||
197 | static inline int calc_pages_for(u64 off, u64 len) | ||
198 | { | ||
199 | return ((off+len+PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT) - | ||
200 | (off >> PAGE_CACHE_SHIFT); | ||
201 | } | ||
202 | |||
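A worked value makes the rounding clear. Assuming 4 KB pages, off = 1000 and len = 5000 cover file bytes 1000..5999, i.e. page indexes 0 and 1:

/*
 * calc_pages_for(1000, 5000)
 *	= ((1000 + 5000 + 4095) >> 12) - (1000 >> 12)
 *	= 2 - 0
 *	= 2 pages
 */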
203 | /* ceph_common.c */ | ||
204 | extern const char *ceph_msg_type_name(int type); | ||
205 | extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid); | ||
206 | extern struct kmem_cache *ceph_inode_cachep; | ||
207 | extern struct kmem_cache *ceph_cap_cachep; | ||
208 | extern struct kmem_cache *ceph_dentry_cachep; | ||
209 | extern struct kmem_cache *ceph_file_cachep; | ||
210 | |||
211 | extern int ceph_parse_options(struct ceph_options **popt, char *options, | ||
212 | const char *dev_name, const char *dev_name_end, | ||
213 | int (*parse_extra_token)(char *c, void *private), | ||
214 | void *private); | ||
215 | extern void ceph_destroy_options(struct ceph_options *opt); | ||
216 | extern int ceph_compare_options(struct ceph_options *new_opt, | ||
217 | struct ceph_client *client); | ||
218 | extern struct ceph_client *ceph_create_client(struct ceph_options *opt, | ||
219 | void *private); | ||
220 | extern u64 ceph_client_id(struct ceph_client *client); | ||
221 | extern void ceph_destroy_client(struct ceph_client *client); | ||
222 | extern int __ceph_open_session(struct ceph_client *client, | ||
223 | unsigned long started); | ||
224 | extern int ceph_open_session(struct ceph_client *client); | ||
225 | |||
226 | /* pagevec.c */ | ||
227 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | ||
228 | |||
229 | extern struct page **ceph_get_direct_page_vector(const char __user *data, | ||
230 | int num_pages, | ||
231 | loff_t off, size_t len); | ||
232 | extern void ceph_put_page_vector(struct page **pages, int num_pages); | ||
233 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | ||
234 | extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags); | ||
235 | extern int ceph_copy_user_to_page_vector(struct page **pages, | ||
236 | const char __user *data, | ||
237 | loff_t off, size_t len); | ||
238 | extern int ceph_copy_to_page_vector(struct page **pages, | ||
239 | const char *data, | ||
240 | loff_t off, size_t len); | ||
241 | extern int ceph_copy_from_page_vector(struct page **pages, | ||
242 | char *data, | ||
243 | loff_t off, size_t len); | ||
244 | extern int ceph_copy_page_vector_to_user(struct page **pages, char __user *data, | ||
245 | loff_t off, size_t len); | ||
246 | extern void ceph_zero_page_vector_range(int off, int len, struct page **pages); | ||
247 | |||
248 | |||
249 | #endif /* _FS_CEPH_LIBCEPH_H */ | ||
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h new file mode 100644 index 000000000000..4c5cb0880bba --- /dev/null +++ b/include/linux/ceph/mdsmap.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef _FS_CEPH_MDSMAP_H | ||
2 | #define _FS_CEPH_MDSMAP_H | ||
3 | |||
4 | #include "types.h" | ||
5 | |||
6 | /* | ||
7 | * mds map - describe servers in the mds cluster. | ||
8 | * | ||
9 | * we limit fields to those the client actually cares about | ||
10 | */ | ||
11 | struct ceph_mds_info { | ||
12 | u64 global_id; | ||
13 | struct ceph_entity_addr addr; | ||
14 | s32 state; | ||
15 | int num_export_targets; | ||
16 | bool laggy; | ||
17 | u32 *export_targets; | ||
18 | }; | ||
19 | |||
20 | struct ceph_mdsmap { | ||
21 | u32 m_epoch, m_client_epoch, m_last_failure; | ||
22 | u32 m_root; | ||
23 | u32 m_session_timeout; /* seconds */ | ||
24 | u32 m_session_autoclose; /* seconds */ | ||
25 | u64 m_max_file_size; | ||
26 | u32 m_max_mds; /* size of m_addr, m_state arrays */ | ||
27 | struct ceph_mds_info *m_info; | ||
28 | |||
29 | /* which object pools file data can be stored in */ | ||
30 | int m_num_data_pg_pools; | ||
31 | u32 *m_data_pg_pools; | ||
32 | u32 m_cas_pg_pool; | ||
33 | }; | ||
34 | |||
35 | static inline struct ceph_entity_addr * | ||
36 | ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w) | ||
37 | { | ||
38 | if (w >= m->m_max_mds) | ||
39 | return NULL; | ||
40 | return &m->m_info[w].addr; | ||
41 | } | ||
42 | |||
43 | static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w) | ||
44 | { | ||
45 | BUG_ON(w < 0); | ||
46 | if (w >= m->m_max_mds) | ||
47 | return CEPH_MDS_STATE_DNE; | ||
48 | return m->m_info[w].state; | ||
49 | } | ||
50 | |||
51 | static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w) | ||
52 | { | ||
53 | if (w >= 0 && w < m->m_max_mds) | ||
54 | return m->m_info[w].laggy; | ||
55 | return false; | ||
56 | } | ||
57 | |||
58 | extern int ceph_mdsmap_get_random_mds(struct ceph_mdsmap *m); | ||
59 | extern struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end); | ||
60 | extern void ceph_mdsmap_destroy(struct ceph_mdsmap *m); | ||
61 | |||
62 | #endif | ||
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h new file mode 100644 index 000000000000..5956d62c3057 --- /dev/null +++ b/include/linux/ceph/messenger.h | |||
@@ -0,0 +1,261 @@ | |||
1 | #ifndef __FS_CEPH_MESSENGER_H | ||
2 | #define __FS_CEPH_MESSENGER_H | ||
3 | |||
4 | #include <linux/kref.h> | ||
5 | #include <linux/mutex.h> | ||
6 | #include <linux/net.h> | ||
7 | #include <linux/radix-tree.h> | ||
8 | #include <linux/uio.h> | ||
9 | #include <linux/version.h> | ||
10 | #include <linux/workqueue.h> | ||
11 | |||
12 | #include "types.h" | ||
13 | #include "buffer.h" | ||
14 | |||
15 | struct ceph_msg; | ||
16 | struct ceph_connection; | ||
17 | |||
18 | extern struct workqueue_struct *ceph_msgr_wq; /* receive work queue */ | ||
19 | |||
20 | /* | ||
21 | * Ceph defines these callbacks for handling connection events. | ||
22 | */ | ||
23 | struct ceph_connection_operations { | ||
24 | struct ceph_connection *(*get)(struct ceph_connection *); | ||
25 | void (*put)(struct ceph_connection *); | ||
26 | |||
27 | /* handle an incoming message. */ | ||
28 | void (*dispatch) (struct ceph_connection *con, struct ceph_msg *m); | ||
29 | |||
30 | /* authorize an outgoing connection */ | ||
31 | int (*get_authorizer) (struct ceph_connection *con, | ||
32 | void **buf, int *len, int *proto, | ||
33 | void **reply_buf, int *reply_len, int force_new); | ||
34 | int (*verify_authorizer_reply) (struct ceph_connection *con, int len); | ||
35 | int (*invalidate_authorizer)(struct ceph_connection *con); | ||
36 | |||
37 | /* protocol version mismatch */ | ||
38 | void (*bad_proto) (struct ceph_connection *con); | ||
39 | |||
40 | /* there was some error on the socket (disconnect, whatever) */ | ||
41 | void (*fault) (struct ceph_connection *con); | ||
42 | |||
43 | /* a remote host has terminated a message exchange session, and messages | ||
44 | * we sent (or they tried to send us) may be lost. */ | ||
45 | void (*peer_reset) (struct ceph_connection *con); | ||
46 | |||
47 | struct ceph_msg * (*alloc_msg) (struct ceph_connection *con, | ||
48 | struct ceph_msg_header *hdr, | ||
49 | int *skip); | ||
50 | }; | ||
51 | |||
52 | /* use format string %s%d */ | ||
53 | #define ENTITY_NAME(n) ceph_entity_type_name((n).type), le64_to_cpu((n).num) | ||
54 | |||
55 | struct ceph_messenger { | ||
56 | struct ceph_entity_inst inst; /* my name+address */ | ||
57 | struct ceph_entity_addr my_enc_addr; | ||
58 | struct page *zero_page; /* used in certain error cases */ | ||
59 | |||
60 | bool nocrc; | ||
61 | |||
62 | /* | ||
62 | * the global_seq counts connections I (attempt to) initiate | ||
64 | * in order to disambiguate certain connect race conditions. | ||
65 | */ | ||
66 | u32 global_seq; | ||
67 | spinlock_t global_seq_lock; | ||
68 | |||
69 | u32 supported_features; | ||
70 | u32 required_features; | ||
71 | }; | ||
72 | |||
73 | /* | ||
74 | * a single message. it contains a header (src, dest, message type, etc.), | ||
75 | * footer (crc values, mainly), a "front" message body, and possibly a | ||
76 | * data payload (stored in some number of pages). | ||
77 | */ | ||
78 | struct ceph_msg { | ||
79 | struct ceph_msg_header hdr; /* header */ | ||
80 | struct ceph_msg_footer footer; /* footer */ | ||
81 | struct kvec front; /* unaligned blobs of message */ | ||
82 | struct ceph_buffer *middle; | ||
83 | struct page **pages; /* data payload. NOT OWNER. */ | ||
84 | unsigned nr_pages; /* size of page array */ | ||
85 | struct ceph_pagelist *pagelist; /* instead of pages */ | ||
86 | struct list_head list_head; | ||
87 | struct kref kref; | ||
88 | struct bio *bio; /* instead of pages/pagelist */ | ||
89 | struct bio *bio_iter; /* bio iterator */ | ||
90 | int bio_seg; /* current bio segment */ | ||
91 | struct ceph_pagelist *trail; /* the trailing part of the data */ | ||
92 | bool front_is_vmalloc; | ||
93 | bool more_to_follow; | ||
94 | bool needs_out_seq; | ||
95 | int front_max; | ||
96 | |||
97 | struct ceph_msgpool *pool; | ||
98 | }; | ||
99 | |||
100 | struct ceph_msg_pos { | ||
101 | int page, page_pos; /* which page; offset in page */ | ||
102 | int data_pos; /* offset in data payload */ | ||
103 | int did_page_crc; /* true if we've calculated crc for current page */ | ||
104 | }; | ||
105 | |||
106 | /* ceph connection fault delay defaults, for exponential backoff */ | ||
107 | #define BASE_DELAY_INTERVAL (HZ/2) | ||
108 | #define MAX_DELAY_INTERVAL (5 * 60 * HZ) | ||
109 | |||
110 | /* | ||
111 | * ceph_connection state bit flags | ||
112 | * | ||
113 | * QUEUED and BUSY are used together to ensure that only a single | ||
114 | * thread is currently opening, reading or writing data to the socket. | ||
115 | */ | ||
116 | #define LOSSYTX 0 /* we can close channel or drop messages on errors */ | ||
117 | #define CONNECTING 1 | ||
118 | #define NEGOTIATING 2 | ||
119 | #define KEEPALIVE_PENDING 3 | ||
120 | #define WRITE_PENDING 4 /* we have data ready to send */ | ||
121 | #define QUEUED 5 /* there is work queued on this connection */ | ||
122 | #define BUSY 6 /* work is being done */ | ||
123 | #define STANDBY 8 /* no outgoing messages, socket closed. we keep | ||
124 | * the ceph_connection around to maintain shared | ||
125 | * state with the peer. */ | ||
126 | #define CLOSED 10 /* we've closed the connection */ | ||
127 | #define SOCK_CLOSED 11 /* socket state changed to closed */ | ||
128 | #define OPENING 13 /* open connection w/ (possibly new) peer */ | ||
129 | #define DEAD 14 /* dead, about to kfree */ | ||
130 | |||
131 | /* | ||
132 | * A single connection with another host. | ||
133 | * | ||
134 | * We maintain a queue of outgoing messages, and some session state to | ||
135 | * ensure that we can preserve the lossless, ordered delivery of | ||
136 | * messages in the case of a TCP disconnect. | ||
137 | */ | ||
138 | struct ceph_connection { | ||
139 | void *private; | ||
140 | atomic_t nref; | ||
141 | |||
142 | const struct ceph_connection_operations *ops; | ||
143 | |||
144 | struct ceph_messenger *msgr; | ||
145 | struct socket *sock; | ||
146 | unsigned long state; /* connection state (see flags above) */ | ||
147 | const char *error_msg; /* error message, if any */ | ||
148 | |||
149 | struct ceph_entity_addr peer_addr; /* peer address */ | ||
150 | struct ceph_entity_name peer_name; /* peer name */ | ||
151 | struct ceph_entity_addr peer_addr_for_me; | ||
152 | unsigned peer_features; | ||
153 | u32 connect_seq; /* identify the most recent connection | ||
154 | attempt for this connection, client */ | ||
155 | u32 peer_global_seq; /* peer's global seq for this connection */ | ||
156 | |||
157 | int auth_retry; /* true if we need a newer authorizer */ | ||
158 | void *auth_reply_buf; /* where to put the authorizer reply */ | ||
159 | int auth_reply_buf_len; | ||
160 | |||
161 | struct mutex mutex; | ||
162 | |||
163 | /* out queue */ | ||
164 | struct list_head out_queue; | ||
165 | struct list_head out_sent; /* sending or sent but unacked */ | ||
166 | u64 out_seq; /* last message queued for send */ | ||
167 | bool out_keepalive_pending; | ||
168 | |||
169 | u64 in_seq, in_seq_acked; /* last message received, acked */ | ||
170 | |||
171 | /* connection negotiation temps */ | ||
172 | char in_banner[CEPH_BANNER_MAX_LEN]; | ||
173 | union { | ||
174 | struct { /* outgoing connection */ | ||
175 | struct ceph_msg_connect out_connect; | ||
176 | struct ceph_msg_connect_reply in_reply; | ||
177 | }; | ||
178 | struct { /* incoming */ | ||
179 | struct ceph_msg_connect in_connect; | ||
180 | struct ceph_msg_connect_reply out_reply; | ||
181 | }; | ||
182 | }; | ||
183 | struct ceph_entity_addr actual_peer_addr; | ||
184 | |||
185 | /* message out temps */ | ||
186 | struct ceph_msg *out_msg; /* sending message (== tail of | ||
187 | out_sent) */ | ||
188 | bool out_msg_done; | ||
189 | struct ceph_msg_pos out_msg_pos; | ||
190 | |||
191 | struct kvec out_kvec[8], /* sending header/footer data */ | ||
192 | *out_kvec_cur; | ||
193 | int out_kvec_left; /* kvec's left in out_kvec */ | ||
194 | int out_skip; /* skip this many bytes */ | ||
195 | int out_kvec_bytes; /* total bytes left */ | ||
196 | bool out_kvec_is_msg; /* kvec refers to out_msg */ | ||
197 | int out_more; /* there is more data after the kvecs */ | ||
198 | __le64 out_temp_ack; /* for writing an ack */ | ||
199 | |||
200 | /* message in temps */ | ||
201 | struct ceph_msg_header in_hdr; | ||
202 | struct ceph_msg *in_msg; | ||
203 | struct ceph_msg_pos in_msg_pos; | ||
204 | u32 in_front_crc, in_middle_crc, in_data_crc; /* calculated crc */ | ||
205 | |||
206 | char in_tag; /* protocol control byte */ | ||
207 | int in_base_pos; /* bytes read */ | ||
208 | __le64 in_temp_ack; /* for reading an ack */ | ||
209 | |||
210 | struct delayed_work work; /* send|recv work */ | ||
211 | unsigned long delay; /* current delay interval */ | ||
212 | }; | ||
213 | |||
214 | |||
215 | extern const char *ceph_pr_addr(const struct sockaddr_storage *ss); | ||
216 | extern int ceph_parse_ips(const char *c, const char *end, | ||
217 | struct ceph_entity_addr *addr, | ||
218 | int max_count, int *count); | ||
219 | |||
220 | |||
221 | extern int ceph_msgr_init(void); | ||
222 | extern void ceph_msgr_exit(void); | ||
223 | extern void ceph_msgr_flush(void); | ||
224 | |||
225 | extern struct ceph_messenger *ceph_messenger_create( | ||
226 | struct ceph_entity_addr *myaddr, | ||
227 | u32 features, u32 required); | ||
228 | extern void ceph_messenger_destroy(struct ceph_messenger *); | ||
229 | |||
230 | extern void ceph_con_init(struct ceph_messenger *msgr, | ||
231 | struct ceph_connection *con); | ||
232 | extern void ceph_con_open(struct ceph_connection *con, | ||
233 | struct ceph_entity_addr *addr); | ||
234 | extern bool ceph_con_opened(struct ceph_connection *con); | ||
235 | extern void ceph_con_close(struct ceph_connection *con); | ||
236 | extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg); | ||
237 | extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg); | ||
238 | extern void ceph_con_revoke_message(struct ceph_connection *con, | ||
239 | struct ceph_msg *msg); | ||
240 | extern void ceph_con_keepalive(struct ceph_connection *con); | ||
241 | extern struct ceph_connection *ceph_con_get(struct ceph_connection *con); | ||
242 | extern void ceph_con_put(struct ceph_connection *con); | ||
243 | |||
244 | extern struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags); | ||
245 | extern void ceph_msg_kfree(struct ceph_msg *m); | ||
246 | |||
247 | |||
248 | static inline struct ceph_msg *ceph_msg_get(struct ceph_msg *msg) | ||
249 | { | ||
250 | kref_get(&msg->kref); | ||
251 | return msg; | ||
252 | } | ||
253 | extern void ceph_msg_last_put(struct kref *kref); | ||
254 | static inline void ceph_msg_put(struct ceph_msg *msg) | ||
255 | { | ||
256 | kref_put(&msg->kref, ceph_msg_last_put); | ||
257 | } | ||
258 | |||
259 | extern void ceph_msg_dump(struct ceph_msg *msg); | ||
260 | |||
261 | #endif | ||
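A hedged sketch (not part of this series) of the minimal client-side wiring for the API above: create a messenger, attach a connection with an ops table, open it to a peer and queue one message. The ops callbacks, the made-up message type, the ERR_PTR return convention and the reference handling are illustrative assumptions; real users such as the mon and osd clients keep much more state behind con->private.

#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/ceph/messenger.h>

static struct ceph_connection *example_con_get(struct ceph_connection *con)
{
	return con;			/* no refcounting in this toy example */
}

static void example_con_put(struct ceph_connection *con)
{
}

static void example_dispatch(struct ceph_connection *con, struct ceph_msg *m)
{
	ceph_msg_put(m);		/* drop every incoming message */
}

static const struct ceph_connection_operations example_con_ops = {
	.get      = example_con_get,
	.put      = example_con_put,
	.dispatch = example_dispatch,
};

static int example_open_and_send(struct ceph_entity_addr *my_addr,
				 struct ceph_entity_addr *peer_addr,
				 struct ceph_connection *con)
{
	struct ceph_messenger *msgr;
	struct ceph_msg *msg;

	msgr = ceph_messenger_create(my_addr, 0, 0);
	if (IS_ERR(msgr))		/* assumed to return an ERR_PTR on failure */
		return PTR_ERR(msgr);

	ceph_con_init(msgr, con);
	con->ops = &example_con_ops;
	ceph_con_open(con, peer_addr);

	msg = ceph_msg_new(1 /* made-up type */, 0, GFP_NOFS);
	if (!msg)
		return -ENOMEM;
	ceph_con_send(con, msg);	/* queue for transmission */

	return 0;
}

Teardown would presumably go through ceph_con_close() and ceph_messenger_destroy(), with ceph_msgr_init()/ceph_msgr_exit() bracketing overall use of the receive workqueue.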
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h new file mode 100644 index 000000000000..545f85917780 --- /dev/null +++ b/include/linux/ceph/mon_client.h | |||
@@ -0,0 +1,122 @@ | |||
1 | #ifndef _FS_CEPH_MON_CLIENT_H | ||
2 | #define _FS_CEPH_MON_CLIENT_H | ||
3 | |||
4 | #include <linux/completion.h> | ||
5 | #include <linux/kref.h> | ||
6 | #include <linux/rbtree.h> | ||
7 | |||
8 | #include "messenger.h" | ||
9 | |||
10 | struct ceph_client; | ||
11 | struct ceph_mount_args; | ||
12 | struct ceph_auth_client; | ||
13 | |||
14 | /* | ||
15 | * The monitor map enumerates the set of all monitors. | ||
16 | */ | ||
17 | struct ceph_monmap { | ||
18 | struct ceph_fsid fsid; | ||
19 | u32 epoch; | ||
20 | u32 num_mon; | ||
21 | struct ceph_entity_inst mon_inst[0]; | ||
22 | }; | ||
23 | |||
24 | struct ceph_mon_client; | ||
25 | struct ceph_mon_generic_request; | ||
26 | |||
27 | |||
28 | /* | ||
29 | * Generic mechanism for resending monitor requests. | ||
30 | */ | ||
31 | typedef void (*ceph_monc_request_func_t)(struct ceph_mon_client *monc, | ||
32 | int newmon); | ||
33 | |||
34 | /* a pending monitor request */ | ||
35 | struct ceph_mon_request { | ||
36 | struct ceph_mon_client *monc; | ||
37 | struct delayed_work delayed_work; | ||
38 | unsigned long delay; | ||
39 | ceph_monc_request_func_t do_request; | ||
40 | }; | ||
41 | |||
42 | /* | ||
43 | * ceph_mon_generic_request is used for the statfs and poolop requests, | ||
44 | * which are being done a bit differently because we need to get data back | ||
45 | * to the caller. | ||
46 | */ | ||
47 | struct ceph_mon_generic_request { | ||
48 | struct kref kref; | ||
49 | u64 tid; | ||
50 | struct rb_node node; | ||
51 | int result; | ||
52 | void *buf; | ||
53 | int buf_len; | ||
54 | struct completion completion; | ||
55 | struct ceph_msg *request; /* original request */ | ||
56 | struct ceph_msg *reply; /* and reply */ | ||
57 | }; | ||
58 | |||
59 | struct ceph_mon_client { | ||
60 | struct ceph_client *client; | ||
61 | struct ceph_monmap *monmap; | ||
62 | |||
63 | struct mutex mutex; | ||
64 | struct delayed_work delayed_work; | ||
65 | |||
66 | struct ceph_auth_client *auth; | ||
67 | struct ceph_msg *m_auth, *m_auth_reply, *m_subscribe, *m_subscribe_ack; | ||
68 | int pending_auth; | ||
69 | |||
70 | bool hunting; | ||
71 | int cur_mon; /* last monitor i contacted */ | ||
72 | unsigned long sub_sent, sub_renew_after; | ||
73 | struct ceph_connection *con; | ||
74 | bool have_fsid; | ||
75 | |||
76 | /* pending generic requests */ | ||
77 | struct rb_root generic_request_tree; | ||
78 | int num_generic_requests; | ||
79 | u64 last_tid; | ||
80 | |||
81 | /* mds/osd map */ | ||
82 | int want_mdsmap; | ||
83 | int want_next_osdmap; /* 1 = want, 2 = want+asked */ | ||
84 | u32 have_osdmap, have_mdsmap; | ||
85 | |||
86 | #ifdef CONFIG_DEBUG_FS | ||
87 | struct dentry *debugfs_file; | ||
88 | #endif | ||
89 | }; | ||
90 | |||
91 | extern struct ceph_monmap *ceph_monmap_decode(void *p, void *end); | ||
92 | extern int ceph_monmap_contains(struct ceph_monmap *m, | ||
93 | struct ceph_entity_addr *addr); | ||
94 | |||
95 | extern int ceph_monc_init(struct ceph_mon_client *monc, struct ceph_client *cl); | ||
96 | extern void ceph_monc_stop(struct ceph_mon_client *monc); | ||
97 | |||
98 | /* | ||
99 | * The model here is to indicate that we need a new map of at least | ||
100 | * epoch @want, and also call in when we receive a map. We will | ||
101 | * periodically rerequest the map from the monitor cluster until we | ||
102 | * get what we want. | ||
103 | */ | ||
104 | extern int ceph_monc_got_mdsmap(struct ceph_mon_client *monc, u32 have); | ||
105 | extern int ceph_monc_got_osdmap(struct ceph_mon_client *monc, u32 have); | ||
106 | |||
107 | extern void ceph_monc_request_next_osdmap(struct ceph_mon_client *monc); | ||
108 | |||
109 | extern int ceph_monc_do_statfs(struct ceph_mon_client *monc, | ||
110 | struct ceph_statfs *buf); | ||
111 | |||
112 | extern int ceph_monc_open_session(struct ceph_mon_client *monc); | ||
113 | |||
114 | extern int ceph_monc_validate_auth(struct ceph_mon_client *monc); | ||
115 | |||
116 | extern int ceph_monc_create_snapid(struct ceph_mon_client *monc, | ||
117 | u32 pool, u64 *snapid); | ||
118 | |||
119 | extern int ceph_monc_delete_snapid(struct ceph_mon_client *monc, | ||
120 | u32 pool, u64 snapid); | ||
121 | |||
122 | #endif | ||
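A hedged sketch (not part of this series) of the map-epoch model described in the comment above ceph_monc_got_mdsmap(): the client reports the epoch it has just decoded, and asks for the next map whenever it suspects it is stale. The two caller functions here are illustrative assumptions.

#include <linux/ceph/mon_client.h>

static void example_handle_new_osdmap(struct ceph_mon_client *monc, u32 epoch)
{
	/* report the epoch we now have; the mon client stops asking for it */
	ceph_monc_got_osdmap(monc, epoch);
}

static void example_suspect_stale_osdmap(struct ceph_mon_client *monc)
{
	/* ask for the next epoch; the request is renewed periodically
	 * until a newer map actually arrives */
	ceph_monc_request_next_osdmap(monc);
}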
diff --git a/include/linux/ceph/msgpool.h b/include/linux/ceph/msgpool.h new file mode 100644 index 000000000000..a362605f9368 --- /dev/null +++ b/include/linux/ceph/msgpool.h | |||
@@ -0,0 +1,25 @@ | |||
1 | #ifndef _FS_CEPH_MSGPOOL | ||
2 | #define _FS_CEPH_MSGPOOL | ||
3 | |||
4 | #include <linux/mempool.h> | ||
5 | #include "messenger.h" | ||
6 | |||
7 | /* | ||
8 | * we use memory pools for preallocating messages we may receive, to | ||
9 | * avoid unexpected OOM conditions. | ||
10 | */ | ||
11 | struct ceph_msgpool { | ||
12 | const char *name; | ||
13 | mempool_t *pool; | ||
14 | int front_len; /* preallocated payload size */ | ||
15 | }; | ||
16 | |||
17 | extern int ceph_msgpool_init(struct ceph_msgpool *pool, | ||
18 | int front_len, int size, bool blocking, | ||
19 | const char *name); | ||
20 | extern void ceph_msgpool_destroy(struct ceph_msgpool *pool); | ||
21 | extern struct ceph_msg *ceph_msgpool_get(struct ceph_msgpool *, | ||
22 | int front_len); | ||
23 | extern void ceph_msgpool_put(struct ceph_msgpool *, struct ceph_msg *); | ||
24 | |||
25 | #endif | ||
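A hedged sketch (not part of this series) of the typical msgpool lifecycle using only the four functions declared above. The pool sizes, the pool name and the owning caller are illustrative assumptions.

#include <linux/ceph/msgpool.h>

static int example_msgpool_use(struct ceph_msgpool *pool)
{
	struct ceph_msg *msg;
	int err;

	/* preallocate 8 messages with 512-byte fronts; may block on alloc */
	err = ceph_msgpool_init(pool, 512, 8, true, "example-pool");
	if (err)
		return err;

	msg = ceph_msgpool_get(pool, 512);	/* front_len must fit the pool */
	if (msg)
		ceph_msgpool_put(pool, msg);	/* return it to the pool */

	ceph_msgpool_destroy(pool);
	return 0;
}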
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h new file mode 100644 index 000000000000..680d3d648cac --- /dev/null +++ b/include/linux/ceph/msgr.h | |||
@@ -0,0 +1,175 @@ | |||
1 | #ifndef CEPH_MSGR_H | ||
2 | #define CEPH_MSGR_H | ||
3 | |||
4 | /* | ||
5 | * Data types for message passing layer used by Ceph. | ||
6 | */ | ||
7 | |||
8 | #define CEPH_MON_PORT 6789 /* default monitor port */ | ||
9 | |||
10 | /* | ||
11 | * client-side processes will try to bind to ports in this | ||
12 | * range, simply for the benefit of tools like nmap or wireshark | ||
13 | * that would like to identify the protocol. | ||
14 | */ | ||
15 | #define CEPH_PORT_FIRST 6789 | ||
16 | #define CEPH_PORT_START 6800 /* non-monitors start here */ | ||
17 | #define CEPH_PORT_LAST 6900 | ||
18 | |||
19 | /* | ||
20 | * tcp connection banner. it includes a protocol version and must be | ||
21 | * adjusted whenever the wire protocol changes. try to keep this string | ||
22 | * length constant. | ||
23 | */ | ||
24 | #define CEPH_BANNER "ceph v027" | ||
25 | #define CEPH_BANNER_MAX_LEN 30 | ||
26 | |||
27 | |||
28 | /* | ||
29 | * Rollover-safe type and comparator for 32-bit sequence numbers. | ||
30 | * Comparator returns <0, 0, or >0. | ||
31 | */ | ||
32 | typedef __u32 ceph_seq_t; | ||
33 | |||
34 | static inline __s32 ceph_seq_cmp(__u32 a, __u32 b) | ||
35 | { | ||
36 | return (__s32)a - (__s32)b; | ||
37 | } | ||
38 | |||
39 | |||
40 | /* | ||
41 | * entity_name -- logical name for a process participating in the | ||
42 | * network, e.g. 'mds0' or 'osd3'. | ||
43 | */ | ||
44 | struct ceph_entity_name { | ||
45 | __u8 type; /* CEPH_ENTITY_TYPE_* */ | ||
46 | __le64 num; | ||
47 | } __attribute__ ((packed)); | ||
48 | |||
49 | #define CEPH_ENTITY_TYPE_MON 0x01 | ||
50 | #define CEPH_ENTITY_TYPE_MDS 0x02 | ||
51 | #define CEPH_ENTITY_TYPE_OSD 0x04 | ||
52 | #define CEPH_ENTITY_TYPE_CLIENT 0x08 | ||
53 | #define CEPH_ENTITY_TYPE_AUTH 0x20 | ||
54 | |||
55 | #define CEPH_ENTITY_TYPE_ANY 0xFF | ||
56 | |||
57 | extern const char *ceph_entity_type_name(int type); | ||
58 | |||
59 | /* | ||
60 | * entity_addr -- network address | ||
61 | */ | ||
62 | struct ceph_entity_addr { | ||
63 | __le32 type; | ||
64 | __le32 nonce; /* unique id for process (e.g. pid) */ | ||
65 | struct sockaddr_storage in_addr; | ||
66 | } __attribute__ ((packed)); | ||
67 | |||
68 | struct ceph_entity_inst { | ||
69 | struct ceph_entity_name name; | ||
70 | struct ceph_entity_addr addr; | ||
71 | } __attribute__ ((packed)); | ||
72 | |||
73 | |||
74 | /* used by message exchange protocol */ | ||
75 | #define CEPH_MSGR_TAG_READY 1 /* server->client: ready for messages */ | ||
76 | #define CEPH_MSGR_TAG_RESETSESSION 2 /* server->client: reset, try again */ | ||
77 | #define CEPH_MSGR_TAG_WAIT 3 /* server->client: wait for racing | ||
78 | incoming connection */ | ||
79 | #define CEPH_MSGR_TAG_RETRY_SESSION 4 /* server->client + cseq: try again | ||
80 | with higher cseq */ | ||
81 | #define CEPH_MSGR_TAG_RETRY_GLOBAL 5 /* server->client + gseq: try again | ||
82 | with higher gseq */ | ||
83 | #define CEPH_MSGR_TAG_CLOSE 6 /* closing pipe */ | ||
84 | #define CEPH_MSGR_TAG_MSG 7 /* message */ | ||
85 | #define CEPH_MSGR_TAG_ACK 8 /* message ack */ | ||
86 | #define CEPH_MSGR_TAG_KEEPALIVE 9 /* just a keepalive byte! */ | ||
87 | #define CEPH_MSGR_TAG_BADPROTOVER 10 /* bad protocol version */ | ||
88 | #define CEPH_MSGR_TAG_BADAUTHORIZER 11 /* bad authorizer */ | ||
89 | #define CEPH_MSGR_TAG_FEATURES 12 /* insufficient features */ | ||
90 | |||
91 | |||
92 | /* | ||
93 | * connection negotiation | ||
94 | */ | ||
95 | struct ceph_msg_connect { | ||
96 | __le64 features; /* supported feature bits */ | ||
97 | __le32 host_type; /* CEPH_ENTITY_TYPE_* */ | ||
98 | __le32 global_seq; /* count connections initiated by this host */ | ||
99 | __le32 connect_seq; /* count connections initiated in this session */ | ||
100 | __le32 protocol_version; | ||
101 | __le32 authorizer_protocol; | ||
102 | __le32 authorizer_len; | ||
103 | __u8 flags; /* CEPH_MSG_CONNECT_* */ | ||
104 | } __attribute__ ((packed)); | ||
105 | |||
106 | struct ceph_msg_connect_reply { | ||
107 | __u8 tag; | ||
108 | __le64 features; /* feature bits for this session */ | ||
109 | __le32 global_seq; | ||
110 | __le32 connect_seq; | ||
111 | __le32 protocol_version; | ||
112 | __le32 authorizer_len; | ||
113 | __u8 flags; | ||
114 | } __attribute__ ((packed)); | ||
115 | |||
116 | #define CEPH_MSG_CONNECT_LOSSY 1 /* messages i send may be safely dropped */ | ||
117 | |||
118 | |||
119 | /* | ||
120 | * message header | ||
121 | */ | ||
122 | struct ceph_msg_header_old { | ||
123 | __le64 seq; /* message seq# for this session */ | ||
124 | __le64 tid; /* transaction id */ | ||
125 | __le16 type; /* message type */ | ||
126 | __le16 priority; /* priority. higher value == higher priority */ | ||
127 | __le16 version; /* version of message encoding */ | ||
128 | |||
129 | __le32 front_len; /* bytes in main payload */ | ||
130 | __le32 middle_len;/* bytes in middle payload */ | ||
131 | __le32 data_len; /* bytes of data payload */ | ||
132 | __le16 data_off; /* sender: include full offset; | ||
133 | receiver: mask against ~PAGE_MASK */ | ||
134 | |||
135 | struct ceph_entity_inst src, orig_src; | ||
136 | __le32 reserved; | ||
137 | __le32 crc; /* header crc32c */ | ||
138 | } __attribute__ ((packed)); | ||
139 | |||
140 | struct ceph_msg_header { | ||
141 | __le64 seq; /* message seq# for this session */ | ||
142 | __le64 tid; /* transaction id */ | ||
143 | __le16 type; /* message type */ | ||
144 | __le16 priority; /* priority. higher value == higher priority */ | ||
145 | __le16 version; /* version of message encoding */ | ||
146 | |||
147 | __le32 front_len; /* bytes in main payload */ | ||
148 | __le32 middle_len;/* bytes in middle payload */ | ||
149 | __le32 data_len; /* bytes of data payload */ | ||
150 | __le16 data_off; /* sender: include full offset; | ||
151 | receiver: mask against ~PAGE_MASK */ | ||
152 | |||
153 | struct ceph_entity_name src; | ||
154 | __le32 reserved; | ||
155 | __le32 crc; /* header crc32c */ | ||
156 | } __attribute__ ((packed)); | ||
157 | |||
158 | #define CEPH_MSG_PRIO_LOW 64 | ||
159 | #define CEPH_MSG_PRIO_DEFAULT 127 | ||
160 | #define CEPH_MSG_PRIO_HIGH 196 | ||
161 | #define CEPH_MSG_PRIO_HIGHEST 255 | ||
162 | |||
163 | /* | ||
164 | * follows data payload | ||
165 | */ | ||
166 | struct ceph_msg_footer { | ||
167 | __le32 front_crc, middle_crc, data_crc; | ||
168 | __u8 flags; | ||
169 | } __attribute__ ((packed)); | ||
170 | |||
171 | #define CEPH_MSG_FOOTER_COMPLETE (1<<0) /* msg wasn't aborted */ | ||
172 | #define CEPH_MSG_FOOTER_NOCRC (1<<1) /* no data crc */ | ||
173 | |||
174 | |||
175 | #endif | ||
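A hedged sketch (not part of this series) of why the signed-difference trick in ceph_seq_cmp() above is rollover safe: with a plain unsigned '<', a counter value just before the 32-bit wrap would compare as newer than one just after it. The helper name and example values are illustrative.

#include <linux/types.h>
#include <linux/ceph/msgr.h>

static bool example_seq_older(ceph_seq_t a, ceph_seq_t b)
{
	/* true when a was issued before b, even across a 32-bit wrap;
	 * e.g. example_seq_older(0xfffffffeU, 2) is true, because
	 * (s32)0xfffffffe - (s32)2 == -4 */
	return ceph_seq_cmp(a, b) < 0;
}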
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h new file mode 100644 index 000000000000..6c91fb032c39 --- /dev/null +++ b/include/linux/ceph/osd_client.h | |||
@@ -0,0 +1,234 @@ | |||
1 | #ifndef _FS_CEPH_OSD_CLIENT_H | ||
2 | #define _FS_CEPH_OSD_CLIENT_H | ||
3 | |||
4 | #include <linux/completion.h> | ||
5 | #include <linux/kref.h> | ||
6 | #include <linux/mempool.h> | ||
7 | #include <linux/rbtree.h> | ||
8 | |||
9 | #include "types.h" | ||
10 | #include "osdmap.h" | ||
11 | #include "messenger.h" | ||
12 | |||
13 | struct ceph_msg; | ||
14 | struct ceph_snap_context; | ||
15 | struct ceph_osd_request; | ||
16 | struct ceph_osd_client; | ||
17 | struct ceph_authorizer; | ||
18 | struct ceph_pagelist; | ||
19 | |||
20 | /* | ||
21 | * completion callback for async writepages | ||
22 | */ | ||
23 | typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *, | ||
24 | struct ceph_msg *); | ||
25 | |||
26 | /* a given osd we're communicating with */ | ||
27 | struct ceph_osd { | ||
28 | atomic_t o_ref; | ||
29 | struct ceph_osd_client *o_osdc; | ||
30 | int o_osd; | ||
31 | int o_incarnation; | ||
32 | struct rb_node o_node; | ||
33 | struct ceph_connection o_con; | ||
34 | struct list_head o_requests; | ||
35 | struct list_head o_osd_lru; | ||
36 | struct ceph_authorizer *o_authorizer; | ||
37 | void *o_authorizer_buf, *o_authorizer_reply_buf; | ||
38 | size_t o_authorizer_buf_len, o_authorizer_reply_buf_len; | ||
39 | unsigned long lru_ttl; | ||
40 | int o_marked_for_keepalive; | ||
41 | struct list_head o_keepalive_item; | ||
42 | }; | ||
43 | |||
44 | /* an in-flight request */ | ||
45 | struct ceph_osd_request { | ||
46 | u64 r_tid; /* unique for this client */ | ||
47 | struct rb_node r_node; | ||
48 | struct list_head r_req_lru_item; | ||
49 | struct list_head r_osd_item; | ||
50 | struct ceph_osd *r_osd; | ||
51 | struct ceph_pg r_pgid; | ||
52 | int r_pg_osds[CEPH_PG_MAX_SIZE]; | ||
53 | int r_num_pg_osds; | ||
54 | |||
55 | struct ceph_connection *r_con_filling_msg; | ||
56 | |||
57 | struct ceph_msg *r_request, *r_reply; | ||
58 | int r_result; | ||
59 | int r_flags; /* any additional flags for the osd */ | ||
60 | u32 r_sent; /* >0 if r_request is sending/sent */ | ||
61 | int r_got_reply; | ||
62 | |||
63 | struct ceph_osd_client *r_osdc; | ||
64 | struct kref r_kref; | ||
65 | bool r_mempool; | ||
66 | struct completion r_completion, r_safe_completion; | ||
67 | ceph_osdc_callback_t r_callback, r_safe_callback; | ||
68 | struct ceph_eversion r_reassert_version; | ||
69 | struct list_head r_unsafe_item; | ||
70 | |||
71 | struct inode *r_inode; /* for use by callbacks */ | ||
72 | void *r_priv; /* ditto */ | ||
73 | |||
74 | char r_oid[40]; /* object name */ | ||
75 | int r_oid_len; | ||
76 | unsigned long r_stamp; /* send OR check time */ | ||
77 | bool r_resend; /* msg send failed, needs retry */ | ||
78 | |||
79 | struct ceph_file_layout r_file_layout; | ||
80 | struct ceph_snap_context *r_snapc; /* snap context for writes */ | ||
81 | unsigned r_num_pages; /* size of page array (follows) */ | ||
82 | struct page **r_pages; /* pages for data payload */ | ||
83 | int r_pages_from_pool; | ||
84 | int r_own_pages; /* if true, i own page list */ | ||
85 | #ifdef CONFIG_BLOCK | ||
86 | struct bio *r_bio; /* instead of pages */ | ||
87 | #endif | ||
88 | |||
89 | struct ceph_pagelist *r_trail; /* trailing part of the data */ | ||
90 | }; | ||
91 | |||
92 | struct ceph_osd_client { | ||
93 | struct ceph_client *client; | ||
94 | |||
95 | struct ceph_osdmap *osdmap; /* current map */ | ||
96 | struct rw_semaphore map_sem; | ||
97 | struct completion map_waiters; | ||
98 | u64 last_requested_map; | ||
99 | |||
100 | struct mutex request_mutex; | ||
101 | struct rb_root osds; /* osds */ | ||
102 | struct list_head osd_lru; /* idle osds */ | ||
103 | u64 timeout_tid; /* tid of timeout triggering rq */ | ||
104 | u64 last_tid; /* tid of last request */ | ||
105 | struct rb_root requests; /* pending requests */ | ||
106 | struct list_head req_lru; /* pending requests lru */ | ||
107 | int num_requests; | ||
108 | struct delayed_work timeout_work; | ||
109 | struct delayed_work osds_timeout_work; | ||
110 | #ifdef CONFIG_DEBUG_FS | ||
111 | struct dentry *debugfs_file; | ||
112 | #endif | ||
113 | |||
114 | mempool_t *req_mempool; | ||
115 | |||
116 | struct ceph_msgpool msgpool_op; | ||
117 | struct ceph_msgpool msgpool_op_reply; | ||
118 | }; | ||
119 | |||
120 | struct ceph_osd_req_op { | ||
121 | u16 op; /* CEPH_OSD_OP_* */ | ||
122 | u32 flags; /* CEPH_OSD_FLAG_* */ | ||
123 | union { | ||
124 | struct { | ||
125 | u64 offset, length; | ||
126 | u64 truncate_size; | ||
127 | u32 truncate_seq; | ||
128 | } extent; | ||
129 | struct { | ||
130 | const char *name; | ||
131 | u32 name_len; | ||
132 | const char *val; | ||
133 | u32 value_len; | ||
134 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ | ||
135 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ | ||
136 | } xattr; | ||
137 | struct { | ||
138 | const char *class_name; | ||
139 | __u8 class_len; | ||
140 | const char *method_name; | ||
141 | __u8 method_len; | ||
142 | __u8 argc; | ||
143 | const char *indata; | ||
144 | u32 indata_len; | ||
145 | } cls; | ||
146 | struct { | ||
147 | u64 cookie, count; | ||
148 | } pgls; | ||
149 | struct { | ||
150 | u64 snapid; | ||
151 | } snap; | ||
152 | }; | ||
153 | u32 payload_len; | ||
154 | }; | ||
155 | |||
156 | extern int ceph_osdc_init(struct ceph_osd_client *osdc, | ||
157 | struct ceph_client *client); | ||
158 | extern void ceph_osdc_stop(struct ceph_osd_client *osdc); | ||
159 | |||
160 | extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc, | ||
161 | struct ceph_msg *msg); | ||
162 | extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc, | ||
163 | struct ceph_msg *msg); | ||
164 | |||
165 | extern void ceph_calc_raw_layout(struct ceph_osd_client *osdc, | ||
166 | struct ceph_file_layout *layout, | ||
167 | u64 snapid, | ||
168 | u64 off, u64 *plen, u64 *bno, | ||
169 | struct ceph_osd_request *req, | ||
170 | struct ceph_osd_req_op *op); | ||
171 | |||
172 | extern struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc, | ||
173 | int flags, | ||
174 | struct ceph_snap_context *snapc, | ||
175 | struct ceph_osd_req_op *ops, | ||
176 | bool use_mempool, | ||
177 | gfp_t gfp_flags, | ||
178 | struct page **pages, | ||
179 | struct bio *bio); | ||
180 | |||
181 | extern void ceph_osdc_build_request(struct ceph_osd_request *req, | ||
182 | u64 off, u64 *plen, | ||
183 | struct ceph_osd_req_op *src_ops, | ||
184 | struct ceph_snap_context *snapc, | ||
185 | struct timespec *mtime, | ||
186 | const char *oid, | ||
187 | int oid_len); | ||
188 | |||
189 | extern struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *, | ||
190 | struct ceph_file_layout *layout, | ||
191 | struct ceph_vino vino, | ||
192 | u64 offset, u64 *len, int op, int flags, | ||
193 | struct ceph_snap_context *snapc, | ||
194 | int do_sync, u32 truncate_seq, | ||
195 | u64 truncate_size, | ||
196 | struct timespec *mtime, | ||
197 | bool use_mempool, int num_reply); | ||
198 | |||
199 | static inline void ceph_osdc_get_request(struct ceph_osd_request *req) | ||
200 | { | ||
201 | kref_get(&req->r_kref); | ||
202 | } | ||
203 | extern void ceph_osdc_release_request(struct kref *kref); | ||
204 | static inline void ceph_osdc_put_request(struct ceph_osd_request *req) | ||
205 | { | ||
206 | kref_put(&req->r_kref, ceph_osdc_release_request); | ||
207 | } | ||
208 | |||
209 | extern int ceph_osdc_start_request(struct ceph_osd_client *osdc, | ||
210 | struct ceph_osd_request *req, | ||
211 | bool nofail); | ||
212 | extern int ceph_osdc_wait_request(struct ceph_osd_client *osdc, | ||
213 | struct ceph_osd_request *req); | ||
214 | extern void ceph_osdc_sync(struct ceph_osd_client *osdc); | ||
215 | |||
216 | extern int ceph_osdc_readpages(struct ceph_osd_client *osdc, | ||
217 | struct ceph_vino vino, | ||
218 | struct ceph_file_layout *layout, | ||
219 | u64 off, u64 *plen, | ||
220 | u32 truncate_seq, u64 truncate_size, | ||
221 | struct page **pages, int nr_pages); | ||
222 | |||
223 | extern int ceph_osdc_writepages(struct ceph_osd_client *osdc, | ||
224 | struct ceph_vino vino, | ||
225 | struct ceph_file_layout *layout, | ||
226 | struct ceph_snap_context *sc, | ||
227 | u64 off, u64 len, | ||
228 | u32 truncate_seq, u64 truncate_size, | ||
229 | struct timespec *mtime, | ||
230 | struct page **pages, int nr_pages, | ||
231 | int flags, int do_sync, bool nofail); | ||
232 | |||
233 | #endif | ||
234 | |||
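A hedged sketch (not part of this series) of a synchronous object read through the ceph_osdc_readpages() helper declared above. The inode-derived vino/layout, the caller-owned page array, the zero truncate_seq/truncate_size and the assumption that a non-negative return value is the number of bytes read are all illustrative.

#include <linux/ceph/osd_client.h>

static int example_read(struct ceph_osd_client *osdc,
			struct ceph_vino vino,
			struct ceph_file_layout *layout,
			struct page **pages, int nr_pages,
			u64 off, u64 len)
{
	u64 plen = len;		/* may be shortened to the object boundary */

	return ceph_osdc_readpages(osdc, vino, layout, off, &plen,
				   0 /* truncate_seq */,
				   0 /* truncate_size */,
				   pages, nr_pages);
}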
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h new file mode 100644 index 000000000000..ba4c205cbb01 --- /dev/null +++ b/include/linux/ceph/osdmap.h | |||
@@ -0,0 +1,130 @@ | |||
1 | #ifndef _FS_CEPH_OSDMAP_H | ||
2 | #define _FS_CEPH_OSDMAP_H | ||
3 | |||
4 | #include <linux/rbtree.h> | ||
5 | #include "types.h" | ||
6 | #include "ceph_fs.h" | ||
7 | #include <linux/crush/crush.h> | ||
8 | |||
9 | /* | ||
10 | * The osd map describes the current membership of the osd cluster and | ||
11 | * specifies the mapping of objects to placement groups and placement | ||
12 | * groups to (sets of) osds. That is, it completely specifies the | ||
13 | * (desired) distribution of all data objects in the system at some | ||
14 | * point in time. | ||
15 | * | ||
16 | * Each map version is identified by an epoch, which increases monotonically. | ||
17 | * | ||
18 | * The map can be updated either via an incremental map (diff) describing | ||
19 | * the change between two successive epochs, or as a fully encoded map. | ||
20 | */ | ||
21 | struct ceph_pg_pool_info { | ||
22 | struct rb_node node; | ||
23 | int id; | ||
24 | struct ceph_pg_pool v; | ||
25 | int pg_num_mask, pgp_num_mask, lpg_num_mask, lpgp_num_mask; | ||
26 | char *name; | ||
27 | }; | ||
28 | |||
29 | struct ceph_pg_mapping { | ||
30 | struct rb_node node; | ||
31 | struct ceph_pg pgid; | ||
32 | int len; | ||
33 | int osds[]; | ||
34 | }; | ||
35 | |||
36 | struct ceph_osdmap { | ||
37 | struct ceph_fsid fsid; | ||
38 | u32 epoch; | ||
39 | u32 mkfs_epoch; | ||
40 | struct ceph_timespec created, modified; | ||
41 | |||
42 | u32 flags; /* CEPH_OSDMAP_* */ | ||
43 | |||
44 | u32 max_osd; /* size of osd_state, _offload, _addr arrays */ | ||
45 | u8 *osd_state; /* CEPH_OSD_* */ | ||
46 | u32 *osd_weight; /* 0 = failed, 0x10000 = 100% normal */ | ||
47 | struct ceph_entity_addr *osd_addr; | ||
48 | |||
49 | struct rb_root pg_temp; | ||
50 | struct rb_root pg_pools; | ||
51 | u32 pool_max; | ||
52 | |||
53 | /* the CRUSH map specifies the mapping of placement groups to | ||
54 | * the list of osds that store+replicate them. */ | ||
55 | struct crush_map *crush; | ||
56 | }; | ||
57 | |||
58 | /* | ||
59 | * file layout helpers | ||
60 | */ | ||
61 | #define ceph_file_layout_su(l) ((__s32)le32_to_cpu((l).fl_stripe_unit)) | ||
62 | #define ceph_file_layout_stripe_count(l) \ | ||
63 | ((__s32)le32_to_cpu((l).fl_stripe_count)) | ||
64 | #define ceph_file_layout_object_size(l) ((__s32)le32_to_cpu((l).fl_object_size)) | ||
65 | #define ceph_file_layout_cas_hash(l) ((__s32)le32_to_cpu((l).fl_cas_hash)) | ||
66 | #define ceph_file_layout_object_su(l) \ | ||
67 | ((__s32)le32_to_cpu((l).fl_object_stripe_unit)) | ||
68 | #define ceph_file_layout_pg_preferred(l) \ | ||
69 | ((__s32)le32_to_cpu((l).fl_pg_preferred)) | ||
70 | #define ceph_file_layout_pg_pool(l) \ | ||
71 | ((__s32)le32_to_cpu((l).fl_pg_pool)) | ||
72 | |||
73 | static inline unsigned ceph_file_layout_stripe_width(struct ceph_file_layout *l) | ||
74 | { | ||
75 | return le32_to_cpu(l->fl_stripe_unit) * | ||
76 | le32_to_cpu(l->fl_stripe_count); | ||
77 | } | ||
78 | |||
79 | /* "period" == bytes before I start on a new set of objects */ | ||
80 | static inline unsigned ceph_file_layout_period(struct ceph_file_layout *l) | ||
81 | { | ||
82 | return le32_to_cpu(l->fl_object_size) * | ||
83 | le32_to_cpu(l->fl_stripe_count); | ||
84 | } | ||
85 | |||
86 | |||
87 | static inline int ceph_osd_is_up(struct ceph_osdmap *map, int osd) | ||
88 | { | ||
89 | return (osd < map->max_osd) && (map->osd_state[osd] & CEPH_OSD_UP); | ||
90 | } | ||
91 | |||
92 | static inline bool ceph_osdmap_flag(struct ceph_osdmap *map, int flag) | ||
93 | { | ||
94 | return map && (map->flags & flag); | ||
95 | } | ||
96 | |||
97 | extern char *ceph_osdmap_state_str(char *str, int len, int state); | ||
98 | |||
99 | static inline struct ceph_entity_addr *ceph_osd_addr(struct ceph_osdmap *map, | ||
100 | int osd) | ||
101 | { | ||
102 | if (osd >= map->max_osd) | ||
103 | return NULL; | ||
104 | return &map->osd_addr[osd]; | ||
105 | } | ||
106 | |||
107 | extern struct ceph_osdmap *osdmap_decode(void **p, void *end); | ||
108 | extern struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end, | ||
109 | struct ceph_osdmap *map, | ||
110 | struct ceph_messenger *msgr); | ||
111 | extern void ceph_osdmap_destroy(struct ceph_osdmap *map); | ||
112 | |||
113 | /* calculate mapping of a file extent to an object */ | ||
114 | extern void ceph_calc_file_object_mapping(struct ceph_file_layout *layout, | ||
115 | u64 off, u64 *plen, | ||
116 | u64 *bno, u64 *oxoff, u64 *oxlen); | ||
117 | |||
118 | /* calculate mapping of object to a placement group */ | ||
119 | extern int ceph_calc_object_layout(struct ceph_object_layout *ol, | ||
120 | const char *oid, | ||
121 | struct ceph_file_layout *fl, | ||
122 | struct ceph_osdmap *osdmap); | ||
123 | extern int ceph_calc_pg_acting(struct ceph_osdmap *osdmap, struct ceph_pg pgid, | ||
124 | int *acting); | ||
125 | extern int ceph_calc_pg_primary(struct ceph_osdmap *osdmap, | ||
126 | struct ceph_pg pgid); | ||
127 | |||
128 | extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name); | ||
129 | |||
130 | #endif | ||
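A hedged sketch (not part of this series) combining the helpers above: map a file extent to an object, the object to its placement group, and the placement group to its primary osd. The "%llx.%08llx" object-name format, the buffer size and the error codes are illustrative assumptions taken from context rather than from this diff.

#include <linux/kernel.h>
#include <linux/ceph/rados.h>
#include <linux/ceph/osdmap.h>

static int example_extent_to_primary(struct ceph_osdmap *map,
				     struct ceph_file_layout *layout,
				     u64 ino, u64 off, u64 len)
{
	struct ceph_object_layout ol;
	u64 bno, oxoff, oxlen;
	char oid[40];
	int osd;

	/* which object does this extent start in, and how much fits there? */
	ceph_calc_file_object_mapping(layout, off, &len, &bno, &oxoff, &oxlen);
	snprintf(oid, sizeof(oid), "%llx.%08llx",
		 (unsigned long long)ino, (unsigned long long)bno);

	/* object -> placement group */
	if (ceph_calc_object_layout(&ol, oid, layout, map) < 0)
		return -EINVAL;

	/* placement group -> primary osd (negative if none) */
	osd = ceph_calc_pg_primary(map, ol.ol_pgid);
	if (osd < 0 || !ceph_osd_is_up(map, osd))
		return -EAGAIN;
	return osd;
}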
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h new file mode 100644 index 000000000000..9660d6b0a35d --- /dev/null +++ b/include/linux/ceph/pagelist.h | |||
@@ -0,0 +1,75 @@ | |||
1 | #ifndef __FS_CEPH_PAGELIST_H | ||
2 | #define __FS_CEPH_PAGELIST_H | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | |||
6 | struct ceph_pagelist { | ||
7 | struct list_head head; | ||
8 | void *mapped_tail; | ||
9 | size_t length; | ||
10 | size_t room; | ||
11 | struct list_head free_list; | ||
12 | size_t num_pages_free; | ||
13 | }; | ||
14 | |||
15 | struct ceph_pagelist_cursor { | ||
16 | struct ceph_pagelist *pl; /* pagelist, for error checking */ | ||
17 | struct list_head *page_lru; /* page in list */ | ||
18 | size_t room; /* room remaining to reset to */ | ||
19 | }; | ||
20 | |||
21 | static inline void ceph_pagelist_init(struct ceph_pagelist *pl) | ||
22 | { | ||
23 | INIT_LIST_HEAD(&pl->head); | ||
24 | pl->mapped_tail = NULL; | ||
25 | pl->length = 0; | ||
26 | pl->room = 0; | ||
27 | INIT_LIST_HEAD(&pl->free_list); | ||
28 | pl->num_pages_free = 0; | ||
29 | } | ||
30 | |||
31 | extern int ceph_pagelist_release(struct ceph_pagelist *pl); | ||
32 | |||
33 | extern int ceph_pagelist_append(struct ceph_pagelist *pl, const void *d, size_t l); | ||
34 | |||
35 | extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space); | ||
36 | |||
37 | extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl); | ||
38 | |||
39 | extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl, | ||
40 | struct ceph_pagelist_cursor *c); | ||
41 | |||
42 | extern int ceph_pagelist_truncate(struct ceph_pagelist *pl, | ||
43 | struct ceph_pagelist_cursor *c); | ||
44 | |||
45 | static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v) | ||
46 | { | ||
47 | __le64 ev = cpu_to_le64(v); | ||
48 | return ceph_pagelist_append(pl, &ev, sizeof(ev)); | ||
49 | } | ||
50 | static inline int ceph_pagelist_encode_32(struct ceph_pagelist *pl, u32 v) | ||
51 | { | ||
52 | __le32 ev = cpu_to_le32(v); | ||
53 | return ceph_pagelist_append(pl, &ev, sizeof(ev)); | ||
54 | } | ||
55 | static inline int ceph_pagelist_encode_16(struct ceph_pagelist *pl, u16 v) | ||
56 | { | ||
57 | __le16 ev = cpu_to_le16(v); | ||
58 | return ceph_pagelist_append(pl, &ev, sizeof(ev)); | ||
59 | } | ||
60 | static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v) | ||
61 | { | ||
62 | return ceph_pagelist_append(pl, &v, 1); | ||
63 | } | ||
64 | static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl, | ||
65 | char *s, size_t len) | ||
66 | { | ||
67 | int ret = ceph_pagelist_encode_32(pl, len); | ||
68 | if (ret) | ||
69 | return ret; | ||
70 | if (len) | ||
71 | return ceph_pagelist_append(pl, s, len); | ||
72 | return 0; | ||
73 | } | ||
74 | |||
75 | #endif | ||
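A hedged sketch (not part of this series) of encoding a small record into a pagelist with the inline helpers above. The record layout (length-prefixed string followed by a 64-bit value) and the caller are illustrative assumptions.

#include <linux/string.h>
#include <linux/ceph/pagelist.h>

static int example_encode(struct ceph_pagelist *pl, const char *name, u64 val)
{
	int err;

	ceph_pagelist_init(pl);

	err = ceph_pagelist_encode_string(pl, (char *)name, strlen(name));
	if (!err)
		err = ceph_pagelist_encode_64(pl, val);
	if (err)
		ceph_pagelist_release(pl);	/* frees any pages we added */
	return err;
}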
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h new file mode 100644 index 000000000000..6d5247f2e81b --- /dev/null +++ b/include/linux/ceph/rados.h | |||
@@ -0,0 +1,405 @@ | |||
1 | #ifndef CEPH_RADOS_H | ||
2 | #define CEPH_RADOS_H | ||
3 | |||
4 | /* | ||
5 | * Data types for the Ceph distributed object storage layer RADOS | ||
6 | * (Reliable Autonomic Distributed Object Store). | ||
7 | */ | ||
8 | |||
9 | #include "msgr.h" | ||
10 | |||
11 | /* | ||
12 | * osdmap encoding versions | ||
13 | */ | ||
14 | #define CEPH_OSDMAP_INC_VERSION 5 | ||
15 | #define CEPH_OSDMAP_INC_VERSION_EXT 5 | ||
16 | #define CEPH_OSDMAP_VERSION 5 | ||
17 | #define CEPH_OSDMAP_VERSION_EXT 5 | ||
18 | |||
19 | /* | ||
20 | * fs id | ||
21 | */ | ||
22 | struct ceph_fsid { | ||
23 | unsigned char fsid[16]; | ||
24 | }; | ||
25 | |||
26 | static inline int ceph_fsid_compare(const struct ceph_fsid *a, | ||
27 | const struct ceph_fsid *b) | ||
28 | { | ||
29 | return memcmp(a, b, sizeof(*a)); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * ino, object, etc. | ||
34 | */ | ||
35 | typedef __le64 ceph_snapid_t; | ||
36 | #define CEPH_SNAPDIR ((__u64)(-1)) /* reserved for hidden .snap dir */ | ||
37 | #define CEPH_NOSNAP ((__u64)(-2)) /* "head", "live" revision */ | ||
38 | #define CEPH_MAXSNAP ((__u64)(-3)) /* largest valid snapid */ | ||
39 | |||
40 | struct ceph_timespec { | ||
41 | __le32 tv_sec; | ||
42 | __le32 tv_nsec; | ||
43 | } __attribute__ ((packed)); | ||
44 | |||
45 | |||
46 | /* | ||
47 | * object layout - how objects are mapped into PGs | ||
48 | */ | ||
49 | #define CEPH_OBJECT_LAYOUT_HASH 1 | ||
50 | #define CEPH_OBJECT_LAYOUT_LINEAR 2 | ||
51 | #define CEPH_OBJECT_LAYOUT_HASHINO 3 | ||
52 | |||
53 | /* | ||
54 | * pg layout -- how PGs are mapped onto (sets of) OSDs | ||
55 | */ | ||
56 | #define CEPH_PG_LAYOUT_CRUSH 0 | ||
57 | #define CEPH_PG_LAYOUT_HASH 1 | ||
58 | #define CEPH_PG_LAYOUT_LINEAR 2 | ||
59 | #define CEPH_PG_LAYOUT_HYBRID 3 | ||
60 | |||
61 | #define CEPH_PG_MAX_SIZE 16 /* max # osds in a single pg */ | ||
62 | |||
63 | /* | ||
64 | * placement group. | ||
65 | * we encode this into one __le64. | ||
66 | */ | ||
67 | struct ceph_pg { | ||
68 | __le16 preferred; /* preferred primary osd */ | ||
69 | __le16 ps; /* placement seed */ | ||
70 | __le32 pool; /* object pool */ | ||
71 | } __attribute__ ((packed)); | ||
72 | |||
73 | /* | ||
74 | * pg_pool is a set of pgs storing a pool of objects | ||
75 | * | ||
76 | * pg_num -- base number of pseudorandomly placed pgs | ||
77 | * | ||
78 | * pgp_num -- effective number when calculating pg placement. this | ||
79 | * is used for pg_num increases. new pgs result in data being "split" | ||
80 | * into new pgs. for this to proceed smoothly, new pgs are initially | ||
81 | * colocated with their parents; that is, pgp_num doesn't increase | ||
82 | * until the new pgs have successfully split. only _then_ are the new | ||
83 | * pgs placed independently. | ||
84 | * | ||
85 | * lpg_num -- localized pg count (per device). replicas are randomly | ||
86 | * selected. | ||
87 | * | ||
88 | * lpgp_num -- as above. | ||
89 | */ | ||
90 | #define CEPH_PG_TYPE_REP 1 | ||
91 | #define CEPH_PG_TYPE_RAID4 2 | ||
92 | #define CEPH_PG_POOL_VERSION 2 | ||
93 | struct ceph_pg_pool { | ||
94 | __u8 type; /* CEPH_PG_TYPE_* */ | ||
95 | __u8 size; /* number of osds in each pg */ | ||
96 | __u8 crush_ruleset; /* crush placement rule */ | ||
97 | __u8 object_hash; /* hash mapping object name to ps */ | ||
98 | __le32 pg_num, pgp_num; /* number of pg's */ | ||
99 | __le32 lpg_num, lpgp_num; /* number of localized pg's */ | ||
100 | __le32 last_change; /* most recent epoch changed */ | ||
101 | __le64 snap_seq; /* seq for per-pool snapshot */ | ||
102 | __le32 snap_epoch; /* epoch of last snap */ | ||
103 | __le32 num_snaps; | ||
104 | __le32 num_removed_snap_intervals; /* if non-empty, NO per-pool snaps */ | ||
105 | __le64 auid; /* who owns the pg */ | ||
106 | } __attribute__ ((packed)); | ||
107 | |||
108 | /* | ||
109 | * stable_mod func is used to control number of placement groups. | ||
110 | * similar to straight-up modulo, but produces a stable mapping as b | ||
111 | * increases over time. b is the number of bins, and bmask is the | ||
112 | * containing power of 2 minus 1. | ||
113 | * | ||
114 | * b <= bmask and bmask=(2**n)-1 | ||
115 | * e.g., b=12 -> bmask=15, b=123 -> bmask=127 | ||
116 | */ | ||
117 | static inline int ceph_stable_mod(int x, int b, int bmask) | ||
118 | { | ||
119 | if ((x & bmask) < b) | ||
120 | return x & bmask; | ||
121 | else | ||
122 | return x & (bmask >> 1); | ||
123 | } | ||
124 | |||
125 | /* | ||
126 | * object layout - how a given object should be stored. | ||
127 | */ | ||
128 | struct ceph_object_layout { | ||
129 | struct ceph_pg ol_pgid; /* raw pg, with _full_ ps precision. */ | ||
130 | __le32 ol_stripe_unit; /* for per-object parity, if any */ | ||
131 | } __attribute__ ((packed)); | ||
132 | |||
133 | /* | ||
134 | * compound epoch+version, used by storage layer to serialize mutations | ||
135 | */ | ||
136 | struct ceph_eversion { | ||
137 | __le32 epoch; | ||
138 | __le64 version; | ||
139 | } __attribute__ ((packed)); | ||
140 | |||
141 | /* | ||
142 | * osd map bits | ||
143 | */ | ||
144 | |||
145 | /* status bits */ | ||
146 | #define CEPH_OSD_EXISTS 1 | ||
147 | #define CEPH_OSD_UP 2 | ||
148 | |||
149 | /* osd weights. fixed point value: 0x10000 == 1.0 ("in"), 0 == "out" */ | ||
150 | #define CEPH_OSD_IN 0x10000 | ||
151 | #define CEPH_OSD_OUT 0 | ||
152 | |||
153 | |||
154 | /* | ||
155 | * osd map flag bits | ||
156 | */ | ||
157 | #define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */ | ||
158 | #define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */ | ||
159 | #define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */ | ||
160 | #define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */ | ||
161 | #define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */ | ||
162 | |||
163 | /* | ||
164 | * osd ops | ||
165 | */ | ||
166 | #define CEPH_OSD_OP_MODE 0xf000 | ||
167 | #define CEPH_OSD_OP_MODE_RD 0x1000 | ||
168 | #define CEPH_OSD_OP_MODE_WR 0x2000 | ||
169 | #define CEPH_OSD_OP_MODE_RMW 0x3000 | ||
170 | #define CEPH_OSD_OP_MODE_SUB 0x4000 | ||
171 | |||
172 | #define CEPH_OSD_OP_TYPE 0x0f00 | ||
173 | #define CEPH_OSD_OP_TYPE_LOCK 0x0100 | ||
174 | #define CEPH_OSD_OP_TYPE_DATA 0x0200 | ||
175 | #define CEPH_OSD_OP_TYPE_ATTR 0x0300 | ||
176 | #define CEPH_OSD_OP_TYPE_EXEC 0x0400 | ||
177 | #define CEPH_OSD_OP_TYPE_PG 0x0500 | ||
178 | |||
179 | enum { | ||
180 | /** data **/ | ||
181 | /* read */ | ||
182 | CEPH_OSD_OP_READ = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 1, | ||
183 | CEPH_OSD_OP_STAT = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 2, | ||
184 | |||
185 | /* fancy read */ | ||
186 | CEPH_OSD_OP_MASKTRUNC = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 4, | ||
187 | |||
188 | /* write */ | ||
189 | CEPH_OSD_OP_WRITE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 1, | ||
190 | CEPH_OSD_OP_WRITEFULL = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 2, | ||
191 | CEPH_OSD_OP_TRUNCATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 3, | ||
192 | CEPH_OSD_OP_ZERO = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 4, | ||
193 | CEPH_OSD_OP_DELETE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 5, | ||
194 | |||
195 | /* fancy write */ | ||
196 | CEPH_OSD_OP_APPEND = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 6, | ||
197 | CEPH_OSD_OP_STARTSYNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 7, | ||
198 | CEPH_OSD_OP_SETTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 8, | ||
199 | CEPH_OSD_OP_TRIMTRUNC = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 9, | ||
200 | |||
201 | CEPH_OSD_OP_TMAPUP = CEPH_OSD_OP_MODE_RMW | CEPH_OSD_OP_TYPE_DATA | 10, | ||
202 | CEPH_OSD_OP_TMAPPUT = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 11, | ||
203 | CEPH_OSD_OP_TMAPGET = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_DATA | 12, | ||
204 | |||
205 | CEPH_OSD_OP_CREATE = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 13, | ||
206 | CEPH_OSD_OP_ROLLBACK= CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_DATA | 14, | ||
207 | |||
208 | /** attrs **/ | ||
209 | /* read */ | ||
210 | CEPH_OSD_OP_GETXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 1, | ||
211 | CEPH_OSD_OP_GETXATTRS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 2, | ||
212 | CEPH_OSD_OP_CMPXATTR = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_ATTR | 3, | ||
213 | |||
214 | /* write */ | ||
215 | CEPH_OSD_OP_SETXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 1, | ||
216 | CEPH_OSD_OP_SETXATTRS = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 2, | ||
217 | CEPH_OSD_OP_RESETXATTRS = CEPH_OSD_OP_MODE_WR|CEPH_OSD_OP_TYPE_ATTR | 3, | ||
218 | CEPH_OSD_OP_RMXATTR = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_ATTR | 4, | ||
219 | |||
220 | /** subop **/ | ||
221 | CEPH_OSD_OP_PULL = CEPH_OSD_OP_MODE_SUB | 1, | ||
222 | CEPH_OSD_OP_PUSH = CEPH_OSD_OP_MODE_SUB | 2, | ||
223 | CEPH_OSD_OP_BALANCEREADS = CEPH_OSD_OP_MODE_SUB | 3, | ||
224 | CEPH_OSD_OP_UNBALANCEREADS = CEPH_OSD_OP_MODE_SUB | 4, | ||
225 | CEPH_OSD_OP_SCRUB = CEPH_OSD_OP_MODE_SUB | 5, | ||
226 | |||
227 | /** lock **/ | ||
228 | CEPH_OSD_OP_WRLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 1, | ||
229 | CEPH_OSD_OP_WRUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 2, | ||
230 | CEPH_OSD_OP_RDLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 3, | ||
231 | CEPH_OSD_OP_RDUNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 4, | ||
232 | CEPH_OSD_OP_UPLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 5, | ||
233 | CEPH_OSD_OP_DNLOCK = CEPH_OSD_OP_MODE_WR | CEPH_OSD_OP_TYPE_LOCK | 6, | ||
234 | |||
235 | /** exec **/ | ||
236 | CEPH_OSD_OP_CALL = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_EXEC | 1, | ||
237 | |||
238 | /** pg **/ | ||
239 | CEPH_OSD_OP_PGLS = CEPH_OSD_OP_MODE_RD | CEPH_OSD_OP_TYPE_PG | 1, | ||
240 | }; | ||
241 | |||
242 | static inline int ceph_osd_op_type_lock(int op) | ||
243 | { | ||
244 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_LOCK; | ||
245 | } | ||
246 | static inline int ceph_osd_op_type_data(int op) | ||
247 | { | ||
248 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_DATA; | ||
249 | } | ||
250 | static inline int ceph_osd_op_type_attr(int op) | ||
251 | { | ||
252 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_ATTR; | ||
253 | } | ||
254 | static inline int ceph_osd_op_type_exec(int op) | ||
255 | { | ||
256 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_EXEC; | ||
257 | } | ||
258 | static inline int ceph_osd_op_type_pg(int op) | ||
259 | { | ||
260 | return (op & CEPH_OSD_OP_TYPE) == CEPH_OSD_OP_TYPE_PG; | ||
261 | } | ||
262 | |||
263 | static inline int ceph_osd_op_mode_subop(int op) | ||
264 | { | ||
265 | return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_SUB; | ||
266 | } | ||
267 | static inline int ceph_osd_op_mode_read(int op) | ||
268 | { | ||
269 | return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_RD; | ||
270 | } | ||
271 | static inline int ceph_osd_op_mode_modify(int op) | ||
272 | { | ||
273 | return (op & CEPH_OSD_OP_MODE) == CEPH_OSD_OP_MODE_WR; | ||
274 | } | ||
275 | |||
276 | /* | ||
277 | * note that the following tmap stuff is also defined in the ceph librados.h; | ||
278 | * any modification here needs to be updated there as well | ||
279 | */ | ||
280 | #define CEPH_OSD_TMAP_HDR 'h' | ||
281 | #define CEPH_OSD_TMAP_SET 's' | ||
282 | #define CEPH_OSD_TMAP_RM 'r' | ||
283 | |||
284 | extern const char *ceph_osd_op_name(int op); | ||
285 | |||
286 | |||
287 | /* | ||
288 | * osd op flags | ||
289 | * | ||
290 | * An op may be READ, WRITE, or READ|WRITE. | ||
291 | */ | ||
292 | enum { | ||
293 | CEPH_OSD_FLAG_ACK = 1, /* want (or is) "ack" ack */ | ||
294 | CEPH_OSD_FLAG_ONNVRAM = 2, /* want (or is) "onnvram" ack */ | ||
295 | CEPH_OSD_FLAG_ONDISK = 4, /* want (or is) "ondisk" ack */ | ||
296 | CEPH_OSD_FLAG_RETRY = 8, /* resend attempt */ | ||
297 | CEPH_OSD_FLAG_READ = 16, /* op may read */ | ||
298 | CEPH_OSD_FLAG_WRITE = 32, /* op may write */ | ||
299 | CEPH_OSD_FLAG_ORDERSNAP = 64, /* EOLDSNAP if snapc is out of order */ | ||
300 | CEPH_OSD_FLAG_PEERSTAT = 128, /* msg includes osd_peer_stat */ | ||
301 | CEPH_OSD_FLAG_BALANCE_READS = 256, | ||
302 | CEPH_OSD_FLAG_PARALLELEXEC = 512, /* execute op in parallel */ | ||
303 | CEPH_OSD_FLAG_PGOP = 1024, /* pg op, no object */ | ||
304 | CEPH_OSD_FLAG_EXEC = 2048, /* op may exec */ | ||
305 | CEPH_OSD_FLAG_EXEC_PUBLIC = 4096, /* op may exec (public) */ | ||
306 | }; | ||
307 | |||
308 | enum { | ||
309 | CEPH_OSD_OP_FLAG_EXCL = 1, /* EXCL object create */ | ||
310 | }; | ||
311 | |||
312 | #define EOLDSNAPC ERESTART /* ORDERSNAP flag set; writer has old snapc*/ | ||
313 | #define EBLACKLISTED ESHUTDOWN /* blacklisted */ | ||
314 | |||
315 | /* xattr comparison */ | ||
316 | enum { | ||
317 | CEPH_OSD_CMPXATTR_OP_NOP = 0, | ||
318 | CEPH_OSD_CMPXATTR_OP_EQ = 1, | ||
319 | CEPH_OSD_CMPXATTR_OP_NE = 2, | ||
320 | CEPH_OSD_CMPXATTR_OP_GT = 3, | ||
321 | CEPH_OSD_CMPXATTR_OP_GTE = 4, | ||
322 | CEPH_OSD_CMPXATTR_OP_LT = 5, | ||
323 | CEPH_OSD_CMPXATTR_OP_LTE = 6 | ||
324 | }; | ||
325 | |||
326 | enum { | ||
327 | CEPH_OSD_CMPXATTR_MODE_STRING = 1, | ||
328 | CEPH_OSD_CMPXATTR_MODE_U64 = 2 | ||
329 | }; | ||
330 | |||
331 | /* | ||
332 | * an individual object operation. each may be accompanied by some data | ||
333 | * payload | ||
334 | */ | ||
335 | struct ceph_osd_op { | ||
336 | __le16 op; /* CEPH_OSD_OP_* */ | ||
337 | __le32 flags; /* CEPH_OSD_FLAG_* */ | ||
338 | union { | ||
339 | struct { | ||
340 | __le64 offset, length; | ||
341 | __le64 truncate_size; | ||
342 | __le32 truncate_seq; | ||
343 | } __attribute__ ((packed)) extent; | ||
344 | struct { | ||
345 | __le32 name_len; | ||
346 | __le32 value_len; | ||
347 | __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ | ||
348 | __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ | ||
349 | } __attribute__ ((packed)) xattr; | ||
350 | struct { | ||
351 | __u8 class_len; | ||
352 | __u8 method_len; | ||
353 | __u8 argc; | ||
354 | __le32 indata_len; | ||
355 | } __attribute__ ((packed)) cls; | ||
356 | struct { | ||
357 | __le64 cookie, count; | ||
358 | } __attribute__ ((packed)) pgls; | ||
359 | struct { | ||
360 | __le64 snapid; | ||
361 | } __attribute__ ((packed)) snap; | ||
362 | }; | ||
363 | __le32 payload_len; | ||
364 | } __attribute__ ((packed)); | ||
365 | |||
366 | /* | ||
367 | * osd request message header. each request may include multiple | ||
368 | * ceph_osd_op object operations. | ||
369 | */ | ||
370 | struct ceph_osd_request_head { | ||
371 | __le32 client_inc; /* client incarnation */ | ||
372 | struct ceph_object_layout layout; /* pgid */ | ||
373 | __le32 osdmap_epoch; /* client's osdmap epoch */ | ||
374 | |||
375 | __le32 flags; | ||
376 | |||
377 | struct ceph_timespec mtime; /* for mutations only */ | ||
378 | struct ceph_eversion reassert_version; /* if we are replaying op */ | ||
379 | |||
380 | __le32 object_len; /* length of object name */ | ||
381 | |||
382 | __le64 snapid; /* snapid to read */ | ||
383 | __le64 snap_seq; /* writer's snap context */ | ||
384 | __le32 num_snaps; | ||
385 | |||
386 | __le16 num_ops; | ||
387 | struct ceph_osd_op ops[]; /* followed by ops[], obj, ticket, snaps */ | ||
388 | } __attribute__ ((packed)); | ||
389 | |||
390 | struct ceph_osd_reply_head { | ||
391 | __le32 client_inc; /* client incarnation */ | ||
392 | __le32 flags; | ||
393 | struct ceph_object_layout layout; | ||
394 | __le32 osdmap_epoch; | ||
395 | struct ceph_eversion reassert_version; /* for replaying uncommitted */ | ||
396 | |||
397 | __le32 result; /* result code */ | ||
398 | |||
399 | __le32 object_len; /* length of object name */ | ||
400 | __le32 num_ops; | ||
401 | struct ceph_osd_op ops[0]; /* ops[], object */ | ||
402 | } __attribute__ ((packed)); | ||
403 | |||
404 | |||
405 | #endif | ||
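A hedged sketch (not part of this series) working through the stable_mod behaviour described in the comment above ceph_stable_mod(), using the example from that comment (b=12 -> bmask=15). A ps value whose masked form does not fit in the 12 live bins falls back to masking with bmask>>1; values already below b keep their low bits. The checking function itself is illustrative.

#include <linux/types.h>
#include <linux/ceph/rados.h>

static bool example_stable_mod_ok(void)
{
	/* 13 & 15 == 13, which is >= 12, so fall back to 13 & 7 == 5 */
	if (ceph_stable_mod(13, 12, 15) != 5)
		return false;
	/* 9 & 15 == 9, which is < 12, so the bin is simply 9 */
	return ceph_stable_mod(9, 12, 15) == 9;
}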
diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h new file mode 100644 index 000000000000..28b35a005ec2 --- /dev/null +++ b/include/linux/ceph/types.h | |||
@@ -0,0 +1,29 @@ | |||
1 | #ifndef _FS_CEPH_TYPES_H | ||
2 | #define _FS_CEPH_TYPES_H | ||
3 | |||
4 | /* needed before including ceph_fs.h */ | ||
5 | #include <linux/in.h> | ||
6 | #include <linux/types.h> | ||
7 | #include <linux/fcntl.h> | ||
8 | #include <linux/string.h> | ||
9 | |||
10 | #include "ceph_fs.h" | ||
11 | #include "ceph_frag.h" | ||
12 | #include "ceph_hash.h" | ||
13 | |||
14 | /* | ||
15 | * Identify inodes by both their ino AND snapshot id (a u64). | ||
16 | */ | ||
17 | struct ceph_vino { | ||
18 | u64 ino; | ||
19 | u64 snap; | ||
20 | }; | ||
21 | |||
22 | |||
23 | /* context for the caps reservation mechanism */ | ||
24 | struct ceph_cap_reservation { | ||
25 | int count; | ||
26 | }; | ||
27 | |||
28 | |||
29 | #endif | ||
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index ed3e92e41c6e..709dfb901d11 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -75,7 +75,7 @@ struct cgroup_subsys_state { | |||
75 | 75 | ||
76 | unsigned long flags; | 76 | unsigned long flags; |
77 | /* ID for this css, if possible */ | 77 | /* ID for this css, if possible */ |
78 | struct css_id *id; | 78 | struct css_id __rcu *id; |
79 | }; | 79 | }; |
80 | 80 | ||
81 | /* bits in struct cgroup_subsys_state flags field */ | 81 | /* bits in struct cgroup_subsys_state flags field */ |
@@ -205,7 +205,7 @@ struct cgroup { | |||
205 | struct list_head children; /* my children */ | 205 | struct list_head children; /* my children */ |
206 | 206 | ||
207 | struct cgroup *parent; /* my parent */ | 207 | struct cgroup *parent; /* my parent */ |
208 | struct dentry *dentry; /* cgroup fs entry, RCU protected */ | 208 | struct dentry __rcu *dentry; /* cgroup fs entry, RCU protected */ |
209 | 209 | ||
210 | /* Private pointers for each registered subsystem */ | 210 | /* Private pointers for each registered subsystem */ |
211 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; | 211 | struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT]; |
@@ -578,7 +578,12 @@ struct task_struct *cgroup_iter_next(struct cgroup *cgrp, | |||
578 | void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); | 578 | void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it); |
579 | int cgroup_scan_tasks(struct cgroup_scanner *scan); | 579 | int cgroup_scan_tasks(struct cgroup_scanner *scan); |
580 | int cgroup_attach_task(struct cgroup *, struct task_struct *); | 580 | int cgroup_attach_task(struct cgroup *, struct task_struct *); |
581 | int cgroup_attach_task_current_cg(struct task_struct *); | 581 | int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); |
582 | |||
583 | static inline int cgroup_attach_task_current_cg(struct task_struct *tsk) | ||
584 | { | ||
585 | return cgroup_attach_task_all(current, tsk); | ||
586 | } | ||
582 | 587 | ||
583 | /* | 588 | /* |
584 | * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works | 589 | * CSS ID is ID for cgroup_subsys_state structs under subsys. This only works |
@@ -636,6 +641,11 @@ static inline int cgroupstats_build(struct cgroupstats *stats, | |||
636 | } | 641 | } |
637 | 642 | ||
638 | /* No cgroups - nothing to do */ | 643 | /* No cgroups - nothing to do */ |
644 | static inline int cgroup_attach_task_all(struct task_struct *from, | ||
645 | struct task_struct *t) | ||
646 | { | ||
647 | return 0; | ||
648 | } | ||
639 | static inline int cgroup_attach_task_current_cg(struct task_struct *t) | 649 | static inline int cgroup_attach_task_current_cg(struct task_struct *t) |
640 | { | 650 | { |
641 | return 0; | 651 | return 0; |
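The new cgroup_attach_task_all() attaches a task to every cgroup that another task belongs to, which makes the old cgroup_attach_task_current_cg() just the from == current case. A hedged usage sketch for a kernel thread that should inherit its creator's cgroups (example_adopt_cgroups is a made-up name; owner and worker are assumed to be valid task pointers):

	static int example_adopt_cgroups(struct task_struct *owner,
					 struct task_struct *worker)
	{
		/* place 'worker' in the same cgroups as 'owner' in every hierarchy */
		return cgroup_attach_task_all(owner, worker);
	}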
diff --git a/include/linux/compat.h b/include/linux/compat.h index 9ddc8780e8db..5778b559d59c 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvector(int type, | |||
360 | const struct compat_iovec __user *uvector, unsigned long nr_segs, | 360 | const struct compat_iovec __user *uvector, unsigned long nr_segs, |
361 | unsigned long fast_segs, struct iovec *fast_pointer, | 361 | unsigned long fast_segs, struct iovec *fast_pointer, |
362 | struct iovec **ret_pointer); | 362 | struct iovec **ret_pointer); |
363 | |||
364 | extern void __user *compat_alloc_user_space(unsigned long len); | ||
365 | |||
363 | #endif /* CONFIG_COMPAT */ | 366 | #endif /* CONFIG_COMPAT */ |
364 | #endif /* _LINUX_COMPAT_H */ | 367 | #endif /* _LINUX_COMPAT_H */ |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index c1a62c56a660..320d6c94ff84 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -16,7 +16,11 @@ | |||
16 | # define __release(x) __context__(x,-1) | 16 | # define __release(x) __context__(x,-1) |
17 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) | 17 | # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0) |
18 | # define __percpu __attribute__((noderef, address_space(3))) | 18 | # define __percpu __attribute__((noderef, address_space(3))) |
19 | #ifdef CONFIG_SPARSE_RCU_POINTER | ||
20 | # define __rcu __attribute__((noderef, address_space(4))) | ||
21 | #else | ||
19 | # define __rcu | 22 | # define __rcu |
23 | #endif | ||
20 | extern void __chk_user_ptr(const volatile void __user *); | 24 | extern void __chk_user_ptr(const volatile void __user *); |
21 | extern void __chk_io_ptr(const volatile void __iomem *); | 25 | extern void __chk_io_ptr(const volatile void __iomem *); |
22 | #else | 26 | #else |
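With CONFIG_SPARSE_RCU_POINTER set, sparse now treats __rcu as a distinct address space, so reading an annotated pointer without rcu_dereference() (or writing it without rcu_assign_pointer()) is flagged. A minimal sketch of the annotation, with made-up names (example_config, example_cfg, example_threshold):

	struct example_config {
		int threshold;
	};

	static struct example_config __rcu *example_cfg;  /* updated via rcu_assign_pointer() */

	static int example_threshold(void)
	{
		struct example_config *cfg;
		int t = 0;

		rcu_read_lock();
		cfg = rcu_dereference(example_cfg);  /* strips the __rcu address space for sparse */
		if (cfg)
			t = cfg->threshold;
		rcu_read_unlock();
		return t;
	}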
diff --git a/include/linux/coredump.h b/include/linux/coredump.h index 8ba66a9d9022..ba4b85a6d9b8 100644 --- a/include/linux/coredump.h +++ b/include/linux/coredump.h | |||
@@ -9,37 +9,7 @@ | |||
9 | * These are the only things you should do on a core-file: use only these | 9 | * These are the only things you should do on a core-file: use only these |
10 | * functions to write out all the necessary info. | 10 | * functions to write out all the necessary info. |
11 | */ | 11 | */ |
12 | static inline int dump_write(struct file *file, const void *addr, int nr) | 12 | extern int dump_write(struct file *file, const void *addr, int nr); |
13 | { | 13 | extern int dump_seek(struct file *file, loff_t off); |
14 | return file->f_op->write(file, addr, nr, &file->f_pos) == nr; | ||
15 | } | ||
16 | |||
17 | static inline int dump_seek(struct file *file, loff_t off) | ||
18 | { | ||
19 | int ret = 1; | ||
20 | |||
21 | if (file->f_op->llseek && file->f_op->llseek != no_llseek) { | ||
22 | if (file->f_op->llseek(file, off, SEEK_CUR) < 0) | ||
23 | return 0; | ||
24 | } else { | ||
25 | char *buf = (char *)get_zeroed_page(GFP_KERNEL); | ||
26 | |||
27 | if (!buf) | ||
28 | return 0; | ||
29 | while (off > 0) { | ||
30 | unsigned long n = off; | ||
31 | |||
32 | if (n > PAGE_SIZE) | ||
33 | n = PAGE_SIZE; | ||
34 | if (!dump_write(file, buf, n)) { | ||
35 | ret = 0; | ||
36 | break; | ||
37 | } | ||
38 | off -= n; | ||
39 | } | ||
40 | free_page((unsigned long)buf); | ||
41 | } | ||
42 | return ret; | ||
43 | } | ||
44 | 14 | ||
45 | #endif /* _LINUX_COREDUMP_H */ | 15 | #endif /* _LINUX_COREDUMP_H */ |
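Moving dump_write() and dump_seek() out of line leaves their callers unchanged; both still return non-zero on success, and the zero-page padding fallback for non-seekable files now lives in one shared implementation instead of every call site. A hedged fragment of typical use when emitting a header followed by a data area (file, hdr, dataoff and foffset are placeholders):

	if (!dump_write(file, &hdr, sizeof(hdr)))
		goto fail;
	if (!dump_seek(file, dataoff - foffset))  /* skip forward to the data area */
		goto fail;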
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 36ca9721a0c2..1be416bbbb82 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -53,6 +53,7 @@ struct cpuidle_state { | |||
53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ | 53 | #define CPUIDLE_FLAG_BALANCED (0x40) /* medium latency, moderate savings */ |
54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ | 54 | #define CPUIDLE_FLAG_DEEP (0x80) /* high latency, large savings */ |
55 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ | 55 | #define CPUIDLE_FLAG_IGNORE (0x100) /* ignore during this idle period */ |
56 | #define CPUIDLE_FLAG_TLB_FLUSHED (0x200) /* tlb will be flushed */ | ||
56 | 57 | ||
57 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) | 58 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) |
58 | 59 | ||
diff --git a/include/linux/cred.h b/include/linux/cred.h index 4d2c39573f36..4aaeab376446 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -84,7 +84,7 @@ struct thread_group_cred { | |||
84 | atomic_t usage; | 84 | atomic_t usage; |
85 | pid_t tgid; /* thread group process ID */ | 85 | pid_t tgid; /* thread group process ID */ |
86 | spinlock_t lock; | 86 | spinlock_t lock; |
87 | struct key *session_keyring; /* keyring inherited over fork */ | 87 | struct key __rcu *session_keyring; /* keyring inherited over fork */ |
88 | struct key *process_keyring; /* keyring private to this process */ | 88 | struct key *process_keyring; /* keyring private to this process */ |
89 | struct rcu_head rcu; /* RCU deletion hook */ | 89 | struct rcu_head rcu; /* RCU deletion hook */ |
90 | }; | 90 | }; |
diff --git a/include/linux/crush/crush.h b/include/linux/crush/crush.h new file mode 100644 index 000000000000..97e435b191f4 --- /dev/null +++ b/include/linux/crush/crush.h | |||
@@ -0,0 +1,180 @@ | |||
1 | #ifndef CEPH_CRUSH_CRUSH_H | ||
2 | #define CEPH_CRUSH_CRUSH_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | /* | ||
7 | * CRUSH is a pseudo-random data distribution algorithm that | ||
8 | * efficiently distributes input values (typically, data objects) | ||
9 | * across a heterogeneous, structured storage cluster. | ||
10 | * | ||
11 | * The algorithm was originally described in detail in this paper | ||
12 | * (although the algorithm has evolved somewhat since then): | ||
13 | * | ||
14 | * http://www.ssrc.ucsc.edu/Papers/weil-sc06.pdf | ||
15 | * | ||
16 | * LGPL2 | ||
17 | */ | ||
18 | |||
19 | |||
20 | #define CRUSH_MAGIC 0x00010000ul /* for detecting algorithm revisions */ | ||
21 | |||
22 | |||
23 | #define CRUSH_MAX_DEPTH 10 /* max crush hierarchy depth */ | ||
24 | #define CRUSH_MAX_SET 10 /* max size of a mapping result */ | ||
25 | |||
26 | |||
27 | /* | ||
28 | * CRUSH uses user-defined "rules" to describe how inputs should be | ||
 29 | * mapped to devices. A rule consists of a sequence of steps to perform | ||
30 | * to generate the set of output devices. | ||
31 | */ | ||
32 | struct crush_rule_step { | ||
33 | __u32 op; | ||
34 | __s32 arg1; | ||
35 | __s32 arg2; | ||
36 | }; | ||
37 | |||
38 | /* step op codes */ | ||
39 | enum { | ||
40 | CRUSH_RULE_NOOP = 0, | ||
41 | CRUSH_RULE_TAKE = 1, /* arg1 = value to start with */ | ||
42 | CRUSH_RULE_CHOOSE_FIRSTN = 2, /* arg1 = num items to pick */ | ||
43 | /* arg2 = type */ | ||
44 | CRUSH_RULE_CHOOSE_INDEP = 3, /* same */ | ||
45 | CRUSH_RULE_EMIT = 4, /* no args */ | ||
46 | CRUSH_RULE_CHOOSE_LEAF_FIRSTN = 6, | ||
47 | CRUSH_RULE_CHOOSE_LEAF_INDEP = 7, | ||
48 | }; | ||
49 | |||
50 | /* | ||
51 | * for specifying choose num (arg1) relative to the max parameter | ||
52 | * passed to do_rule | ||
53 | */ | ||
54 | #define CRUSH_CHOOSE_N 0 | ||
55 | #define CRUSH_CHOOSE_N_MINUS(x) (-(x)) | ||
56 | |||
57 | /* | ||
58 | * The rule mask is used to describe what the rule is intended for. | ||
59 | * Given a ruleset and size of output set, we search through the | ||
60 | * rule list for a matching rule_mask. | ||
61 | */ | ||
62 | struct crush_rule_mask { | ||
63 | __u8 ruleset; | ||
64 | __u8 type; | ||
65 | __u8 min_size; | ||
66 | __u8 max_size; | ||
67 | }; | ||
68 | |||
69 | struct crush_rule { | ||
70 | __u32 len; | ||
71 | struct crush_rule_mask mask; | ||
72 | struct crush_rule_step steps[0]; | ||
73 | }; | ||
74 | |||
75 | #define crush_rule_size(len) (sizeof(struct crush_rule) + \ | ||
76 | (len)*sizeof(struct crush_rule_step)) | ||
77 | |||
78 | |||
79 | |||
80 | /* | ||
81 | * A bucket is a named container of other items (either devices or | ||
82 | * other buckets). Items within a bucket are chosen using one of a | ||
83 | * few different algorithms. The table summarizes how the speed of | ||
84 | * each option measures up against mapping stability when items are | ||
85 | * added or removed. | ||
86 | * | ||
87 | * Bucket Alg Speed Additions Removals | ||
88 | * ------------------------------------------------ | ||
89 | * uniform O(1) poor poor | ||
90 | * list O(n) optimal poor | ||
91 | * tree O(log n) good good | ||
92 | * straw O(n) optimal optimal | ||
93 | */ | ||
94 | enum { | ||
95 | CRUSH_BUCKET_UNIFORM = 1, | ||
96 | CRUSH_BUCKET_LIST = 2, | ||
97 | CRUSH_BUCKET_TREE = 3, | ||
98 | CRUSH_BUCKET_STRAW = 4 | ||
99 | }; | ||
100 | extern const char *crush_bucket_alg_name(int alg); | ||
101 | |||
102 | struct crush_bucket { | ||
103 | __s32 id; /* this'll be negative */ | ||
104 | __u16 type; /* non-zero; type=0 is reserved for devices */ | ||
105 | __u8 alg; /* one of CRUSH_BUCKET_* */ | ||
106 | __u8 hash; /* which hash function to use, CRUSH_HASH_* */ | ||
107 | __u32 weight; /* 16-bit fixed point */ | ||
108 | __u32 size; /* num items */ | ||
109 | __s32 *items; | ||
110 | |||
111 | /* | ||
112 | * cached random permutation: used for uniform bucket and for | ||
113 | * the linear search fallback for the other bucket types. | ||
114 | */ | ||
115 | __u32 perm_x; /* @x for which *perm is defined */ | ||
116 | __u32 perm_n; /* num elements of *perm that are permuted/defined */ | ||
117 | __u32 *perm; | ||
118 | }; | ||
119 | |||
120 | struct crush_bucket_uniform { | ||
121 | struct crush_bucket h; | ||
122 | __u32 item_weight; /* 16-bit fixed point; all items equally weighted */ | ||
123 | }; | ||
124 | |||
125 | struct crush_bucket_list { | ||
126 | struct crush_bucket h; | ||
127 | __u32 *item_weights; /* 16-bit fixed point */ | ||
128 | __u32 *sum_weights; /* 16-bit fixed point. element i is sum | ||
129 | of weights 0..i, inclusive */ | ||
130 | }; | ||
131 | |||
132 | struct crush_bucket_tree { | ||
133 | struct crush_bucket h; /* note: h.size is _tree_ size, not number of | ||
134 | actual items */ | ||
135 | __u8 num_nodes; | ||
136 | __u32 *node_weights; | ||
137 | }; | ||
138 | |||
139 | struct crush_bucket_straw { | ||
140 | struct crush_bucket h; | ||
141 | __u32 *item_weights; /* 16-bit fixed point */ | ||
142 | __u32 *straws; /* 16-bit fixed point */ | ||
143 | }; | ||
144 | |||
145 | |||
146 | |||
147 | /* | ||
148 | * CRUSH map includes all buckets, rules, etc. | ||
149 | */ | ||
150 | struct crush_map { | ||
151 | struct crush_bucket **buckets; | ||
152 | struct crush_rule **rules; | ||
153 | |||
154 | /* | ||
 155 | * Parent pointers to identify the parent bucket of a device or | ||
156 | * bucket in the hierarchy. If an item appears more than | ||
157 | * once, this is the _last_ time it appeared (where buckets | ||
158 | * are processed in bucket id order, from -1 on down to | ||
 159 | * -max_buckets). | ||
160 | */ | ||
161 | __u32 *bucket_parents; | ||
162 | __u32 *device_parents; | ||
163 | |||
164 | __s32 max_buckets; | ||
165 | __u32 max_rules; | ||
166 | __s32 max_devices; | ||
167 | }; | ||
168 | |||
169 | |||
170 | /* crush.c */ | ||
171 | extern int crush_get_bucket_item_weight(struct crush_bucket *b, int pos); | ||
172 | extern void crush_calc_parents(struct crush_map *map); | ||
173 | extern void crush_destroy_bucket_uniform(struct crush_bucket_uniform *b); | ||
174 | extern void crush_destroy_bucket_list(struct crush_bucket_list *b); | ||
175 | extern void crush_destroy_bucket_tree(struct crush_bucket_tree *b); | ||
176 | extern void crush_destroy_bucket_straw(struct crush_bucket_straw *b); | ||
177 | extern void crush_destroy_bucket(struct crush_bucket *b); | ||
178 | extern void crush_destroy(struct crush_map *map); | ||
179 | |||
180 | #endif | ||
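To make the step opcodes concrete, here is a hedged illustration of the three steps a typical replicated placement rule would carry: take a starting bucket, descend to leaves of a given type, then emit the result. The bucket id (-1) and the type value (1) are made-up example values, and CRUSH_CHOOSE_N asks for as many items as the caller later passes to crush_do_rule() as result_max:

	static const struct crush_rule_step example_steps[] = {
		{ .op = CRUSH_RULE_TAKE,               .arg1 = -1,             .arg2 = 0 },
		{ .op = CRUSH_RULE_CHOOSE_LEAF_FIRSTN, .arg1 = CRUSH_CHOOSE_N, .arg2 = 1 },
		{ .op = CRUSH_RULE_EMIT,               .arg1 = 0,              .arg2 = 0 },
	};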
diff --git a/include/linux/crush/hash.h b/include/linux/crush/hash.h new file mode 100644 index 000000000000..91e884230d5d --- /dev/null +++ b/include/linux/crush/hash.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef CEPH_CRUSH_HASH_H | ||
2 | #define CEPH_CRUSH_HASH_H | ||
3 | |||
4 | #define CRUSH_HASH_RJENKINS1 0 | ||
5 | |||
6 | #define CRUSH_HASH_DEFAULT CRUSH_HASH_RJENKINS1 | ||
7 | |||
8 | extern const char *crush_hash_name(int type); | ||
9 | |||
10 | extern __u32 crush_hash32(int type, __u32 a); | ||
11 | extern __u32 crush_hash32_2(int type, __u32 a, __u32 b); | ||
12 | extern __u32 crush_hash32_3(int type, __u32 a, __u32 b, __u32 c); | ||
13 | extern __u32 crush_hash32_4(int type, __u32 a, __u32 b, __u32 c, __u32 d); | ||
14 | extern __u32 crush_hash32_5(int type, __u32 a, __u32 b, __u32 c, __u32 d, | ||
15 | __u32 e); | ||
16 | |||
17 | #endif | ||
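Each variant returns a 32-bit value from the Jenkins hash selected above; the multi-argument forms are how CRUSH mixes an input with bucket ids, item ids and retry counters so that placements look random but stay reproducible. A minimal hedged sketch (function and argument names are illustrative):

	static __u32 example_mix(__u32 input, __u32 item_id, __u32 attempt)
	{
		return crush_hash32_3(CRUSH_HASH_DEFAULT, input, item_id, attempt);
	}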
diff --git a/include/linux/crush/mapper.h b/include/linux/crush/mapper.h new file mode 100644 index 000000000000..c46b99c18bb0 --- /dev/null +++ b/include/linux/crush/mapper.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef CEPH_CRUSH_MAPPER_H | ||
2 | #define CEPH_CRUSH_MAPPER_H | ||
3 | |||
4 | /* | ||
 5 | * CRUSH functions for finding rules and then mapping an input to an | ||
6 | * output set. | ||
7 | * | ||
8 | * LGPL2 | ||
9 | */ | ||
10 | |||
11 | #include "crush.h" | ||
12 | |||
13 | extern int crush_find_rule(struct crush_map *map, int pool, int type, int size); | ||
14 | extern int crush_do_rule(struct crush_map *map, | ||
15 | int ruleno, | ||
16 | int x, int *result, int result_max, | ||
17 | int forcefeed, /* -1 for none */ | ||
18 | __u32 *weights); | ||
19 | |||
20 | #endif | ||
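Putting the two entry points together: crush_find_rule() resolves a ruleset to a concrete rule, then crush_do_rule() fills a result array with up to result_max device ids. A hedged sketch, assuming map, the pool/type/size triple and the weights array come from a decoded osdmap and that result has room for size entries:

	static int example_map_object(struct crush_map *map, int pool, int type,
				      int size, int x, __u32 *weights, int *result)
	{
		int ruleno = crush_find_rule(map, pool, type, size);

		if (ruleno < 0)
			return -1;  /* no rule matches this pool/type/size */

		/* forcefeed = -1: do not force a particular first device */
		return crush_do_rule(map, ruleno, x, result, size, -1, weights);
	}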
diff --git a/include/linux/dccp.h b/include/linux/dccp.h index 7434a8353e23..7187bd8a75f6 100644 --- a/include/linux/dccp.h +++ b/include/linux/dccp.h | |||
@@ -165,8 +165,10 @@ enum { | |||
165 | DCCPO_TIMESTAMP_ECHO = 42, | 165 | DCCPO_TIMESTAMP_ECHO = 42, |
166 | DCCPO_ELAPSED_TIME = 43, | 166 | DCCPO_ELAPSED_TIME = 43, |
167 | DCCPO_MAX = 45, | 167 | DCCPO_MAX = 45, |
168 | DCCPO_MIN_CCID_SPECIFIC = 128, | 168 | DCCPO_MIN_RX_CCID_SPECIFIC = 128, /* from sender to receiver */ |
169 | DCCPO_MAX_CCID_SPECIFIC = 255, | 169 | DCCPO_MAX_RX_CCID_SPECIFIC = 191, |
170 | DCCPO_MIN_TX_CCID_SPECIFIC = 192, /* from receiver to sender */ | ||
171 | DCCPO_MAX_TX_CCID_SPECIFIC = 255, | ||
170 | }; | 172 | }; |
171 | /* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */ | 173 | /* maximum size of a single TLV-encoded DCCP option (sans type/len bytes) */ |
172 | #define DCCP_SINGLE_OPT_MAXLEN 253 | 174 | #define DCCP_SINGLE_OPT_MAXLEN 253 |
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h index 29b3ce3f2a1d..2833452ea01c 100644 --- a/include/linux/debug_locks.h +++ b/include/linux/debug_locks.h | |||
@@ -49,7 +49,6 @@ struct task_struct; | |||
49 | 49 | ||
50 | #ifdef CONFIG_LOCKDEP | 50 | #ifdef CONFIG_LOCKDEP |
51 | extern void debug_show_all_locks(void); | 51 | extern void debug_show_all_locks(void); |
52 | extern void __debug_show_held_locks(struct task_struct *task); | ||
53 | extern void debug_show_held_locks(struct task_struct *task); | 52 | extern void debug_show_held_locks(struct task_struct *task); |
54 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); | 53 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); |
55 | extern void debug_check_no_locks_held(struct task_struct *task); | 54 | extern void debug_check_no_locks_held(struct task_struct *task); |
@@ -58,10 +57,6 @@ static inline void debug_show_all_locks(void) | |||
58 | { | 57 | { |
59 | } | 58 | } |
60 | 59 | ||
61 | static inline void __debug_show_held_locks(struct task_struct *task) | ||
62 | { | ||
63 | } | ||
64 | |||
65 | static inline void debug_show_held_locks(struct task_struct *task) | 60 | static inline void debug_show_held_locks(struct task_struct *task) |
66 | { | 61 | { |
67 | } | 62 | } |
diff --git a/include/linux/device.h b/include/linux/device.h index 516fecacf27b..dd4895313468 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -751,4 +751,11 @@ do { \ | |||
751 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) | 751 | MODULE_ALIAS("char-major-" __stringify(major) "-" __stringify(minor)) |
752 | #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ | 752 | #define MODULE_ALIAS_CHARDEV_MAJOR(major) \ |
753 | MODULE_ALIAS("char-major-" __stringify(major) "-*") | 753 | MODULE_ALIAS("char-major-" __stringify(major) "-*") |
754 | |||
755 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
756 | extern long sysfs_deprecated; | ||
757 | #else | ||
758 | #define sysfs_deprecated 0 | ||
759 | #endif | ||
760 | |||
754 | #endif /* _DEVICE_H_ */ | 761 | #endif /* _DEVICE_H_ */ |
diff --git a/include/linux/dlm.h b/include/linux/dlm.h index 0b3518c42356..d4e02f5353a0 100644 --- a/include/linux/dlm.h +++ b/include/linux/dlm.h | |||
@@ -48,10 +48,10 @@ typedef void dlm_lockspace_t; | |||
48 | * | 48 | * |
49 | * 0 if lock request was successful | 49 | * 0 if lock request was successful |
50 | * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE | 50 | * -EAGAIN if request would block and is flagged DLM_LKF_NOQUEUE |
51 | * -ENOMEM if there is no memory to process request | ||
52 | * -EINVAL if there are invalid parameters | ||
53 | * -DLM_EUNLOCK if unlock request was successful | 51 | * -DLM_EUNLOCK if unlock request was successful |
54 | * -DLM_ECANCEL if a cancel completed successfully | 52 | * -DLM_ECANCEL if a cancel completed successfully |
53 | * -EDEADLK if a deadlock was detected | ||
54 | * -ETIMEDOUT if the lock request was canceled due to a timeout | ||
55 | */ | 55 | */ |
56 | 56 | ||
57 | #define DLM_SBF_DEMOTED 0x01 | 57 | #define DLM_SBF_DEMOTED 0x01 |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index ce29b8151198..ba8319ae5fcc 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -102,6 +102,9 @@ static inline u64 dma_get_mask(struct device *dev) | |||
102 | return DMA_BIT_MASK(32); | 102 | return DMA_BIT_MASK(32); |
103 | } | 103 | } |
104 | 104 | ||
105 | #ifdef ARCH_HAS_DMA_SET_COHERENT_MASK | ||
106 | int dma_set_coherent_mask(struct device *dev, u64 mask); | ||
107 | #else | ||
105 | static inline int dma_set_coherent_mask(struct device *dev, u64 mask) | 108 | static inline int dma_set_coherent_mask(struct device *dev, u64 mask) |
106 | { | 109 | { |
107 | if (!dma_supported(dev, mask)) | 110 | if (!dma_supported(dev, mask)) |
@@ -109,6 +112,7 @@ static inline int dma_set_coherent_mask(struct device *dev, u64 mask) | |||
109 | dev->coherent_dma_mask = mask; | 112 | dev->coherent_dma_mask = mask; |
110 | return 0; | 113 | return 0; |
111 | } | 114 | } |
115 | #endif | ||
112 | 116 | ||
113 | extern u64 dma_get_required_mask(struct device *dev); | 117 | extern u64 dma_get_required_mask(struct device *dev); |
114 | 118 | ||
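Driver-side usage is unchanged by the ARCH_HAS_DMA_SET_COHERENT_MASK split; only the architecture may now provide its own out-of-line implementation. A hedged fragment from a probe path (pdev is assumed to be a valid struct pci_dev pointer):

	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -ENODEV;  /* device cannot do 32-bit coherent DMA */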
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h index c61d4ca27bcc..e2106495cc11 100644 --- a/include/linux/dmaengine.h +++ b/include/linux/dmaengine.h | |||
@@ -548,7 +548,7 @@ static inline bool dma_dev_has_pq_continue(struct dma_device *dma) | |||
548 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; | 548 | return (dma->max_pq & DMA_HAS_PQ_CONTINUE) == DMA_HAS_PQ_CONTINUE; |
549 | } | 549 | } |
550 | 550 | ||
551 | static unsigned short dma_dev_to_maxpq(struct dma_device *dma) | 551 | static inline unsigned short dma_dev_to_maxpq(struct dma_device *dma) |
552 | { | 552 | { |
553 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; | 553 | return dma->max_pq & ~DMA_HAS_PQ_CONTINUE; |
554 | } | 554 | } |
diff --git a/include/linux/dmar.h b/include/linux/dmar.h index d7cecc90ed34..a7d9dc21391d 100644 --- a/include/linux/dmar.h +++ b/include/linux/dmar.h | |||
@@ -57,15 +57,15 @@ extern int dmar_table_init(void); | |||
57 | extern int dmar_dev_scope_init(void); | 57 | extern int dmar_dev_scope_init(void); |
58 | 58 | ||
59 | /* Intel IOMMU detection */ | 59 | /* Intel IOMMU detection */ |
60 | extern void detect_intel_iommu(void); | 60 | extern int detect_intel_iommu(void); |
61 | extern int enable_drhd_fault_handling(void); | 61 | extern int enable_drhd_fault_handling(void); |
62 | 62 | ||
63 | extern int parse_ioapics_under_ir(void); | 63 | extern int parse_ioapics_under_ir(void); |
64 | extern int alloc_iommu(struct dmar_drhd_unit *); | 64 | extern int alloc_iommu(struct dmar_drhd_unit *); |
65 | #else | 65 | #else |
66 | static inline void detect_intel_iommu(void) | 66 | static inline int detect_intel_iommu(void) |
67 | { | 67 | { |
68 | return; | 68 | return -ENODEV; |
69 | } | 69 | } |
70 | 70 | ||
71 | static inline int dmar_table_init(void) | 71 | static inline int dmar_table_init(void) |
@@ -106,6 +106,7 @@ struct irte { | |||
106 | __u64 high; | 106 | __u64 high; |
107 | }; | 107 | }; |
108 | }; | 108 | }; |
109 | |||
109 | #ifdef CONFIG_INTR_REMAP | 110 | #ifdef CONFIG_INTR_REMAP |
110 | extern int intr_remapping_enabled; | 111 | extern int intr_remapping_enabled; |
111 | extern int intr_remapping_supported(void); | 112 | extern int intr_remapping_supported(void); |
@@ -119,11 +120,8 @@ extern int alloc_irte(struct intel_iommu *iommu, int irq, u16 count); | |||
119 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, | 120 | extern int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, |
120 | u16 sub_handle); | 121 | u16 sub_handle); |
121 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); | 122 | extern int map_irq_to_irte_handle(int irq, u16 *sub_handle); |
122 | extern int clear_irte_irq(int irq, struct intel_iommu *iommu, u16 index); | ||
123 | extern int flush_irte(int irq); | ||
124 | extern int free_irte(int irq); | 123 | extern int free_irte(int irq); |
125 | 124 | ||
126 | extern int irq_remapped(int irq); | ||
127 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); | 125 | extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); |
128 | extern struct intel_iommu *map_ioapic_to_ir(int apic); | 126 | extern struct intel_iommu *map_ioapic_to_ir(int apic); |
129 | extern struct intel_iommu *map_hpet_to_ir(u8 id); | 127 | extern struct intel_iommu *map_hpet_to_ir(u8 id); |
@@ -177,7 +175,6 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
177 | return 0; | 175 | return 0; |
178 | } | 176 | } |
179 | 177 | ||
180 | #define irq_remapped(irq) (0) | ||
181 | #define enable_intr_remapping(mode) (-1) | 178 | #define enable_intr_remapping(mode) (-1) |
182 | #define disable_intr_remapping() (0) | 179 | #define disable_intr_remapping() (0) |
183 | #define reenable_intr_remapping(mode) (0) | 180 | #define reenable_intr_remapping(mode) (0) |
@@ -187,8 +184,9 @@ static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev) | |||
187 | /* Can't use the common MSI interrupt functions | 184 | /* Can't use the common MSI interrupt functions |
188 | * since DMAR is not a pci device | 185 | * since DMAR is not a pci device |
189 | */ | 186 | */ |
190 | extern void dmar_msi_unmask(unsigned int irq); | 187 | struct irq_data; |
191 | extern void dmar_msi_mask(unsigned int irq); | 188 | extern void dmar_msi_unmask(struct irq_data *data); |
189 | extern void dmar_msi_mask(struct irq_data *data); | ||
192 | extern void dmar_msi_read(int irq, struct msi_msg *msg); | 190 | extern void dmar_msi_read(int irq, struct msi_msg *msg); |
193 | extern void dmar_msi_write(int irq, struct msi_msg *msg); | 191 | extern void dmar_msi_write(int irq, struct msi_msg *msg); |
194 | extern int dmar_set_interrupt(struct intel_iommu *iommu); | 192 | extern int dmar_set_interrupt(struct intel_iommu *iommu); |
diff --git a/include/linux/drbd.h b/include/linux/drbd.h index 479ee3a1d901..9b2a0158f399 100644 --- a/include/linux/drbd.h +++ b/include/linux/drbd.h | |||
@@ -53,10 +53,10 @@ | |||
53 | 53 | ||
54 | 54 | ||
55 | extern const char *drbd_buildtag(void); | 55 | extern const char *drbd_buildtag(void); |
56 | #define REL_VERSION "8.3.8.1" | 56 | #define REL_VERSION "8.3.9rc2" |
57 | #define API_VERSION 88 | 57 | #define API_VERSION 88 |
58 | #define PRO_VERSION_MIN 86 | 58 | #define PRO_VERSION_MIN 86 |
59 | #define PRO_VERSION_MAX 94 | 59 | #define PRO_VERSION_MAX 95 |
60 | 60 | ||
61 | 61 | ||
62 | enum drbd_io_error_p { | 62 | enum drbd_io_error_p { |
@@ -91,6 +91,11 @@ enum drbd_after_sb_p { | |||
91 | ASB_VIOLENTLY | 91 | ASB_VIOLENTLY |
92 | }; | 92 | }; |
93 | 93 | ||
94 | enum drbd_on_no_data { | ||
95 | OND_IO_ERROR, | ||
96 | OND_SUSPEND_IO | ||
97 | }; | ||
98 | |||
94 | /* KEEP the order, do not delete or insert. Only append. */ | 99 | /* KEEP the order, do not delete or insert. Only append. */ |
95 | enum drbd_ret_codes { | 100 | enum drbd_ret_codes { |
96 | ERR_CODE_BASE = 100, | 101 | ERR_CODE_BASE = 100, |
@@ -140,6 +145,7 @@ enum drbd_ret_codes { | |||
140 | ERR_CONNECTED = 151, /* DRBD 8.3 only */ | 145 | ERR_CONNECTED = 151, /* DRBD 8.3 only */ |
141 | ERR_PERM = 152, | 146 | ERR_PERM = 152, |
142 | ERR_NEED_APV_93 = 153, | 147 | ERR_NEED_APV_93 = 153, |
148 | ERR_STONITH_AND_PROT_A = 154, | ||
143 | 149 | ||
144 | /* insert new ones above this line */ | 150 | /* insert new ones above this line */ |
145 | AFTER_LAST_ERR_CODE | 151 | AFTER_LAST_ERR_CODE |
@@ -226,13 +232,17 @@ union drbd_state { | |||
226 | unsigned conn:5 ; /* 17/32 cstates */ | 232 | unsigned conn:5 ; /* 17/32 cstates */ |
227 | unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ | 233 | unsigned disk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ |
228 | unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ | 234 | unsigned pdsk:4 ; /* 8/16 from D_DISKLESS to D_UP_TO_DATE */ |
229 | unsigned susp:1 ; /* 2/2 IO suspended no/yes */ | 235 | unsigned susp:1 ; /* 2/2 IO suspended no/yes (by user) */ |
230 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ | 236 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ |
231 | unsigned peer_isp:1 ; | 237 | unsigned peer_isp:1 ; |
232 | unsigned user_isp:1 ; | 238 | unsigned user_isp:1 ; |
233 | unsigned _pad:11; /* 0 unused */ | 239 | unsigned susp_nod:1 ; /* IO suspended because no data */ |
240 | unsigned susp_fen:1 ; /* IO suspended because fence peer handler runs*/ | ||
241 | unsigned _pad:9; /* 0 unused */ | ||
234 | #elif defined(__BIG_ENDIAN_BITFIELD) | 242 | #elif defined(__BIG_ENDIAN_BITFIELD) |
235 | unsigned _pad:11; /* 0 unused */ | 243 | unsigned _pad:9; |
244 | unsigned susp_fen:1 ; | ||
245 | unsigned susp_nod:1 ; | ||
236 | unsigned user_isp:1 ; | 246 | unsigned user_isp:1 ; |
237 | unsigned peer_isp:1 ; | 247 | unsigned peer_isp:1 ; |
238 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ | 248 | unsigned aftr_isp:1 ; /* isp .. imposed sync pause */ |
@@ -312,6 +322,8 @@ enum drbd_timeout_flag { | |||
312 | 322 | ||
313 | #define DRBD_MAGIC 0x83740267 | 323 | #define DRBD_MAGIC 0x83740267 |
314 | #define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC) | 324 | #define BE_DRBD_MAGIC __constant_cpu_to_be32(DRBD_MAGIC) |
325 | #define DRBD_MAGIC_BIG 0x835a | ||
326 | #define BE_DRBD_MAGIC_BIG __constant_cpu_to_be16(DRBD_MAGIC_BIG) | ||
315 | 327 | ||
316 | /* these are of type "int" */ | 328 | /* these are of type "int" */ |
317 | #define DRBD_MD_INDEX_INTERNAL -1 | 329 | #define DRBD_MD_INDEX_INTERNAL -1 |
diff --git a/include/linux/drbd_limits.h b/include/linux/drbd_limits.h index 440b42e38e89..4ac33f34b77e 100644 --- a/include/linux/drbd_limits.h +++ b/include/linux/drbd_limits.h | |||
@@ -128,26 +128,31 @@ | |||
128 | #define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT | 128 | #define DRBD_AFTER_SB_1P_DEF ASB_DISCONNECT |
129 | #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT | 129 | #define DRBD_AFTER_SB_2P_DEF ASB_DISCONNECT |
130 | #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT | 130 | #define DRBD_RR_CONFLICT_DEF ASB_DISCONNECT |
131 | #define DRBD_ON_NO_DATA_DEF OND_IO_ERROR | ||
131 | 132 | ||
132 | #define DRBD_MAX_BIO_BVECS_MIN 0 | 133 | #define DRBD_MAX_BIO_BVECS_MIN 0 |
133 | #define DRBD_MAX_BIO_BVECS_MAX 128 | 134 | #define DRBD_MAX_BIO_BVECS_MAX 128 |
134 | #define DRBD_MAX_BIO_BVECS_DEF 0 | 135 | #define DRBD_MAX_BIO_BVECS_DEF 0 |
135 | 136 | ||
136 | #define DRBD_DP_VOLUME_MIN 4 | 137 | #define DRBD_C_PLAN_AHEAD_MIN 0 |
137 | #define DRBD_DP_VOLUME_MAX 1048576 | 138 | #define DRBD_C_PLAN_AHEAD_MAX 300 |
138 | #define DRBD_DP_VOLUME_DEF 16384 | 139 | #define DRBD_C_PLAN_AHEAD_DEF 0 /* RS rate controller disabled by default */ |
139 | 140 | ||
140 | #define DRBD_DP_INTERVAL_MIN 1 | 141 | #define DRBD_C_DELAY_TARGET_MIN 1 |
141 | #define DRBD_DP_INTERVAL_MAX 600 | 142 | #define DRBD_C_DELAY_TARGET_MAX 100 |
142 | #define DRBD_DP_INTERVAL_DEF 5 | 143 | #define DRBD_C_DELAY_TARGET_DEF 10 |
143 | 144 | ||
144 | #define DRBD_RS_THROTTLE_TH_MIN 1 | 145 | #define DRBD_C_FILL_TARGET_MIN 0 |
145 | #define DRBD_RS_THROTTLE_TH_MAX 600 | 146 | #define DRBD_C_FILL_TARGET_MAX (1<<20) /* 500MByte in sec */ |
146 | #define DRBD_RS_THROTTLE_TH_DEF 20 | 147 | #define DRBD_C_FILL_TARGET_DEF 0 /* By default disabled -> controlled by delay_target */ |
147 | 148 | ||
148 | #define DRBD_RS_HOLD_OFF_TH_MIN 1 | 149 | #define DRBD_C_MAX_RATE_MIN 250 /* kByte/sec */ |
149 | #define DRBD_RS_HOLD_OFF_TH_MAX 6000 | 150 | #define DRBD_C_MAX_RATE_MAX (4 << 20) |
150 | #define DRBD_RS_HOLD_OFF_TH_DEF 100 | 151 | #define DRBD_C_MAX_RATE_DEF 102400 |
152 | |||
153 | #define DRBD_C_MIN_RATE_MIN 0 /* kByte/sec */ | ||
154 | #define DRBD_C_MIN_RATE_MAX (4 << 20) | ||
155 | #define DRBD_C_MIN_RATE_DEF 4096 | ||
151 | 156 | ||
152 | #undef RANGE | 157 | #undef RANGE |
153 | #endif | 158 | #endif |
diff --git a/include/linux/drbd_nl.h b/include/linux/drbd_nl.h index 5f042810a56c..ade91107c9a5 100644 --- a/include/linux/drbd_nl.h +++ b/include/linux/drbd_nl.h | |||
@@ -87,6 +87,12 @@ NL_PACKET(syncer_conf, 8, | |||
87 | NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) | 87 | NL_STRING( 51, T_MAY_IGNORE, cpu_mask, 32) |
88 | NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) | 88 | NL_STRING( 64, T_MAY_IGNORE, csums_alg, SHARED_SECRET_MAX) |
89 | NL_BIT( 65, T_MAY_IGNORE, use_rle) | 89 | NL_BIT( 65, T_MAY_IGNORE, use_rle) |
90 | NL_INTEGER( 75, T_MAY_IGNORE, on_no_data) | ||
91 | NL_INTEGER( 76, T_MAY_IGNORE, c_plan_ahead) | ||
92 | NL_INTEGER( 77, T_MAY_IGNORE, c_delay_target) | ||
93 | NL_INTEGER( 78, T_MAY_IGNORE, c_fill_target) | ||
94 | NL_INTEGER( 79, T_MAY_IGNORE, c_max_rate) | ||
95 | NL_INTEGER( 80, T_MAY_IGNORE, c_min_rate) | ||
90 | ) | 96 | ) |
91 | 97 | ||
92 | NL_PACKET(invalidate, 9, ) | 98 | NL_PACKET(invalidate, 9, ) |
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index 52c0da4bdd18..a90b3892074a 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _DYNAMIC_DEBUG_H | 1 | #ifndef _DYNAMIC_DEBUG_H |
2 | #define _DYNAMIC_DEBUG_H | 2 | #define _DYNAMIC_DEBUG_H |
3 | 3 | ||
4 | #include <linux/jump_label.h> | ||
5 | |||
4 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which | 6 | /* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which |
5 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They | 7 | * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They |
6 | * use independent hash functions, to reduce the chance of false positives. | 8 | * use independent hash functions, to reduce the chance of false positives. |
@@ -22,8 +24,6 @@ struct _ddebug { | |||
22 | const char *function; | 24 | const char *function; |
23 | const char *filename; | 25 | const char *filename; |
24 | const char *format; | 26 | const char *format; |
25 | char primary_hash; | ||
26 | char secondary_hash; | ||
27 | unsigned int lineno:24; | 27 | unsigned int lineno:24; |
28 | /* | 28 | /* |
29 | * The flags field controls the behaviour at the callsite. | 29 | * The flags field controls the behaviour at the callsite. |
@@ -33,6 +33,7 @@ struct _ddebug { | |||
33 | #define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ | 33 | #define _DPRINTK_FLAGS_PRINT (1<<0) /* printk() a message using the format */ |
34 | #define _DPRINTK_FLAGS_DEFAULT 0 | 34 | #define _DPRINTK_FLAGS_DEFAULT 0 |
35 | unsigned int flags:8; | 35 | unsigned int flags:8; |
36 | char enabled; | ||
36 | } __attribute__((aligned(8))); | 37 | } __attribute__((aligned(8))); |
37 | 38 | ||
38 | 39 | ||
@@ -42,33 +43,35 @@ int ddebug_add_module(struct _ddebug *tab, unsigned int n, | |||
42 | #if defined(CONFIG_DYNAMIC_DEBUG) | 43 | #if defined(CONFIG_DYNAMIC_DEBUG) |
43 | extern int ddebug_remove_module(const char *mod_name); | 44 | extern int ddebug_remove_module(const char *mod_name); |
44 | 45 | ||
45 | #define __dynamic_dbg_enabled(dd) ({ \ | ||
46 | int __ret = 0; \ | ||
47 | if (unlikely((dynamic_debug_enabled & (1LL << DEBUG_HASH)) && \ | ||
48 | (dynamic_debug_enabled2 & (1LL << DEBUG_HASH2)))) \ | ||
49 | if (unlikely(dd.flags)) \ | ||
50 | __ret = 1; \ | ||
51 | __ret; }) | ||
52 | |||
53 | #define dynamic_pr_debug(fmt, ...) do { \ | 46 | #define dynamic_pr_debug(fmt, ...) do { \ |
47 | __label__ do_printk; \ | ||
48 | __label__ out; \ | ||
54 | static struct _ddebug descriptor \ | 49 | static struct _ddebug descriptor \ |
55 | __used \ | 50 | __used \ |
56 | __attribute__((section("__verbose"), aligned(8))) = \ | 51 | __attribute__((section("__verbose"), aligned(8))) = \ |
57 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ | 52 | { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \ |
58 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ | 53 | _DPRINTK_FLAGS_DEFAULT }; \ |
59 | if (__dynamic_dbg_enabled(descriptor)) \ | 54 | JUMP_LABEL(&descriptor.enabled, do_printk); \ |
60 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ | 55 | goto out; \ |
56 | do_printk: \ | ||
57 | printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); \ | ||
58 | out: ; \ | ||
61 | } while (0) | 59 | } while (0) |
62 | 60 | ||
63 | 61 | ||
64 | #define dynamic_dev_dbg(dev, fmt, ...) do { \ | 62 | #define dynamic_dev_dbg(dev, fmt, ...) do { \ |
63 | __label__ do_printk; \ | ||
64 | __label__ out; \ | ||
65 | static struct _ddebug descriptor \ | 65 | static struct _ddebug descriptor \ |
66 | __used \ | 66 | __used \ |
67 | __attribute__((section("__verbose"), aligned(8))) = \ | 67 | __attribute__((section("__verbose"), aligned(8))) = \ |
68 | { KBUILD_MODNAME, __func__, __FILE__, fmt, DEBUG_HASH, \ | 68 | { KBUILD_MODNAME, __func__, __FILE__, fmt, __LINE__, \ |
69 | DEBUG_HASH2, __LINE__, _DPRINTK_FLAGS_DEFAULT }; \ | 69 | _DPRINTK_FLAGS_DEFAULT }; \ |
70 | if (__dynamic_dbg_enabled(descriptor)) \ | 70 | JUMP_LABEL(&descriptor.enabled, do_printk); \ |
71 | dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ | 71 | goto out; \ |
72 | do_printk: \ | ||
73 | dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \ | ||
74 | out: ; \ | ||
72 | } while (0) | 75 | } while (0) |
73 | 76 | ||
74 | #else | 77 | #else |
@@ -80,7 +83,7 @@ static inline int ddebug_remove_module(const char *mod) | |||
80 | 83 | ||
81 | #define dynamic_pr_debug(fmt, ...) \ | 84 | #define dynamic_pr_debug(fmt, ...) \ |
82 | do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) | 85 | do { if (0) printk(KERN_DEBUG pr_fmt(fmt), ##__VA_ARGS__); } while (0) |
83 | #define dynamic_dev_dbg(dev, format, ...) \ | 86 | #define dynamic_dev_dbg(dev, fmt, ...) \ |
84 | do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) | 87 | do { if (0) dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); } while (0) |
85 | #endif | 88 | #endif |
86 | 89 | ||
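Call sites do not change with the move from the DEBUG_HASH scheme to JUMP_LABEL(); while a descriptor is disabled, the jump label turns the whole statement into a fall-through past the printk. A hedged sketch (pr_debug() normally expands to dynamic_pr_debug() under CONFIG_DYNAMIC_DEBUG; the function and format string are illustrative):

	static void example_trace(int qid, unsigned int pending)
	{
		/* a no-op jump until enabled through <debugfs>/dynamic_debug/control */
		dynamic_pr_debug("queue %d: %u requests pending\n", qid, pending);
	}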
diff --git a/include/linux/early_res.h b/include/linux/early_res.h deleted file mode 100644 index 29c09f57a13c..000000000000 --- a/include/linux/early_res.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | #ifndef _LINUX_EARLY_RES_H | ||
2 | #define _LINUX_EARLY_RES_H | ||
3 | #ifdef __KERNEL__ | ||
4 | |||
5 | extern void reserve_early(u64 start, u64 end, char *name); | ||
6 | extern void reserve_early_overlap_ok(u64 start, u64 end, char *name); | ||
7 | extern void free_early(u64 start, u64 end); | ||
8 | void free_early_partial(u64 start, u64 end); | ||
9 | extern void early_res_to_bootmem(u64 start, u64 end); | ||
10 | |||
11 | void reserve_early_without_check(u64 start, u64 end, char *name); | ||
12 | u64 find_early_area(u64 ei_start, u64 ei_last, u64 start, u64 end, | ||
13 | u64 size, u64 align); | ||
14 | u64 find_early_area_size(u64 ei_start, u64 ei_last, u64 start, | ||
15 | u64 *sizep, u64 align); | ||
16 | u64 find_fw_memmap_area(u64 start, u64 end, u64 size, u64 align); | ||
17 | u64 get_max_mapped(void); | ||
18 | #include <linux/range.h> | ||
19 | int get_free_all_memory_range(struct range **rangep, int nodeid); | ||
20 | |||
21 | #endif /* __KERNEL__ */ | ||
22 | |||
23 | #endif /* _LINUX_EARLY_RES_H */ | ||
diff --git a/include/linux/edac.h b/include/linux/edac.h index 7cf92e8a4196..36c66443bdfd 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #define _LINUX_EDAC_H_ | 13 | #define _LINUX_EDAC_H_ |
14 | 14 | ||
15 | #include <asm/atomic.h> | 15 | #include <asm/atomic.h> |
16 | #include <linux/sysdev.h> | ||
16 | 17 | ||
17 | #define EDAC_OPSTATE_INVAL -1 | 18 | #define EDAC_OPSTATE_INVAL -1 |
18 | #define EDAC_OPSTATE_POLL 0 | 19 | #define EDAC_OPSTATE_POLL 0 |
@@ -22,9 +23,12 @@ | |||
22 | extern int edac_op_state; | 23 | extern int edac_op_state; |
23 | extern int edac_err_assert; | 24 | extern int edac_err_assert; |
24 | extern atomic_t edac_handlers; | 25 | extern atomic_t edac_handlers; |
26 | extern struct sysdev_class edac_class; | ||
25 | 27 | ||
26 | extern int edac_handler_set(void); | 28 | extern int edac_handler_set(void); |
27 | extern void edac_atomic_assert_error(void); | 29 | extern void edac_atomic_assert_error(void); |
30 | extern struct sysdev_class *edac_get_sysfs_class(void); | ||
31 | extern void edac_put_sysfs_class(void); | ||
28 | 32 | ||
29 | static inline void opstate_init(void) | 33 | static inline void opstate_init(void) |
30 | { | 34 | { |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 2c958f4fce1e..4fd978e7eb83 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -93,6 +93,7 @@ struct elevator_queue | |||
93 | struct elevator_type *elevator_type; | 93 | struct elevator_type *elevator_type; |
94 | struct mutex sysfs_lock; | 94 | struct mutex sysfs_lock; |
95 | struct hlist_head *hash; | 95 | struct hlist_head *hash; |
96 | unsigned int registered:1; | ||
96 | }; | 97 | }; |
97 | 98 | ||
98 | /* | 99 | /* |
@@ -136,6 +137,7 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t); | |||
136 | 137 | ||
137 | extern int elevator_init(struct request_queue *, char *); | 138 | extern int elevator_init(struct request_queue *, char *); |
138 | extern void elevator_exit(struct elevator_queue *); | 139 | extern void elevator_exit(struct elevator_queue *); |
140 | extern int elevator_change(struct request_queue *, const char *); | ||
139 | extern int elv_rq_merge_ok(struct request *, struct bio *); | 141 | extern int elv_rq_merge_ok(struct request *, struct bio *); |
140 | 142 | ||
141 | /* | 143 | /* |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 2308fbb4523a..f16a01081e15 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
@@ -71,7 +71,7 @@ static inline int is_zero_ether_addr(const u8 *addr) | |||
71 | */ | 71 | */ |
72 | static inline int is_multicast_ether_addr(const u8 *addr) | 72 | static inline int is_multicast_ether_addr(const u8 *addr) |
73 | { | 73 | { |
74 | return (0x01 & addr[0]); | 74 | return 0x01 & addr[0]; |
75 | } | 75 | } |
76 | 76 | ||
77 | /** | 77 | /** |
@@ -82,7 +82,7 @@ static inline int is_multicast_ether_addr(const u8 *addr) | |||
82 | */ | 82 | */ |
83 | static inline int is_local_ether_addr(const u8 *addr) | 83 | static inline int is_local_ether_addr(const u8 *addr) |
84 | { | 84 | { |
85 | return (0x02 & addr[0]); | 85 | return 0x02 & addr[0]; |
86 | } | 86 | } |
87 | 87 | ||
88 | /** | 88 | /** |
@@ -237,13 +237,29 @@ static inline bool is_etherdev_addr(const struct net_device *dev, | |||
237 | * entry points. | 237 | * entry points. |
238 | */ | 238 | */ |
239 | 239 | ||
240 | static inline int compare_ether_header(const void *a, const void *b) | 240 | static inline unsigned long compare_ether_header(const void *a, const void *b) |
241 | { | 241 | { |
242 | #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64 | ||
243 | unsigned long fold; | ||
244 | |||
245 | /* | ||
246 | * We want to compare 14 bytes: | ||
247 | * [a0 ... a13] ^ [b0 ... b13] | ||
248 | * Use two long XOR, ORed together, with an overlap of two bytes. | ||
249 | * [a0 a1 a2 a3 a4 a5 a6 a7 ] ^ [b0 b1 b2 b3 b4 b5 b6 b7 ] | | ||
250 | * [a6 a7 a8 a9 a10 a11 a12 a13] ^ [b6 b7 b8 b9 b10 b11 b12 b13] | ||
251 | * This means the [a6 a7] ^ [b6 b7] part is done two times. | ||
252 | */ | ||
253 | fold = *(unsigned long *)a ^ *(unsigned long *)b; | ||
254 | fold |= *(unsigned long *)(a + 6) ^ *(unsigned long *)(b + 6); | ||
255 | return fold; | ||
256 | #else | ||
242 | u32 *a32 = (u32 *)((u8 *)a + 2); | 257 | u32 *a32 = (u32 *)((u8 *)a + 2); |
243 | u32 *b32 = (u32 *)((u8 *)b + 2); | 258 | u32 *b32 = (u32 *)((u8 *)b + 2); |
244 | 259 | ||
245 | return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | | 260 | return (*(u16 *)a ^ *(u16 *)b) | (a32[0] ^ b32[0]) | |
246 | (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); | 261 | (a32[1] ^ b32[1]) | (a32[2] ^ b32[2]); |
262 | #endif | ||
247 | } | 263 | } |
248 | 264 | ||
249 | #endif /* _LINUX_ETHERDEVICE_H */ | 265 | #endif /* _LINUX_ETHERDEVICE_H */ |
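The 64-bit fast path covers the full 14-byte Ethernet header (two 6-byte MAC addresses plus the 2-byte type field) with two 8-byte loads that overlap by two bytes, so either variant returns zero exactly when every header byte matches. A hedged usage sketch (the wrapper name is made up):

	static bool example_headers_equal(const struct ethhdr *a, const struct ethhdr *b)
	{
		return compare_ether_header(a, b) == 0;
	}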
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index 991269e5b152..6628a507fd3b 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define _LINUX_ETHTOOL_H | 14 | #define _LINUX_ETHTOOL_H |
15 | 15 | ||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/if_ether.h> | ||
17 | 18 | ||
18 | /* This should work for both 32 and 64 bit userland. */ | 19 | /* This should work for both 32 and 64 bit userland. */ |
19 | struct ethtool_cmd { | 20 | struct ethtool_cmd { |
@@ -308,15 +309,28 @@ struct ethtool_perm_addr { | |||
308 | * flag differs from the read-only value. | 309 | * flag differs from the read-only value. |
309 | */ | 310 | */ |
310 | enum ethtool_flags { | 311 | enum ethtool_flags { |
312 | ETH_FLAG_TXVLAN = (1 << 7), /* TX VLAN offload enabled */ | ||
313 | ETH_FLAG_RXVLAN = (1 << 8), /* RX VLAN offload enabled */ | ||
311 | ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ | 314 | ETH_FLAG_LRO = (1 << 15), /* LRO is enabled */ |
312 | ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ | 315 | ETH_FLAG_NTUPLE = (1 << 27), /* N-tuple filters enabled */ |
313 | ETH_FLAG_RXHASH = (1 << 28), | 316 | ETH_FLAG_RXHASH = (1 << 28), |
314 | }; | 317 | }; |
315 | 318 | ||
316 | /* The following structures are for supporting RX network flow | 319 | /* The following structures are for supporting RX network flow |
317 | * classification configuration. Note, all multibyte fields, e.g., | 320 | * classification and RX n-tuple configuration. Note, all multibyte |
318 | * ip4src, ip4dst, psrc, pdst, spi, etc. are expected to be in network | 321 | * fields, e.g., ip4src, ip4dst, psrc, pdst, spi, etc. are expected to |
319 | * byte order. | 322 | * be in network byte order. |
323 | */ | ||
324 | |||
325 | /** | ||
326 | * struct ethtool_tcpip4_spec - flow specification for TCP/IPv4 etc. | ||
327 | * @ip4src: Source host | ||
328 | * @ip4dst: Destination host | ||
329 | * @psrc: Source port | ||
330 | * @pdst: Destination port | ||
331 | * @tos: Type-of-service | ||
332 | * | ||
333 | * This can be used to specify a TCP/IPv4, UDP/IPv4 or SCTP/IPv4 flow. | ||
320 | */ | 334 | */ |
321 | struct ethtool_tcpip4_spec { | 335 | struct ethtool_tcpip4_spec { |
322 | __be32 ip4src; | 336 | __be32 ip4src; |
@@ -326,6 +340,15 @@ struct ethtool_tcpip4_spec { | |||
326 | __u8 tos; | 340 | __u8 tos; |
327 | }; | 341 | }; |
328 | 342 | ||
343 | /** | ||
344 | * struct ethtool_ah_espip4_spec - flow specification for IPsec/IPv4 | ||
345 | * @ip4src: Source host | ||
346 | * @ip4dst: Destination host | ||
347 | * @spi: Security parameters index | ||
348 | * @tos: Type-of-service | ||
349 | * | ||
350 | * This can be used to specify an IPsec transport or tunnel over IPv4. | ||
351 | */ | ||
329 | struct ethtool_ah_espip4_spec { | 352 | struct ethtool_ah_espip4_spec { |
330 | __be32 ip4src; | 353 | __be32 ip4src; |
331 | __be32 ip4dst; | 354 | __be32 ip4dst; |
@@ -333,21 +356,17 @@ struct ethtool_ah_espip4_spec { | |||
333 | __u8 tos; | 356 | __u8 tos; |
334 | }; | 357 | }; |
335 | 358 | ||
336 | struct ethtool_rawip4_spec { | ||
337 | __be32 ip4src; | ||
338 | __be32 ip4dst; | ||
339 | __u8 hdata[64]; | ||
340 | }; | ||
341 | |||
342 | struct ethtool_ether_spec { | ||
343 | __be16 ether_type; | ||
344 | __u8 frame_size; | ||
345 | __u8 eframe[16]; | ||
346 | }; | ||
347 | |||
348 | #define ETH_RX_NFC_IP4 1 | 359 | #define ETH_RX_NFC_IP4 1 |
349 | #define ETH_RX_NFC_IP6 2 | ||
350 | 360 | ||
361 | /** | ||
362 | * struct ethtool_usrip4_spec - general flow specification for IPv4 | ||
363 | * @ip4src: Source host | ||
364 | * @ip4dst: Destination host | ||
365 | * @l4_4_bytes: First 4 bytes of transport (layer 4) header | ||
366 | * @tos: Type-of-service | ||
367 | * @ip_ver: Value must be %ETH_RX_NFC_IP4; mask must be 0 | ||
368 | * @proto: Transport protocol number; mask must be 0 | ||
369 | */ | ||
351 | struct ethtool_usrip4_spec { | 370 | struct ethtool_usrip4_spec { |
352 | __be32 ip4src; | 371 | __be32 ip4src; |
353 | __be32 ip4dst; | 372 | __be32 ip4dst; |
@@ -357,6 +376,15 @@ struct ethtool_usrip4_spec { | |||
357 | __u8 proto; | 376 | __u8 proto; |
358 | }; | 377 | }; |
359 | 378 | ||
379 | /** | ||
380 | * struct ethtool_rx_flow_spec - specification for RX flow filter | ||
381 | * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW | ||
382 | * @h_u: Flow fields to match (dependent on @flow_type) | ||
383 | * @m_u: Masks for flow field bits to be ignored | ||
384 | * @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC | ||
385 | * if packets should be discarded | ||
386 | * @location: Index of filter in hardware table | ||
387 | */ | ||
360 | struct ethtool_rx_flow_spec { | 388 | struct ethtool_rx_flow_spec { |
361 | __u32 flow_type; | 389 | __u32 flow_type; |
362 | union { | 390 | union { |
@@ -365,36 +393,91 @@ struct ethtool_rx_flow_spec { | |||
365 | struct ethtool_tcpip4_spec sctp_ip4_spec; | 393 | struct ethtool_tcpip4_spec sctp_ip4_spec; |
366 | struct ethtool_ah_espip4_spec ah_ip4_spec; | 394 | struct ethtool_ah_espip4_spec ah_ip4_spec; |
367 | struct ethtool_ah_espip4_spec esp_ip4_spec; | 395 | struct ethtool_ah_espip4_spec esp_ip4_spec; |
368 | struct ethtool_rawip4_spec raw_ip4_spec; | ||
369 | struct ethtool_ether_spec ether_spec; | ||
370 | struct ethtool_usrip4_spec usr_ip4_spec; | 396 | struct ethtool_usrip4_spec usr_ip4_spec; |
371 | __u8 hdata[64]; | 397 | struct ethhdr ether_spec; |
372 | } h_u, m_u; /* entry, mask */ | 398 | __u8 hdata[72]; |
399 | } h_u, m_u; | ||
373 | __u64 ring_cookie; | 400 | __u64 ring_cookie; |
374 | __u32 location; | 401 | __u32 location; |
375 | }; | 402 | }; |
376 | 403 | ||
404 | /** | ||
405 | * struct ethtool_rxnfc - command to get or set RX flow classification rules | ||
406 | * @cmd: Specific command number - %ETHTOOL_GRXFH, %ETHTOOL_SRXFH, | ||
407 | * %ETHTOOL_GRXRINGS, %ETHTOOL_GRXCLSRLCNT, %ETHTOOL_GRXCLSRULE, | ||
408 | * %ETHTOOL_GRXCLSRLALL, %ETHTOOL_SRXCLSRLDEL or %ETHTOOL_SRXCLSRLINS | ||
409 | * @flow_type: Type of flow to be affected, e.g. %TCP_V4_FLOW | ||
410 | * @data: Command-dependent value | ||
411 | * @fs: Flow filter specification | ||
412 | * @rule_cnt: Number of rules to be affected | ||
413 | * @rule_locs: Array of valid rule indices | ||
414 | * | ||
415 | * For %ETHTOOL_GRXFH and %ETHTOOL_SRXFH, @data is a bitmask indicating | ||
416 | * the fields included in the flow hash, e.g. %RXH_IP_SRC. The following | ||
417 | * structure fields must not be used. | ||
418 | * | ||
419 | * For %ETHTOOL_GRXRINGS, @data is set to the number of RX rings/queues | ||
420 | * on return. | ||
421 | * | ||
422 | * For %ETHTOOL_GRXCLSRLCNT, @rule_cnt is set to the number of defined | ||
423 | * rules on return. | ||
424 | * | ||
425 | * For %ETHTOOL_GRXCLSRULE, @fs.@location specifies the index of an | ||
426 | * existing filter rule on entry and @fs contains the rule on return. | ||
427 | * | ||
428 | * For %ETHTOOL_GRXCLSRLALL, @rule_cnt specifies the array size of the | ||
429 | * user buffer for @rule_locs on entry. On return, @data is the size | ||
430 | * of the filter table and @rule_locs contains the indices of the | ||
431 | * defined rules. | ||
432 | * | ||
433 | * For %ETHTOOL_SRXCLSRLINS, @fs specifies the filter rule to add or | ||
434 | * update. @fs.@location specifies the index to use and must not be | ||
435 | * ignored. | ||
436 | * | ||
437 | * For %ETHTOOL_SRXCLSRLDEL, @fs.@location specifies the index of an | ||
438 | * existing filter rule on entry. | ||
439 | * | ||
440 | * Implementation of indexed classification rules generally requires a | ||
441 | * TCAM. | ||
442 | */ | ||
377 | struct ethtool_rxnfc { | 443 | struct ethtool_rxnfc { |
378 | __u32 cmd; | 444 | __u32 cmd; |
379 | __u32 flow_type; | 445 | __u32 flow_type; |
380 | /* The rx flow hash value or the rule DB size */ | ||
381 | __u64 data; | 446 | __u64 data; |
382 | /* The following fields are not valid and must not be used for | ||
383 | * the ETHTOOL_{G,X}RXFH commands. */ | ||
384 | struct ethtool_rx_flow_spec fs; | 447 | struct ethtool_rx_flow_spec fs; |
385 | __u32 rule_cnt; | 448 | __u32 rule_cnt; |
386 | __u32 rule_locs[0]; | 449 | __u32 rule_locs[0]; |
387 | }; | 450 | }; |
388 | 451 | ||
452 | /** | ||
453 | * struct ethtool_rxfh_indir - command to get or set RX flow hash indirection | ||
454 | * @cmd: Specific command number - %ETHTOOL_GRXFHINDIR or %ETHTOOL_SRXFHINDIR | ||
455 | * @size: On entry, the array size of the user buffer. On return from | ||
456 | * %ETHTOOL_GRXFHINDIR, the array size of the hardware indirection table. | ||
457 | * @ring_index: RX ring/queue index for each hash value | ||
458 | */ | ||
389 | struct ethtool_rxfh_indir { | 459 | struct ethtool_rxfh_indir { |
390 | __u32 cmd; | 460 | __u32 cmd; |
391 | /* On entry, this is the array size of the user buffer. On | ||
392 | * return from ETHTOOL_GRXFHINDIR, this is the array size of | ||
393 | * the hardware indirection table. */ | ||
394 | __u32 size; | 461 | __u32 size; |
395 | __u32 ring_index[0]; /* ring/queue index for each hash value */ | 462 | __u32 ring_index[0]; |
396 | }; | 463 | }; |
397 | 464 | ||
465 | /** | ||
466 | * struct ethtool_rx_ntuple_flow_spec - specification for RX flow filter | ||
467 | * @flow_type: Type of match to perform, e.g. %TCP_V4_FLOW | ||
468 | * @h_u: Flow field values to match (dependent on @flow_type) | ||
469 | * @m_u: Masks for flow field value bits to be ignored | ||
470 | * @vlan_tag: VLAN tag to match | ||
471 | * @vlan_tag_mask: Mask for VLAN tag bits to be ignored | ||
472 | * @data: Driver-dependent data to match | ||
473 | * @data_mask: Mask for driver-dependent data bits to be ignored | ||
474 | * @action: RX ring/queue index to deliver to (non-negative) or other action | ||
475 | * (negative, e.g. %ETHTOOL_RXNTUPLE_ACTION_DROP) | ||
476 | * | ||
477 | * For flow types %TCP_V4_FLOW, %UDP_V4_FLOW and %SCTP_V4_FLOW, where | ||
478 | * a field value and mask are both zero this is treated as if all mask | ||
479 | * bits are set i.e. the field is ignored. | ||
480 | */ | ||
398 | struct ethtool_rx_ntuple_flow_spec { | 481 | struct ethtool_rx_ntuple_flow_spec { |
399 | __u32 flow_type; | 482 | __u32 flow_type; |
400 | union { | 483 | union { |
@@ -403,22 +486,26 @@ struct ethtool_rx_ntuple_flow_spec { | |||
403 | struct ethtool_tcpip4_spec sctp_ip4_spec; | 486 | struct ethtool_tcpip4_spec sctp_ip4_spec; |
404 | struct ethtool_ah_espip4_spec ah_ip4_spec; | 487 | struct ethtool_ah_espip4_spec ah_ip4_spec; |
405 | struct ethtool_ah_espip4_spec esp_ip4_spec; | 488 | struct ethtool_ah_espip4_spec esp_ip4_spec; |
406 | struct ethtool_rawip4_spec raw_ip4_spec; | ||
407 | struct ethtool_ether_spec ether_spec; | ||
408 | struct ethtool_usrip4_spec usr_ip4_spec; | 489 | struct ethtool_usrip4_spec usr_ip4_spec; |
409 | __u8 hdata[64]; | 490 | struct ethhdr ether_spec; |
410 | } h_u, m_u; /* entry, mask */ | 491 | __u8 hdata[72]; |
492 | } h_u, m_u; | ||
411 | 493 | ||
412 | __u16 vlan_tag; | 494 | __u16 vlan_tag; |
413 | __u16 vlan_tag_mask; | 495 | __u16 vlan_tag_mask; |
414 | __u64 data; /* user-defined flow spec data */ | 496 | __u64 data; |
415 | __u64 data_mask; /* user-defined flow spec mask */ | 497 | __u64 data_mask; |
416 | 498 | ||
417 | /* signed to distinguish between queue and actions (DROP) */ | ||
418 | __s32 action; | 499 | __s32 action; |
419 | #define ETHTOOL_RXNTUPLE_ACTION_DROP -1 | 500 | #define ETHTOOL_RXNTUPLE_ACTION_DROP (-1) /* drop packet */ |
501 | #define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) /* clear filter */ | ||
420 | }; | 502 | }; |
421 | 503 | ||
504 | /** | ||
505 | * struct ethtool_rx_ntuple - command to set or clear RX flow filter | ||
506 | * @cmd: Command number - %ETHTOOL_SRXNTUPLE | ||
507 | * @fs: Flow filter specification | ||
508 | */ | ||
422 | struct ethtool_rx_ntuple { | 509 | struct ethtool_rx_ntuple { |
423 | __u32 cmd; | 510 | __u32 cmd; |
424 | struct ethtool_rx_ntuple_flow_spec fs; | 511 | struct ethtool_rx_ntuple_flow_spec fs; |
@@ -759,22 +846,23 @@ struct ethtool_ops { | |||
759 | #define WAKE_MAGIC (1 << 5) | 846 | #define WAKE_MAGIC (1 << 5) |
760 | #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ | 847 | #define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ |
761 | 848 | ||
762 | /* L3-L4 network traffic flow types */ | 849 | /* L2-L4 network traffic flow types */ |
763 | #define TCP_V4_FLOW 0x01 | 850 | #define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */ |
764 | #define UDP_V4_FLOW 0x02 | 851 | #define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */ |
765 | #define SCTP_V4_FLOW 0x03 | 852 | #define SCTP_V4_FLOW 0x03 /* hash or spec (sctp_ip4_spec) */ |
766 | #define AH_ESP_V4_FLOW 0x04 | 853 | #define AH_ESP_V4_FLOW 0x04 /* hash only */ |
767 | #define TCP_V6_FLOW 0x05 | 854 | #define TCP_V6_FLOW 0x05 /* hash only */ |
768 | #define UDP_V6_FLOW 0x06 | 855 | #define UDP_V6_FLOW 0x06 /* hash only */ |
769 | #define SCTP_V6_FLOW 0x07 | 856 | #define SCTP_V6_FLOW 0x07 /* hash only */ |
770 | #define AH_ESP_V6_FLOW 0x08 | 857 | #define AH_ESP_V6_FLOW 0x08 /* hash only */ |
771 | #define AH_V4_FLOW 0x09 | 858 | #define AH_V4_FLOW 0x09 /* hash or spec (ah_ip4_spec) */ |
772 | #define ESP_V4_FLOW 0x0a | 859 | #define ESP_V4_FLOW 0x0a /* hash or spec (esp_ip4_spec) */ |
773 | #define AH_V6_FLOW 0x0b | 860 | #define AH_V6_FLOW 0x0b /* hash only */ |
774 | #define ESP_V6_FLOW 0x0c | 861 | #define ESP_V6_FLOW 0x0c /* hash only */ |
775 | #define IP_USER_FLOW 0x0d | 862 | #define IP_USER_FLOW 0x0d /* spec only (usr_ip4_spec) */ |
776 | #define IPV4_FLOW 0x10 | 863 | #define IPV4_FLOW 0x10 /* hash only */ |
777 | #define IPV6_FLOW 0x11 | 864 | #define IPV6_FLOW 0x11 /* hash only */ |
865 | #define ETHER_FLOW 0x12 /* spec only (ether_spec) */ | ||
778 | 866 | ||
779 | /* L3-L4 network traffic flow hash options */ | 867 | /* L3-L4 network traffic flow hash options */ |
780 | #define RXH_L2DA (1 << 1) | 868 | #define RXH_L2DA (1 << 1) |
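The new ETHER_FLOW type pairs with the ethhdr-shaped ether_spec member added to the h_u/m_u union above, and ETHTOOL_RXNTUPLE_ACTION_CLEAR gives userspace a way to remove a filter again. A minimal userspace sketch of installing and then clearing a drop filter on a destination MAC; it assumes the usual SIOCETHTOOL ioctl path and a flow_type field in struct ethtool_rx_ntuple_flow_spec (not visible in this hunk), and leaves mask semantics beyond "entry, mask" to the driver:

#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/sockios.h>
#include <linux/if_ether.h>
#include <linux/ethtool.h>

/* Sketch only: drop frames addressed to 'mac' on 'ifname', then clear. */
static int ntuple_drop_dmac(const char *ifname, const unsigned char *mac)
{
	struct ethtool_rx_ntuple nt;
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ret = -1;

	if (fd < 0)
		return -1;

	memset(&nt, 0, sizeof(nt));
	nt.cmd = ETHTOOL_SRXNTUPLE;
	nt.fs.flow_type = ETHER_FLOW;                  /* spec only (ether_spec) */
	memcpy(nt.fs.h_u.ether_spec.h_dest, mac, ETH_ALEN);   /* entry */
	memset(nt.fs.m_u.ether_spec.h_dest, 0xff, ETH_ALEN);  /* mask */
	nt.fs.action = ETHTOOL_RXNTUPLE_ACTION_DROP;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&nt;

	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {       /* install the filter */
		nt.fs.action = ETHTOOL_RXNTUPLE_ACTION_CLEAR;
		ret = ioctl(fd, SIOCETHTOOL, &ifr);    /* and clear it again */
	}
	close(fd);
	return ret;
}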
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h index a9cd507f8cd2..28028988c862 100644 --- a/include/linux/exportfs.h +++ b/include/linux/exportfs.h | |||
@@ -67,6 +67,19 @@ enum fid_type { | |||
67 | * 32 bit parent block number, 32 bit parent generation number | 67 | * 32 bit parent block number, 32 bit parent generation number |
68 | */ | 68 | */ |
69 | FILEID_UDF_WITH_PARENT = 0x52, | 69 | FILEID_UDF_WITH_PARENT = 0x52, |
70 | |||
71 | /* | ||
72 | * 64 bit checkpoint number, 64 bit inode number, | ||
73 | * 32 bit generation number. | ||
74 | */ | ||
75 | FILEID_NILFS_WITHOUT_PARENT = 0x61, | ||
76 | |||
77 | /* | ||
78 | * 64 bit checkpoint number, 64 bit inode number, | ||
79 | * 32 bit generation number, 32 bit parent generation. | ||
80 | * 64 bit parent inode number. | ||
81 | */ | ||
82 | FILEID_NILFS_WITH_PARENT = 0x62, | ||
70 | }; | 83 | }; |
71 | 84 | ||
72 | struct fid { | 85 | struct fid { |
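For reference, the first layout documented above packs into five 32-bit words (64 + 64 + 32 bits) and the second into eight (an extra 32 + 64 bits). A purely illustrative struct naming those pieces; the field names are hypothetical, file handles are really arrays of __u32 words, and the actual encoding lives in the nilfs2 export code:

/* Hypothetical illustration of the handle layouts described above. */
struct nilfs_fid_sketch {
	u64 cno;          /* 64 bit checkpoint number */
	u64 ino;          /* 64 bit inode number */
	u32 gen;          /* 32 bit generation number */
	/* FILEID_NILFS_WITH_PARENT additionally carries: */
	u32 parent_gen;   /* 32 bit parent generation number */
	u64 parent_ino;   /* 64 bit parent inode number */
};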
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index f0949a57ca9d..63531a6b4d2a 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h | |||
@@ -65,14 +65,14 @@ | |||
65 | FAN_ALL_PERM_EVENTS |\ | 65 | FAN_ALL_PERM_EVENTS |\ |
66 | FAN_Q_OVERFLOW) | 66 | FAN_Q_OVERFLOW) |
67 | 67 | ||
68 | #define FANOTIFY_METADATA_VERSION 1 | 68 | #define FANOTIFY_METADATA_VERSION 2 |
69 | 69 | ||
70 | struct fanotify_event_metadata { | 70 | struct fanotify_event_metadata { |
71 | __u32 event_len; | 71 | __u32 event_len; |
72 | __u32 vers; | 72 | __u32 vers; |
73 | __s32 fd; | ||
74 | __u64 mask; | 73 | __u64 mask; |
75 | __s64 pid; | 74 | __s32 fd; |
75 | __s32 pid; | ||
76 | } __attribute__ ((packed)); | 76 | } __attribute__ ((packed)); |
77 | 77 | ||
78 | struct fanotify_response { | 78 | struct fanotify_response { |
@@ -95,11 +95,4 @@ struct fanotify_response { | |||
95 | (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \ | 95 | (long)(meta)->event_len >= (long)FAN_EVENT_METADATA_LEN && \ |
96 | (long)(meta)->event_len <= (long)(len)) | 96 | (long)(meta)->event_len <= (long)(len)) |
97 | 97 | ||
98 | #ifdef __KERNEL__ | ||
99 | |||
100 | struct fanotify_wait { | ||
101 | struct fsnotify_event *event; | ||
102 | __s32 fd; | ||
103 | }; | ||
104 | #endif /* __KERNEL__ */ | ||
105 | #endif /* _LINUX_FANOTIFY_H */ | 98 | #endif /* _LINUX_FANOTIFY_H */ |
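With the version bump and the reordered, narrowed fields, a reader built against this header should verify vers before trusting the rest of the record. A minimal read-loop sketch for an already-initialized fanotify descriptor, assuming the FAN_EVENT_OK()/FAN_EVENT_NEXT() iteration helpers from this header:

#include <stdio.h>
#include <unistd.h>
#include <linux/fanotify.h>

/* Sketch: drain one batch of events from fanotify fd 'fan_fd'. */
static void drain_events(int fan_fd)
{
	char buf[4096] __attribute__((aligned(8)));
	struct fanotify_event_metadata *meta;
	ssize_t len = read(fan_fd, buf, sizeof(buf));

	meta = (struct fanotify_event_metadata *)buf;
	while (FAN_EVENT_OK(meta, len)) {
		if (meta->vers != FANOTIFY_METADATA_VERSION) {
			fprintf(stderr, "fanotify ABI mismatch\n");
			break;
		}
		printf("mask 0x%llx pid %d fd %d\n",
		       (unsigned long long)meta->mask,
		       (int)meta->pid, (int)meta->fd);
		if (meta->fd >= 0)
			close(meta->fd);        /* each event carries an open fd */
		meta = FAN_EVENT_NEXT(meta, len);
	}
}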
diff --git a/include/linux/fdreg.h b/include/linux/fdreg.h index c2eeb63b72db..61ce64169004 100644 --- a/include/linux/fdreg.h +++ b/include/linux/fdreg.h | |||
@@ -89,7 +89,7 @@ | |||
89 | /* the following commands are new in the 82078. They are not used in the | 89 | /* the following commands are new in the 82078. They are not used in the |
90 | * floppy driver, except the first three. These commands may be useful for apps | 90 | * floppy driver, except the first three. These commands may be useful for apps |
91 | * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at | 91 | * which use the FDRAWCMD interface. For doc, get the 82078 spec sheets at |
92 | * http://www-techdoc.intel.com/docs/periph/fd_contr/datasheets/ */ | 92 | * http://www.intel.com/design/archives/periphrl/docs/29046803.htm */ |
93 | 93 | ||
94 | #define FD_PARTID 0x18 /* part id ("extended" version cmd) */ | 94 | #define FD_PARTID 0x18 /* part id ("extended" version cmd) */ |
95 | #define FD_SAVE 0x2e /* save fdc regs for later restore */ | 95 | #define FD_SAVE 0x2e /* save fdc regs for later restore */ |
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index f59ed297b661..133c0ba25e30 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
@@ -31,7 +31,7 @@ struct embedded_fd_set { | |||
31 | 31 | ||
32 | struct fdtable { | 32 | struct fdtable { |
33 | unsigned int max_fds; | 33 | unsigned int max_fds; |
34 | struct file ** fd; /* current fd array */ | 34 | struct file __rcu **fd; /* current fd array */ |
35 | fd_set *close_on_exec; | 35 | fd_set *close_on_exec; |
36 | fd_set *open_fds; | 36 | fd_set *open_fds; |
37 | struct rcu_head rcu; | 37 | struct rcu_head rcu; |
@@ -46,7 +46,7 @@ struct files_struct { | |||
46 | * read mostly part | 46 | * read mostly part |
47 | */ | 47 | */ |
48 | atomic_t count; | 48 | atomic_t count; |
49 | struct fdtable *fdt; | 49 | struct fdtable __rcu *fdt; |
50 | struct fdtable fdtab; | 50 | struct fdtable fdtab; |
51 | /* | 51 | /* |
52 | * written part on a separate cache line in SMP | 52 | * written part on a separate cache line in SMP |
@@ -55,7 +55,7 @@ struct files_struct { | |||
55 | int next_fd; | 55 | int next_fd; |
56 | struct embedded_fd_set close_on_exec_init; | 56 | struct embedded_fd_set close_on_exec_init; |
57 | struct embedded_fd_set open_fds_init; | 57 | struct embedded_fd_set open_fds_init; |
58 | struct file * fd_array[NR_OPEN_DEFAULT]; | 58 | struct file __rcu * fd_array[NR_OPEN_DEFAULT]; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | #define rcu_dereference_check_fdtable(files, fdtfd) \ | 61 | #define rcu_dereference_check_fdtable(files, fdtfd) \ |
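The __rcu annotations document what readers of these tables are already expected to do. A minimal lookup sketch (kernel context, illustrative name), mirroring what fget()/fcheck_files() do: dereference under rcu_read_lock() and take a reference with atomic_long_inc_not_zero() before leaving the read-side section:

/* Sketch: annotation-respecting reader for the fd table above. */
static struct file *fd_lookup_sketch(struct files_struct *files, unsigned int fd)
{
	struct file *file = NULL;
	struct fdtable *fdt;

	rcu_read_lock();
	fdt = files_fdtable(files);       /* rcu_dereference of files->fdt */
	if (fd < fdt->max_fds)
		file = rcu_dereference(fdt->fd[fd]);
	if (file && !atomic_long_inc_not_zero(&file->f_count))
		file = NULL;              /* lost the race with the final fput() */
	rcu_read_unlock();
	return file;
}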
diff --git a/include/linux/fs.h b/include/linux/fs.h index 9a96b4d83fc1..4f34ff6e5558 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -125,9 +125,6 @@ struct inodes_stat_t { | |||
125 | * block layer could (in theory) choose to ignore this | 125 | * block layer could (in theory) choose to ignore this |
126 | * request if it runs into resource problems. | 126 | * request if it runs into resource problems. |
127 | * WRITE A normal async write. Device will be plugged. | 127 | * WRITE A normal async write. Device will be plugged. |
128 | * SWRITE Like WRITE, but a special case for ll_rw_block() that | ||
129 | * tells it to lock the buffer first. Normally a buffer | ||
130 | * must be locked before doing IO. | ||
131 | * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down | 128 | * WRITE_SYNC_PLUG Synchronous write. Identical to WRITE, but passes down |
132 | * the hint that someone will be waiting on this IO | 129 | * the hint that someone will be waiting on this IO |
133 | * shortly. The device must still be unplugged explicitly, | 130 | * shortly. The device must still be unplugged explicitly, |
@@ -138,15 +135,12 @@ struct inodes_stat_t { | |||
138 | * immediately after submission. The write equivalent | 135 | * immediately after submission. The write equivalent |
139 | * of READ_SYNC. | 136 | * of READ_SYNC. |
140 | * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. | 137 | * WRITE_ODIRECT_PLUG Special case write for O_DIRECT only. |
141 | * SWRITE_SYNC | 138 | * WRITE_FLUSH Like WRITE_SYNC but with preceding cache flush. |
142 | * SWRITE_SYNC_PLUG Like WRITE_SYNC/WRITE_SYNC_PLUG, but locks the buffer. | 139 | * WRITE_FUA Like WRITE_SYNC but data is guaranteed to be on |
143 | * See SWRITE. | 140 | * non-volatile media on completion. |
144 | * WRITE_BARRIER Like WRITE_SYNC, but tells the block layer that all | 141 | * WRITE_FLUSH_FUA Combination of WRITE_FLUSH and FUA. The IO is preceded |
145 | * previously submitted writes must be safely on storage | 142 | * by a cache flush and data is guaranteed to be on |
146 | * before this one is started. Also guarantees that when | 143 | * non-volatile media on completion. |
147 | * this write is complete, it itself is also safely on | ||
148 | * storage. Prevents reordering of writes on both sides | ||
149 | * of this IO. | ||
150 | * | 144 | * |
151 | */ | 145 | */ |
152 | #define RW_MASK REQ_WRITE | 146 | #define RW_MASK REQ_WRITE |
@@ -155,7 +149,6 @@ struct inodes_stat_t { | |||
155 | #define READ 0 | 149 | #define READ 0 |
156 | #define WRITE RW_MASK | 150 | #define WRITE RW_MASK |
157 | #define READA RWA_MASK | 151 | #define READA RWA_MASK |
158 | #define SWRITE (WRITE | READA) | ||
159 | 152 | ||
160 | #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) | 153 | #define READ_SYNC (READ | REQ_SYNC | REQ_UNPLUG) |
161 | #define READ_META (READ | REQ_META) | 154 | #define READ_META (READ | REQ_META) |
@@ -163,18 +156,12 @@ struct inodes_stat_t { | |||
163 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) | 156 | #define WRITE_SYNC (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) |
164 | #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) | 157 | #define WRITE_ODIRECT_PLUG (WRITE | REQ_SYNC) |
165 | #define WRITE_META (WRITE | REQ_META) | 158 | #define WRITE_META (WRITE | REQ_META) |
166 | #define WRITE_BARRIER (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ | 159 | #define WRITE_FLUSH (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ |
167 | REQ_HARDBARRIER) | 160 | REQ_FLUSH) |
168 | #define SWRITE_SYNC_PLUG (SWRITE | REQ_SYNC | REQ_NOIDLE) | 161 | #define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ |
169 | #define SWRITE_SYNC (SWRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG) | 162 | REQ_FUA) |
170 | 163 | #define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \ | |
171 | /* | 164 | REQ_FLUSH | REQ_FUA) |
172 | * These aren't really reads or writes, they pass down information about | ||
173 | * parts of device that are now unused by the file system. | ||
174 | */ | ||
175 | #define DISCARD_NOBARRIER (WRITE | REQ_DISCARD) | ||
176 | #define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER) | ||
177 | #define DISCARD_SECURE (DISCARD_NOBARRIER | REQ_SECURE) | ||
178 | 165 | ||
179 | #define SEL_IN 1 | 166 | #define SEL_IN 1 |
180 | #define SEL_OUT 2 | 167 | #define SEL_OUT 2 |
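A short sketch of how a journaling filesystem would use the new flags in place of the removed WRITE_BARRIER, via submit_bio() (declared further down in this header); the bio is assumed to be set up elsewhere:

/* Sketch: commit-record submission that needs ordering and durability. */
static void submit_commit_bio_sketch(struct bio *bio)
{
	/* The preceding flush empties the device cache; FUA guarantees the
	 * commit block itself is on non-volatile media when the bio completes. */
	submit_bio(WRITE_FLUSH_FUA, bio);
}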
@@ -929,6 +916,9 @@ struct file { | |||
929 | #define f_vfsmnt f_path.mnt | 916 | #define f_vfsmnt f_path.mnt |
930 | const struct file_operations *f_op; | 917 | const struct file_operations *f_op; |
931 | spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ | 918 | spinlock_t f_lock; /* f_ep_links, f_flags, no IRQ */ |
919 | #ifdef CONFIG_SMP | ||
920 | int f_sb_list_cpu; | ||
921 | #endif | ||
932 | atomic_long_t f_count; | 922 | atomic_long_t f_count; |
933 | unsigned int f_flags; | 923 | unsigned int f_flags; |
934 | fmode_t f_mode; | 924 | fmode_t f_mode; |
@@ -953,9 +943,6 @@ struct file { | |||
953 | unsigned long f_mnt_write_state; | 943 | unsigned long f_mnt_write_state; |
954 | #endif | 944 | #endif |
955 | }; | 945 | }; |
956 | extern spinlock_t files_lock; | ||
957 | #define file_list_lock() spin_lock(&files_lock); | ||
958 | #define file_list_unlock() spin_unlock(&files_lock); | ||
959 | 946 | ||
960 | #define get_file(x) atomic_long_inc(&(x)->f_count) | 947 | #define get_file(x) atomic_long_inc(&(x)->f_count) |
961 | #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) | 948 | #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) |
@@ -1140,6 +1127,8 @@ extern int vfs_setlease(struct file *, long, struct file_lock **); | |||
1140 | extern int lease_modify(struct file_lock **, int); | 1127 | extern int lease_modify(struct file_lock **, int); |
1141 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); | 1128 | extern int lock_may_read(struct inode *, loff_t start, unsigned long count); |
1142 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); | 1129 | extern int lock_may_write(struct inode *, loff_t start, unsigned long count); |
1130 | extern void lock_flocks(void); | ||
1131 | extern void unlock_flocks(void); | ||
1143 | #else /* !CONFIG_FILE_LOCKING */ | 1132 | #else /* !CONFIG_FILE_LOCKING */ |
1144 | static inline int fcntl_getlk(struct file *file, struct flock __user *user) | 1133 | static inline int fcntl_getlk(struct file *file, struct flock __user *user) |
1145 | { | 1134 | { |
@@ -1282,6 +1271,14 @@ static inline int lock_may_write(struct inode *inode, loff_t start, | |||
1282 | return 1; | 1271 | return 1; |
1283 | } | 1272 | } |
1284 | 1273 | ||
1274 | static inline void lock_flocks(void) | ||
1275 | { | ||
1276 | } | ||
1277 | |||
1278 | static inline void unlock_flocks(void) | ||
1279 | { | ||
1280 | } | ||
1281 | |||
1285 | #endif /* !CONFIG_FILE_LOCKING */ | 1282 | #endif /* !CONFIG_FILE_LOCKING */ |
1286 | 1283 | ||
1287 | 1284 | ||
@@ -1346,7 +1343,11 @@ struct super_block { | |||
1346 | 1343 | ||
1347 | struct list_head s_inodes; /* all inodes */ | 1344 | struct list_head s_inodes; /* all inodes */ |
1348 | struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ | 1345 | struct hlist_head s_anon; /* anonymous dentries for (nfs) exporting */ |
1346 | #ifdef CONFIG_SMP | ||
1347 | struct list_head __percpu *s_files; | ||
1348 | #else | ||
1349 | struct list_head s_files; | 1349 | struct list_head s_files; |
1350 | #endif | ||
1350 | /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ | 1351 | /* s_dentry_lru and s_nr_dentry_unused are protected by dcache_lock */ |
1351 | struct list_head s_dentry_lru; /* unused dentry lru */ | 1352 | struct list_head s_dentry_lru; /* unused dentry lru */ |
1352 | int s_nr_dentry_unused; /* # of dentry on lru */ | 1353 | int s_nr_dentry_unused; /* # of dentry on lru */ |
@@ -1385,7 +1386,7 @@ struct super_block { | |||
1385 | * Saved mount options for lazy filesystems using | 1386 | * Saved mount options for lazy filesystems using |
1386 | * generic_show_options() | 1387 | * generic_show_options() |
1387 | */ | 1388 | */ |
1388 | char *s_options; | 1389 | char __rcu *s_options; |
1389 | }; | 1390 | }; |
1390 | 1391 | ||
1391 | extern struct timespec current_fs_time(struct super_block *sb); | 1392 | extern struct timespec current_fs_time(struct super_block *sb); |
@@ -2197,8 +2198,6 @@ static inline void insert_inode_hash(struct inode *inode) { | |||
2197 | __insert_inode_hash(inode, inode->i_ino); | 2198 | __insert_inode_hash(inode, inode->i_ino); |
2198 | } | 2199 | } |
2199 | 2200 | ||
2200 | extern void file_move(struct file *f, struct list_head *list); | ||
2201 | extern void file_kill(struct file *f); | ||
2202 | #ifdef CONFIG_BLOCK | 2201 | #ifdef CONFIG_BLOCK |
2203 | extern void submit_bio(int, struct bio *); | 2202 | extern void submit_bio(int, struct bio *); |
2204 | extern int bdev_read_only(struct block_device *); | 2203 | extern int bdev_read_only(struct block_device *); |
@@ -2381,6 +2380,8 @@ extern ssize_t simple_write_to_buffer(void *to, size_t available, loff_t *ppos, | |||
2381 | 2380 | ||
2382 | extern int generic_file_fsync(struct file *, int); | 2381 | extern int generic_file_fsync(struct file *, int); |
2383 | 2382 | ||
2383 | extern int generic_check_addressable(unsigned, u64); | ||
2384 | |||
2384 | #ifdef CONFIG_MIGRATION | 2385 | #ifdef CONFIG_MIGRATION |
2385 | extern int buffer_migrate_page(struct address_space *, | 2386 | extern int buffer_migrate_page(struct address_space *, |
2386 | struct page *, struct page *); | 2387 | struct page *, struct page *); |
@@ -2457,6 +2458,7 @@ static const struct file_operations __fops = { \ | |||
2457 | .release = simple_attr_release, \ | 2458 | .release = simple_attr_release, \ |
2458 | .read = simple_attr_read, \ | 2459 | .read = simple_attr_read, \ |
2459 | .write = simple_attr_write, \ | 2460 | .write = simple_attr_write, \ |
2461 | .llseek = generic_file_llseek, \ | ||
2460 | }; | 2462 | }; |
2461 | 2463 | ||
2462 | static inline void __attribute__((format(printf, 1, 2))) | 2464 | static inline void __attribute__((format(printf, 1, 2))) |
diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h index eca3d5202138..a42b5bf02f8b 100644 --- a/include/linux/fs_struct.h +++ b/include/linux/fs_struct.h | |||
@@ -5,7 +5,7 @@ | |||
5 | 5 | ||
6 | struct fs_struct { | 6 | struct fs_struct { |
7 | int users; | 7 | int users; |
8 | rwlock_t lock; | 8 | spinlock_t lock; |
9 | int umask; | 9 | int umask; |
10 | int in_exec; | 10 | int in_exec; |
11 | struct path root, pwd; | 11 | struct path root, pwd; |
@@ -23,29 +23,29 @@ extern int unshare_fs_struct(void); | |||
23 | 23 | ||
24 | static inline void get_fs_root(struct fs_struct *fs, struct path *root) | 24 | static inline void get_fs_root(struct fs_struct *fs, struct path *root) |
25 | { | 25 | { |
26 | read_lock(&fs->lock); | 26 | spin_lock(&fs->lock); |
27 | *root = fs->root; | 27 | *root = fs->root; |
28 | path_get(root); | 28 | path_get(root); |
29 | read_unlock(&fs->lock); | 29 | spin_unlock(&fs->lock); |
30 | } | 30 | } |
31 | 31 | ||
32 | static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) | 32 | static inline void get_fs_pwd(struct fs_struct *fs, struct path *pwd) |
33 | { | 33 | { |
34 | read_lock(&fs->lock); | 34 | spin_lock(&fs->lock); |
35 | *pwd = fs->pwd; | 35 | *pwd = fs->pwd; |
36 | path_get(pwd); | 36 | path_get(pwd); |
37 | read_unlock(&fs->lock); | 37 | spin_unlock(&fs->lock); |
38 | } | 38 | } |
39 | 39 | ||
40 | static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, | 40 | static inline void get_fs_root_and_pwd(struct fs_struct *fs, struct path *root, |
41 | struct path *pwd) | 41 | struct path *pwd) |
42 | { | 42 | { |
43 | read_lock(&fs->lock); | 43 | spin_lock(&fs->lock); |
44 | *root = fs->root; | 44 | *root = fs->root; |
45 | path_get(root); | 45 | path_get(root); |
46 | *pwd = fs->pwd; | 46 | *pwd = fs->pwd; |
47 | path_get(pwd); | 47 | path_get(pwd); |
48 | read_unlock(&fs->lock); | 48 | spin_unlock(&fs->lock); |
49 | } | 49 | } |
50 | 50 | ||
51 | #endif /* _LINUX_FS_STRUCT_H */ | 51 | #endif /* _LINUX_FS_STRUCT_H */ |
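The read-side helpers above now take the spinlock; writers follow the same pattern. A sketch of a set_fs_pwd()-style update under the new lock (kernel context, error handling omitted):

/* Sketch: writer-side update matching the spin_lock()ed readers above. */
static void set_pwd_sketch(struct fs_struct *fs, struct path *path)
{
	struct path old_pwd;

	spin_lock(&fs->lock);
	old_pwd = fs->pwd;
	fs->pwd = *path;
	path_get(path);
	spin_unlock(&fs->lock);

	if (old_pwd.dentry)
		path_put(&old_pwd);
}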
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 28e33fea5107..4eb56ed75fbc 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
@@ -58,17 +58,35 @@ enum fsl_usb2_phy_modes { | |||
58 | FSL_USB2_PHY_SERIAL, | 58 | FSL_USB2_PHY_SERIAL, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct clk; | ||
62 | struct platform_device; | ||
63 | |||
61 | struct fsl_usb2_platform_data { | 64 | struct fsl_usb2_platform_data { |
62 | /* board specific information */ | 65 | /* board specific information */ |
63 | enum fsl_usb2_operating_modes operating_mode; | 66 | enum fsl_usb2_operating_modes operating_mode; |
64 | enum fsl_usb2_phy_modes phy_mode; | 67 | enum fsl_usb2_phy_modes phy_mode; |
65 | unsigned int port_enables; | 68 | unsigned int port_enables; |
69 | unsigned int workaround; | ||
70 | |||
71 | int (*init)(struct platform_device *); | ||
72 | void (*exit)(struct platform_device *); | ||
73 | void __iomem *regs; /* ioremap'd register base */ | ||
74 | struct clk *clk; | ||
75 | unsigned big_endian_mmio:1; | ||
76 | unsigned big_endian_desc:1; | ||
77 | unsigned es:1; /* need USBMODE:ES */ | ||
78 | unsigned le_setup_buf:1; | ||
79 | unsigned have_sysif_regs:1; | ||
80 | unsigned invert_drvvbus:1; | ||
81 | unsigned invert_pwr_fault:1; | ||
66 | }; | 82 | }; |
67 | 83 | ||
68 | /* Flags in fsl_usb2_mph_platform_data */ | 84 | /* Flags in fsl_usb2_mph_platform_data */ |
69 | #define FSL_USB2_PORT0_ENABLED 0x00000001 | 85 | #define FSL_USB2_PORT0_ENABLED 0x00000001 |
70 | #define FSL_USB2_PORT1_ENABLED 0x00000002 | 86 | #define FSL_USB2_PORT1_ENABLED 0x00000002 |
71 | 87 | ||
88 | #define FLS_USB2_WORKAROUND_ENGCM09152 (1 << 0) | ||
89 | |||
72 | struct spi_device; | 90 | struct spi_device; |
73 | 91 | ||
74 | struct fsl_spi_platform_data { | 92 | struct fsl_spi_platform_data { |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index ed36fb57c426..e40190d16878 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
@@ -156,6 +156,7 @@ struct fsnotify_group { | |||
156 | struct mutex access_mutex; | 156 | struct mutex access_mutex; |
157 | struct list_head access_list; | 157 | struct list_head access_list; |
158 | wait_queue_head_t access_waitq; | 158 | wait_queue_head_t access_waitq; |
159 | bool bypass_perm; /* protected by access_mutex */ | ||
159 | #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ | 160 | #endif /* CONFIG_FANOTIFY_ACCESS_PERMISSIONS */ |
160 | int f_flags; | 161 | int f_flags; |
161 | } fanotify_data; | 162 | } fanotify_data; |
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 02b8b24f8f51..8beabb958f61 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h | |||
@@ -191,8 +191,8 @@ struct ftrace_event_call { | |||
191 | unsigned int flags; | 191 | unsigned int flags; |
192 | 192 | ||
193 | #ifdef CONFIG_PERF_EVENTS | 193 | #ifdef CONFIG_PERF_EVENTS |
194 | int perf_refcount; | 194 | int perf_refcount; |
195 | struct hlist_head *perf_events; | 195 | struct hlist_head __percpu *perf_events; |
196 | #endif | 196 | #endif |
197 | }; | 197 | }; |
198 | 198 | ||
@@ -252,8 +252,8 @@ DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); | |||
252 | 252 | ||
253 | extern int perf_trace_init(struct perf_event *event); | 253 | extern int perf_trace_init(struct perf_event *event); |
254 | extern void perf_trace_destroy(struct perf_event *event); | 254 | extern void perf_trace_destroy(struct perf_event *event); |
255 | extern int perf_trace_enable(struct perf_event *event); | 255 | extern int perf_trace_add(struct perf_event *event, int flags); |
256 | extern void perf_trace_disable(struct perf_event *event); | 256 | extern void perf_trace_del(struct perf_event *event, int flags); |
257 | extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, | 257 | extern int ftrace_profile_set_filter(struct perf_event *event, int event_id, |
258 | char *filter_str); | 258 | char *filter_str); |
259 | extern void ftrace_profile_free_filter(struct perf_event *event); | 259 | extern void ftrace_profile_free_filter(struct perf_event *event); |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5f2f4c4d8fb0..7a7b9c1644e4 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
13 | #include <linux/kdev_t.h> | 13 | #include <linux/kdev_t.h> |
14 | #include <linux/rcupdate.h> | 14 | #include <linux/rcupdate.h> |
15 | #include <linux/slab.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_BLOCK | 17 | #ifdef CONFIG_BLOCK |
17 | 18 | ||
@@ -86,7 +87,15 @@ struct disk_stats { | |||
86 | unsigned long io_ticks; | 87 | unsigned long io_ticks; |
87 | unsigned long time_in_queue; | 88 | unsigned long time_in_queue; |
88 | }; | 89 | }; |
89 | 90 | ||
91 | #define PARTITION_META_INFO_VOLNAMELTH 64 | ||
92 | #define PARTITION_META_INFO_UUIDLTH 16 | ||
93 | |||
94 | struct partition_meta_info { | ||
95 | u8 uuid[PARTITION_META_INFO_UUIDLTH]; /* always big endian */ | ||
96 | u8 volname[PARTITION_META_INFO_VOLNAMELTH]; | ||
97 | }; | ||
98 | |||
90 | struct hd_struct { | 99 | struct hd_struct { |
91 | sector_t start_sect; | 100 | sector_t start_sect; |
92 | sector_t nr_sects; | 101 | sector_t nr_sects; |
@@ -95,6 +104,7 @@ struct hd_struct { | |||
95 | struct device __dev; | 104 | struct device __dev; |
96 | struct kobject *holder_dir; | 105 | struct kobject *holder_dir; |
97 | int policy, partno; | 106 | int policy, partno; |
107 | struct partition_meta_info *info; | ||
98 | #ifdef CONFIG_FAIL_MAKE_REQUEST | 108 | #ifdef CONFIG_FAIL_MAKE_REQUEST |
99 | int make_it_fail; | 109 | int make_it_fail; |
100 | #endif | 110 | #endif |
@@ -129,8 +139,8 @@ struct blk_scsi_cmd_filter { | |||
129 | struct disk_part_tbl { | 139 | struct disk_part_tbl { |
130 | struct rcu_head rcu_head; | 140 | struct rcu_head rcu_head; |
131 | int len; | 141 | int len; |
132 | struct hd_struct *last_lookup; | 142 | struct hd_struct __rcu *last_lookup; |
133 | struct hd_struct *part[]; | 143 | struct hd_struct __rcu *part[]; |
134 | }; | 144 | }; |
135 | 145 | ||
136 | struct gendisk { | 146 | struct gendisk { |
@@ -149,7 +159,7 @@ struct gendisk { | |||
149 | * non-critical accesses use RCU. Always access through | 159 | * non-critical accesses use RCU. Always access through |
150 | * helpers. | 160 | * helpers. |
151 | */ | 161 | */ |
152 | struct disk_part_tbl *part_tbl; | 162 | struct disk_part_tbl __rcu *part_tbl; |
153 | struct hd_struct part0; | 163 | struct hd_struct part0; |
154 | 164 | ||
155 | const struct block_device_operations *fops; | 165 | const struct block_device_operations *fops; |
@@ -181,6 +191,30 @@ static inline struct gendisk *part_to_disk(struct hd_struct *part) | |||
181 | return NULL; | 191 | return NULL; |
182 | } | 192 | } |
183 | 193 | ||
194 | static inline void part_pack_uuid(const u8 *uuid_str, u8 *to) | ||
195 | { | ||
196 | int i; | ||
197 | for (i = 0; i < 16; ++i) { | ||
198 | *to++ = (hex_to_bin(*uuid_str) << 4) | | ||
199 | (hex_to_bin(*(uuid_str + 1))); | ||
200 | uuid_str += 2; | ||
201 | switch (i) { | ||
202 | case 3: | ||
203 | case 5: | ||
204 | case 7: | ||
205 | case 9: | ||
206 | uuid_str++; | ||
207 | continue; | ||
208 | } | ||
209 | } | ||
210 | } | ||
211 | |||
212 | static inline char *part_unpack_uuid(const u8 *uuid, char *out) | ||
213 | { | ||
214 | sprintf(out, "%pU", uuid); | ||
215 | return out; | ||
216 | } | ||
217 | |||
184 | static inline int disk_max_parts(struct gendisk *disk) | 218 | static inline int disk_max_parts(struct gendisk *disk) |
185 | { | 219 | { |
186 | if (disk->flags & GENHD_FL_EXT_DEVT) | 220 | if (disk->flags & GENHD_FL_EXT_DEVT) |
@@ -342,6 +376,19 @@ static inline int part_in_flight(struct hd_struct *part) | |||
342 | return part->in_flight[0] + part->in_flight[1]; | 376 | return part->in_flight[0] + part->in_flight[1]; |
343 | } | 377 | } |
344 | 378 | ||
379 | static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk) | ||
380 | { | ||
381 | if (disk) | ||
382 | return kzalloc_node(sizeof(struct partition_meta_info), | ||
383 | GFP_KERNEL, disk->node_id); | ||
384 | return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL); | ||
385 | } | ||
386 | |||
387 | static inline void free_part_info(struct hd_struct *part) | ||
388 | { | ||
389 | kfree(part->info); | ||
390 | } | ||
391 | |||
345 | /* block/blk-core.c */ | 392 | /* block/blk-core.c */ |
346 | extern void part_round_stats(int cpu, struct hd_struct *part); | 393 | extern void part_round_stats(int cpu, struct hd_struct *part); |
347 | 394 | ||
@@ -533,7 +580,9 @@ extern int disk_expand_part_tbl(struct gendisk *disk, int target); | |||
533 | extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); | 580 | extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev); |
534 | extern struct hd_struct * __must_check add_partition(struct gendisk *disk, | 581 | extern struct hd_struct * __must_check add_partition(struct gendisk *disk, |
535 | int partno, sector_t start, | 582 | int partno, sector_t start, |
536 | sector_t len, int flags); | 583 | sector_t len, int flags, |
584 | struct partition_meta_info | ||
585 | *info); | ||
537 | extern void delete_partition(struct gendisk *, int); | 586 | extern void delete_partition(struct gendisk *, int); |
538 | extern void printk_all_partitions(void); | 587 | extern void printk_all_partitions(void); |
539 | 588 | ||
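A short sketch of the round trip through the new helpers: packing a canonical UUID string into struct partition_meta_info and formatting it back for a log message. The UUID string and volume name are made up for illustration:

/* Sketch: fill partition metadata and print the UUID back out. */
static void show_part_info_sketch(void)
{
	struct partition_meta_info info;
	char buf[37];    /* "%pU" prints 36 characters plus the trailing NUL */

	part_pack_uuid((const u8 *)"1b4e28ba-2fa1-11d2-883f-b9a761bde3fb",
		       info.uuid);
	strlcpy((char *)info.volname, "rootvol", sizeof(info.volname));
	printk(KERN_INFO "partition %s uuid %s\n",
	       (char *)info.volname, part_unpack_uuid(info.uuid, buf));
}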
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 03f616b78cfa..e41f7dd1ae67 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | 14 | ||
15 | struct device; | 15 | struct device; |
16 | struct gpio_chip; | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * Some platforms don't support the GPIO programming interface. | 19 | * Some platforms don't support the GPIO programming interface. |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d5b387669dab..8a389b608ce3 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #include <linux/lockdep.h> | 8 | #include <linux/lockdep.h> |
9 | #include <linux/ftrace_irq.h> | 9 | #include <linux/ftrace_irq.h> |
10 | #include <asm/hardirq.h> | 10 | #include <asm/hardirq.h> |
11 | #include <asm/system.h> | ||
12 | 11 | ||
13 | /* | 12 | /* |
14 | * We put the hardirq and softirq counter into the preemption | 13 | * We put the hardirq and softirq counter into the preemption |
@@ -64,6 +63,8 @@ | |||
64 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) | 63 | #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) |
65 | #define NMI_OFFSET (1UL << NMI_SHIFT) | 64 | #define NMI_OFFSET (1UL << NMI_SHIFT) |
66 | 65 | ||
66 | #define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) | ||
67 | |||
67 | #ifndef PREEMPT_ACTIVE | 68 | #ifndef PREEMPT_ACTIVE |
68 | #define PREEMPT_ACTIVE_BITS 1 | 69 | #define PREEMPT_ACTIVE_BITS 1 |
69 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) | 70 | #define PREEMPT_ACTIVE_SHIFT (NMI_SHIFT + NMI_BITS) |
@@ -82,10 +83,13 @@ | |||
82 | /* | 83 | /* |
83 | * Are we doing bottom half or hardware interrupt processing? | 84 | * Are we doing bottom half or hardware interrupt processing? |
84 | * Are we in a softirq context? Interrupt context? | 85 | * Are we in a softirq context? Interrupt context? |
86 | * in_softirq - Are we currently processing softirq or have bh disabled? | ||
87 | * in_serving_softirq - Are we currently processing softirq? | ||
85 | */ | 88 | */ |
86 | #define in_irq() (hardirq_count()) | 89 | #define in_irq() (hardirq_count()) |
87 | #define in_softirq() (softirq_count()) | 90 | #define in_softirq() (softirq_count()) |
88 | #define in_interrupt() (irq_count()) | 91 | #define in_interrupt() (irq_count()) |
92 | #define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) | ||
89 | 93 | ||
90 | /* | 94 | /* |
91 | * Are we in NMI context? | 95 | * Are we in NMI context? |
@@ -132,14 +136,16 @@ extern void synchronize_irq(unsigned int irq); | |||
132 | 136 | ||
133 | struct task_struct; | 137 | struct task_struct; |
134 | 138 | ||
135 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 139 | #if !defined(CONFIG_VIRT_CPU_ACCOUNTING) && !defined(CONFIG_IRQ_TIME_ACCOUNTING) |
136 | static inline void account_system_vtime(struct task_struct *tsk) | 140 | static inline void account_system_vtime(struct task_struct *tsk) |
137 | { | 141 | { |
138 | } | 142 | } |
143 | #else | ||
144 | extern void account_system_vtime(struct task_struct *tsk); | ||
139 | #endif | 145 | #endif |
140 | 146 | ||
141 | #if defined(CONFIG_NO_HZ) | 147 | #if defined(CONFIG_NO_HZ) |
142 | #if defined(CONFIG_TINY_RCU) | 148 | #if defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) |
143 | extern void rcu_enter_nohz(void); | 149 | extern void rcu_enter_nohz(void); |
144 | extern void rcu_exit_nohz(void); | 150 | extern void rcu_exit_nohz(void); |
145 | 151 | ||
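A small sketch of the distinction the new helper draws (kernel context, illustrative name): in_softirq() is also true inside plain local_bh_disable()/spin_lock_bh() sections, while in_serving_softirq() is true only while a softirq handler is actually running:

/* Sketch: classify the current context with the helpers above. */
static void report_context_sketch(void)
{
	if (in_irq())
		pr_debug("hard interrupt context\n");
	else if (in_serving_softirq())
		pr_debug("running a softirq handler\n");
	else if (in_softirq())
		pr_debug("bottom halves disabled, but no handler running\n");
	else
		pr_debug("plain process context\n");
}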
diff --git a/include/linux/hid.h b/include/linux/hid.h index 42a0f1d11365..bb0f56f5c01e 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -316,6 +316,7 @@ struct hid_item { | |||
316 | #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 | 316 | #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 |
317 | #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 | 317 | #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 |
318 | #define HID_QUIRK_NO_IGNORE 0x40000000 | 318 | #define HID_QUIRK_NO_IGNORE 0x40000000 |
319 | #define HID_QUIRK_NO_INPUT_SYNC 0x80000000 | ||
319 | 320 | ||
320 | /* | 321 | /* |
321 | * This is the global environment of the parser. This information is | 322 | * This is the global environment of the parser. This information is |
@@ -626,8 +627,8 @@ struct hid_driver { | |||
626 | int (*event)(struct hid_device *hdev, struct hid_field *field, | 627 | int (*event)(struct hid_device *hdev, struct hid_field *field, |
627 | struct hid_usage *usage, __s32 value); | 628 | struct hid_usage *usage, __s32 value); |
628 | 629 | ||
629 | void (*report_fixup)(struct hid_device *hdev, __u8 *buf, | 630 | __u8 *(*report_fixup)(struct hid_device *hdev, __u8 *buf, |
630 | unsigned int size); | 631 | unsigned int *size); |
631 | 632 | ||
632 | int (*input_mapping)(struct hid_device *hdev, | 633 | int (*input_mapping)(struct hid_device *hdev, |
633 | struct hid_input *hidinput, struct hid_field *field, | 634 | struct hid_input *hidinput, struct hid_field *field, |
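Under the new prototype a driver's report_fixup() returns the report descriptor to use (possibly a freshly allocated replacement) and may update *size, rather than patching in place with a fixed length. A hypothetical driver-side fixup illustrating the shape of the callback:

/* Sketch: report_fixup under the new signature (made-up quirk). */
static __u8 *example_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				  unsigned int *size)
{
	if (*size >= 2 && rdesc[1] == 0x81)
		rdesc[1] = 0x01;    /* patch a bogus byte in place */
	return rdesc;               /* or return a new buffer and update *size */
}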
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h index bb6f58baf319..a3f481a3063b 100644 --- a/include/linux/hiddev.h +++ b/include/linux/hiddev.h | |||
@@ -226,8 +226,6 @@ void hiddev_disconnect(struct hid_device *); | |||
226 | void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, | 226 | void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, |
227 | struct hid_usage *usage, __s32 value); | 227 | struct hid_usage *usage, __s32 value); |
228 | void hiddev_report_event(struct hid_device *hid, struct hid_report *report); | 228 | void hiddev_report_event(struct hid_device *hid, struct hid_report *report); |
229 | int __init hiddev_init(void); | ||
230 | void hiddev_exit(void); | ||
231 | #else | 229 | #else |
232 | static inline int hiddev_connect(struct hid_device *hid, | 230 | static inline int hiddev_connect(struct hid_device *hid, |
233 | unsigned int force) | 231 | unsigned int force) |
@@ -236,8 +234,6 @@ static inline void hiddev_disconnect(struct hid_device *hid) { } | |||
236 | static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, | 234 | static inline void hiddev_hid_event(struct hid_device *hid, struct hid_field *field, |
237 | struct hid_usage *usage, __s32 value) { } | 235 | struct hid_usage *usage, __s32 value) { } |
238 | static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } | 236 | static inline void hiddev_report_event(struct hid_device *hid, struct hid_report *report) { } |
239 | static inline int hiddev_init(void) { return 0; } | ||
240 | static inline void hiddev_exit(void) { } | ||
241 | #endif | 237 | #endif |
242 | 238 | ||
243 | #endif | 239 | #endif |
diff --git a/include/linux/htirq.h b/include/linux/htirq.h index c96ea46737d0..70a1dbbf2093 100644 --- a/include/linux/htirq.h +++ b/include/linux/htirq.h | |||
@@ -9,8 +9,9 @@ struct ht_irq_msg { | |||
9 | /* Helper functions.. */ | 9 | /* Helper functions.. */ |
10 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 10 | void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
11 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); | 11 | void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg); |
12 | void mask_ht_irq(unsigned int irq); | 12 | struct irq_data; |
13 | void unmask_ht_irq(unsigned int irq); | 13 | void mask_ht_irq(struct irq_data *data); |
14 | void unmask_ht_irq(struct irq_data *data); | ||
14 | 15 | ||
15 | /* The arch hook for getting things started */ | 16 | /* The arch hook for getting things started */ |
16 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); | 17 | int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev); |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 4bae0b72ed3c..1f66fa06a97c 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -384,11 +384,15 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) | |||
384 | dev_set_drvdata(&dev->dev, data); | 384 | dev_set_drvdata(&dev->dev, data); |
385 | } | 385 | } |
386 | 386 | ||
387 | static inline int i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) | 387 | static inline struct i2c_adapter * |
388 | i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) | ||
388 | { | 389 | { |
389 | return adapter->dev.parent != NULL | 390 | struct device *parent = adapter->dev.parent; |
390 | && adapter->dev.parent->bus == &i2c_bus_type | 391 | |
391 | && adapter->dev.parent->type == &i2c_adapter_type; | 392 | if (parent != NULL && parent->type == &i2c_adapter_type) |
393 | return to_i2c_adapter(parent); | ||
394 | else | ||
395 | return NULL; | ||
392 | } | 396 | } |
393 | 397 | ||
394 | /* Adapter locking functions, exported for shared pin cases */ | 398 | /* Adapter locking functions, exported for shared pin cases */ |
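Because the helper now returns the parent adapter (or NULL) rather than a boolean, walking a chain of muxed adapters up to the physical root becomes a simple loop. A sketch with an illustrative function name:

/* Sketch: climb from a muxed adapter to its root adapter. */
static struct i2c_adapter *root_adapter_sketch(struct i2c_adapter *adap)
{
	struct i2c_adapter *parent;

	while ((parent = i2c_parent_is_i2c_adapter(adap)))
		adap = parent;
	return adap;
}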
diff --git a/include/linux/i2c/sx150x.h b/include/linux/i2c/sx150x.h index ee3049cb9ba5..52baa79d69a7 100644 --- a/include/linux/i2c/sx150x.h +++ b/include/linux/i2c/sx150x.h | |||
@@ -63,6 +63,9 @@ | |||
63 | * IRQ lines will appear. Similarly to gpio_base, the expander | 63 | * IRQ lines will appear. Similarly to gpio_base, the expander |
64 | * will create a block of irqs beginning at this number. | 64 | * will create a block of irqs beginning at this number. |
65 | * This value is ignored if irq_summary is < 0. | 65 | * This value is ignored if irq_summary is < 0. |
66 | * @reset_during_probe: If set to true, the driver will trigger a full | ||
67 | * reset of the chip at the beginning of the probe | ||
68 | * in order to place it in a known state. | ||
66 | */ | 69 | */ |
67 | struct sx150x_platform_data { | 70 | struct sx150x_platform_data { |
68 | unsigned gpio_base; | 71 | unsigned gpio_base; |
@@ -73,6 +76,7 @@ struct sx150x_platform_data { | |||
73 | u16 io_polarity; | 76 | u16 io_polarity; |
74 | int irq_summary; | 77 | int irq_summary; |
75 | unsigned irq_base; | 78 | unsigned irq_base; |
79 | bool reset_during_probe; | ||
76 | }; | 80 | }; |
77 | 81 | ||
78 | #endif /* __LINUX_I2C_SX150X_H */ | 82 | #endif /* __LINUX_I2C_SX150X_H */ |
diff --git a/include/linux/idr.h b/include/linux/idr.h index e968db71e33a..928ae712709f 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
@@ -50,14 +50,14 @@ | |||
50 | 50 | ||
51 | struct idr_layer { | 51 | struct idr_layer { |
52 | unsigned long bitmap; /* A zero bit means "space here" */ | 52 | unsigned long bitmap; /* A zero bit means "space here" */ |
53 | struct idr_layer *ary[1<<IDR_BITS]; | 53 | struct idr_layer __rcu *ary[1<<IDR_BITS]; |
54 | int count; /* When zero, we can release it */ | 54 | int count; /* When zero, we can release it */ |
55 | int layer; /* distance from leaf */ | 55 | int layer; /* distance from leaf */ |
56 | struct rcu_head rcu_head; | 56 | struct rcu_head rcu_head; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | struct idr { | 59 | struct idr { |
60 | struct idr_layer *top; | 60 | struct idr_layer __rcu *top; |
61 | struct idr_layer *id_free; | 61 | struct idr_layer *id_free; |
62 | int layers; /* only valid without concurrent changes */ | 62 | int layers; /* only valid without concurrent changes */ |
63 | int id_free_cnt; | 63 | int id_free_cnt; |
@@ -117,10 +117,13 @@ void idr_init(struct idr *idp); | |||
117 | /* | 117 | /* |
118 | * IDA - IDR based id allocator, use when translation from id to | 118 | * IDA - IDR based id allocator, use when translation from id to |
119 | * pointer isn't necessary. | 119 | * pointer isn't necessary. |
120 | * | ||
121 | * IDA_BITMAP_LONGS is calculated to be one less to accommodate | ||
122 | * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes. | ||
120 | */ | 123 | */ |
121 | #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ | 124 | #define IDA_CHUNK_SIZE 128 /* 128 bytes per chunk */ |
122 | #define IDA_BITMAP_LONGS (128 / sizeof(long) - 1) | 125 | #define IDA_BITMAP_LONGS (IDA_CHUNK_SIZE / sizeof(long) - 1) |
123 | #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) | 126 | #define IDA_BITMAP_BITS (IDA_BITMAP_LONGS * sizeof(long) * 8) |
124 | 127 | ||
125 | struct ida_bitmap { | 128 | struct ida_bitmap { |
126 | long nr_busy; | 129 | long nr_busy; |
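The arithmetic behind the new comment: with a 128-byte IDA_CHUNK_SIZE and 8-byte longs, IDA_BITMAP_LONGS is 128/8 - 1 = 15, so 15 bitmap words (120 bytes) plus nr_busy (8 bytes) fill the chunk exactly; the 32-bit case (31 words + 4 bytes) also totals 128. A compile-time check expressing that relation, as a sketch:

/* Sketch: assert the size relation documented above. */
static inline void ida_bitmap_size_check_sketch(void)
{
	BUILD_BUG_ON(sizeof(struct ida_bitmap) != IDA_CHUNK_SIZE);
}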
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 97b2eae6a22c..ed5a03cbe184 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
@@ -986,6 +986,7 @@ struct ieee80211_ht_info { | |||
986 | #define WLAN_AUTH_OPEN 0 | 986 | #define WLAN_AUTH_OPEN 0 |
987 | #define WLAN_AUTH_SHARED_KEY 1 | 987 | #define WLAN_AUTH_SHARED_KEY 1 |
988 | #define WLAN_AUTH_FT 2 | 988 | #define WLAN_AUTH_FT 2 |
989 | #define WLAN_AUTH_SAE 3 | ||
989 | #define WLAN_AUTH_LEAP 128 | 990 | #define WLAN_AUTH_LEAP 128 |
990 | 991 | ||
991 | #define WLAN_AUTH_CHALLENGE_LEN 128 | 992 | #define WLAN_AUTH_CHALLENGE_LEN 128 |
@@ -1072,6 +1073,10 @@ enum ieee80211_statuscode { | |||
1072 | WLAN_STATUS_NO_DIRECT_LINK = 48, | 1073 | WLAN_STATUS_NO_DIRECT_LINK = 48, |
1073 | WLAN_STATUS_STA_NOT_PRESENT = 49, | 1074 | WLAN_STATUS_STA_NOT_PRESENT = 49, |
1074 | WLAN_STATUS_STA_NOT_QSTA = 50, | 1075 | WLAN_STATUS_STA_NOT_QSTA = 50, |
1076 | /* 802.11s */ | ||
1077 | WLAN_STATUS_ANTI_CLOG_REQUIRED = 76, | ||
1078 | WLAN_STATUS_FCG_NOT_SUPP = 78, | ||
1079 | WLAN_STATUS_STA_NO_TBTT = 78, | ||
1075 | }; | 1080 | }; |
1076 | 1081 | ||
1077 | 1082 | ||
@@ -1112,6 +1117,22 @@ enum ieee80211_reasoncode { | |||
1112 | WLAN_REASON_QSTA_REQUIRE_SETUP = 38, | 1117 | WLAN_REASON_QSTA_REQUIRE_SETUP = 38, |
1113 | WLAN_REASON_QSTA_TIMEOUT = 39, | 1118 | WLAN_REASON_QSTA_TIMEOUT = 39, |
1114 | WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, | 1119 | WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45, |
1120 | /* 802.11s */ | ||
1121 | WLAN_REASON_MESH_PEER_CANCELED = 52, | ||
1122 | WLAN_REASON_MESH_MAX_PEERS = 53, | ||
1123 | WLAN_REASON_MESH_CONFIG = 54, | ||
1124 | WLAN_REASON_MESH_CLOSE = 55, | ||
1125 | WLAN_REASON_MESH_MAX_RETRIES = 56, | ||
1126 | WLAN_REASON_MESH_CONFIRM_TIMEOUT = 57, | ||
1127 | WLAN_REASON_MESH_INVALID_GTK = 58, | ||
1128 | WLAN_REASON_MESH_INCONSISTENT_PARAM = 59, | ||
1129 | WLAN_REASON_MESH_INVALID_SECURITY = 60, | ||
1130 | WLAN_REASON_MESH_PATH_ERROR = 61, | ||
1131 | WLAN_REASON_MESH_PATH_NOFORWARD = 62, | ||
1132 | WLAN_REASON_MESH_PATH_DEST_UNREACHABLE = 63, | ||
1133 | WLAN_REASON_MAC_EXISTS_IN_MBSS = 64, | ||
1134 | WLAN_REASON_MESH_CHAN_REGULATORY = 65, | ||
1135 | WLAN_REASON_MESH_CHAN = 66, | ||
1115 | }; | 1136 | }; |
1116 | 1137 | ||
1117 | 1138 | ||
@@ -1139,20 +1160,33 @@ enum ieee80211_eid { | |||
1139 | WLAN_EID_TS_DELAY = 43, | 1160 | WLAN_EID_TS_DELAY = 43, |
1140 | WLAN_EID_TCLAS_PROCESSING = 44, | 1161 | WLAN_EID_TCLAS_PROCESSING = 44, |
1141 | WLAN_EID_QOS_CAPA = 46, | 1162 | WLAN_EID_QOS_CAPA = 46, |
1142 | /* 802.11s | 1163 | /* 802.11s */ |
1143 | * | 1164 | WLAN_EID_MESH_CONFIG = 113, |
1144 | * All mesh EID numbers are pending IEEE 802.11 ANA approval. | 1165 | WLAN_EID_MESH_ID = 114, |
1145 | * The numbers have been incremented from those suggested in | 1166 | WLAN_EID_LINK_METRIC_REPORT = 115, |
1146 | * 802.11s/D2.0 so that MESH_CONFIG does not conflict with | 1167 | WLAN_EID_CONGESTION_NOTIFICATION = 116, |
1147 | * EXT_SUPP_RATES. | 1168 | /* Note that the Peer Link IE has been replaced with the similar |
1169 | * Peer Management IE. We will keep the former definition until mesh | ||
1170 | * code is changed to comply with latest 802.11s drafts. | ||
1148 | */ | 1171 | */ |
1149 | WLAN_EID_MESH_CONFIG = 51, | 1172 | WLAN_EID_PEER_LINK = 55, /* no longer in 802.11s drafts */ |
1150 | WLAN_EID_MESH_ID = 52, | 1173 | WLAN_EID_PEER_MGMT = 117, |
1151 | WLAN_EID_PEER_LINK = 55, | 1174 | WLAN_EID_CHAN_SWITCH_PARAM = 118, |
1152 | WLAN_EID_PREQ = 68, | 1175 | WLAN_EID_MESH_AWAKE_WINDOW = 119, |
1153 | WLAN_EID_PREP = 69, | 1176 | WLAN_EID_BEACON_TIMING = 120, |
1154 | WLAN_EID_PERR = 70, | 1177 | WLAN_EID_MCCAOP_SETUP_REQ = 121, |
1155 | WLAN_EID_RANN = 49, /* compatible with FreeBSD */ | 1178 | WLAN_EID_MCCAOP_SETUP_RESP = 122, |
1179 | WLAN_EID_MCCAOP_ADVERT = 123, | ||
1180 | WLAN_EID_MCCAOP_TEARDOWN = 124, | ||
1181 | WLAN_EID_GANN = 125, | ||
1182 | WLAN_EID_RANN = 126, | ||
1183 | WLAN_EID_PREQ = 130, | ||
1184 | WLAN_EID_PREP = 131, | ||
1185 | WLAN_EID_PERR = 132, | ||
1186 | WLAN_EID_PXU = 137, | ||
1187 | WLAN_EID_PXUC = 138, | ||
1188 | WLAN_EID_AUTH_MESH_PEER_EXCH = 139, | ||
1189 | WLAN_EID_MIC = 140, | ||
1156 | 1190 | ||
1157 | WLAN_EID_PWR_CONSTRAINT = 32, | 1191 | WLAN_EID_PWR_CONSTRAINT = 32, |
1158 | WLAN_EID_PWR_CAPABILITY = 33, | 1192 | WLAN_EID_PWR_CAPABILITY = 33, |
@@ -1211,9 +1245,14 @@ enum ieee80211_category { | |||
1211 | WLAN_CATEGORY_HT = 7, | 1245 | WLAN_CATEGORY_HT = 7, |
1212 | WLAN_CATEGORY_SA_QUERY = 8, | 1246 | WLAN_CATEGORY_SA_QUERY = 8, |
1213 | WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, | 1247 | WLAN_CATEGORY_PROTECTED_DUAL_OF_ACTION = 9, |
1248 | WLAN_CATEGORY_MESH_ACTION = 13, | ||
1249 | WLAN_CATEGORY_MULTIHOP_ACTION = 14, | ||
1250 | WLAN_CATEGORY_SELF_PROTECTED = 15, | ||
1214 | WLAN_CATEGORY_WMM = 17, | 1251 | WLAN_CATEGORY_WMM = 17, |
1215 | WLAN_CATEGORY_MESH_PLINK = 30, /* Pending ANA approval */ | 1252 | /* TODO: remove MESH_PLINK and MESH_PATH_SEL after */ |
1216 | WLAN_CATEGORY_MESH_PATH_SEL = 32, /* Pending ANA approval */ | 1253 | /* mesh is updated to current 802.11s draft */ |
1254 | WLAN_CATEGORY_MESH_PLINK = 30, | ||
1255 | WLAN_CATEGORY_MESH_PATH_SEL = 32, | ||
1217 | WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, | 1256 | WLAN_CATEGORY_VENDOR_SPECIFIC_PROTECTED = 126, |
1218 | WLAN_CATEGORY_VENDOR_SPECIFIC = 127, | 1257 | WLAN_CATEGORY_VENDOR_SPECIFIC = 127, |
1219 | }; | 1258 | }; |
@@ -1351,6 +1390,8 @@ enum ieee80211_sa_query_action { | |||
1351 | /* AKM suite selectors */ | 1390 | /* AKM suite selectors */ |
1352 | #define WLAN_AKM_SUITE_8021X 0x000FAC01 | 1391 | #define WLAN_AKM_SUITE_8021X 0x000FAC01 |
1353 | #define WLAN_AKM_SUITE_PSK 0x000FAC02 | 1392 | #define WLAN_AKM_SUITE_PSK 0x000FAC02 |
1393 | #define WLAN_AKM_SUITE_SAE 0x000FAC08 | ||
1394 | #define WLAN_AKM_SUITE_FT_OVER_SAE 0x000FAC09 | ||
1354 | 1395 | ||
1355 | #define WLAN_MAX_KEY_LEN 32 | 1396 | #define WLAN_MAX_KEY_LEN 32 |
1356 | 1397 | ||
diff --git a/include/linux/if.h b/include/linux/if.h index 53558ec59e1b..123959927745 100644 --- a/include/linux/if.h +++ b/include/linux/if.h | |||
@@ -75,6 +75,8 @@ | |||
75 | #define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */ | 75 | #define IFF_DISABLE_NETPOLL 0x2000 /* disable netpoll at run-time */ |
76 | #define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ | 76 | #define IFF_MACVLAN_PORT 0x4000 /* device used as macvlan port */ |
77 | #define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ | 77 | #define IFF_BRIDGE_PORT 0x8000 /* device used as bridge port */ |
78 | #define IFF_OVS_DATAPATH 0x10000 /* device used as Open vSwitch | ||
79 | * datapath port */ | ||
78 | 80 | ||
79 | #define IF_GET_IFACE 0x0001 /* for querying only */ | 81 | #define IF_GET_IFACE 0x0001 /* for querying only */ |
80 | #define IF_GET_PROTO 0x0002 | 82 | #define IF_GET_PROTO 0x0002 |
diff --git a/include/linux/if_bonding.h b/include/linux/if_bonding.h index 2c7994372bde..a17edda8a781 100644 --- a/include/linux/if_bonding.h +++ b/include/linux/if_bonding.h | |||
@@ -84,6 +84,9 @@ | |||
84 | #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ | 84 | #define BOND_DEFAULT_MAX_BONDS 1 /* Default maximum number of devices to support */ |
85 | 85 | ||
86 | #define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ | 86 | #define BOND_DEFAULT_TX_QUEUES 16 /* Default number of tx queues per device */ |
87 | |||
88 | #define BOND_DEFAULT_RESEND_IGMP 1 /* Default number of IGMP membership reports */ | ||
89 | |||
87 | /* hashing types */ | 90 | /* hashing types */ |
88 | #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ | 91 | #define BOND_XMIT_POLICY_LAYER2 0 /* layer 2 (MAC only), default */ |
89 | #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ | 92 | #define BOND_XMIT_POLICY_LAYER34 1 /* layer 3+4 (IP ^ (TCP || UDP)) */ |
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index c831467774d0..f9c3df03db0f 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -119,7 +119,7 @@ struct ethhdr { | |||
119 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ | 119 | unsigned char h_dest[ETH_ALEN]; /* destination eth addr */ |
120 | unsigned char h_source[ETH_ALEN]; /* source ether addr */ | 120 | unsigned char h_source[ETH_ALEN]; /* source ether addr */ |
121 | __be16 h_proto; /* packet type ID field */ | 121 | __be16 h_proto; /* packet type ID field */ |
122 | } __packed; | 122 | } __attribute__((packed)); |
123 | 123 | ||
124 | #ifdef __KERNEL__ | 124 | #ifdef __KERNEL__ |
125 | #include <linux/skbuff.h> | 125 | #include <linux/skbuff.h> |
@@ -137,8 +137,6 @@ extern struct ctl_table ether_table[]; | |||
137 | 137 | ||
138 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); | 138 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); |
139 | 139 | ||
140 | #define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x" | ||
141 | |||
142 | #endif | 140 | #endif |
143 | 141 | ||
144 | #endif /* _LINUX_IF_ETHER_H */ | 142 | #endif /* _LINUX_IF_ETHER_H */ |
diff --git a/include/linux/if_fddi.h b/include/linux/if_fddi.h index 9947c39e62f6..e6dc11e7f9a5 100644 --- a/include/linux/if_fddi.h +++ b/include/linux/if_fddi.h | |||
@@ -67,7 +67,7 @@ struct fddi_8022_1_hdr { | |||
67 | __u8 dsap; /* destination service access point */ | 67 | __u8 dsap; /* destination service access point */ |
68 | __u8 ssap; /* source service access point */ | 68 | __u8 ssap; /* source service access point */ |
69 | __u8 ctrl; /* control byte #1 */ | 69 | __u8 ctrl; /* control byte #1 */ |
70 | } __packed; | 70 | } __attribute__((packed)); |
71 | 71 | ||
72 | /* Define 802.2 Type 2 header */ | 72 | /* Define 802.2 Type 2 header */ |
73 | struct fddi_8022_2_hdr { | 73 | struct fddi_8022_2_hdr { |
@@ -75,7 +75,7 @@ struct fddi_8022_2_hdr { | |||
75 | __u8 ssap; /* source service access point */ | 75 | __u8 ssap; /* source service access point */ |
76 | __u8 ctrl_1; /* control byte #1 */ | 76 | __u8 ctrl_1; /* control byte #1 */ |
77 | __u8 ctrl_2; /* control byte #2 */ | 77 | __u8 ctrl_2; /* control byte #2 */ |
78 | } __packed; | 78 | } __attribute__((packed)); |
79 | 79 | ||
80 | /* Define 802.2 SNAP header */ | 80 | /* Define 802.2 SNAP header */ |
81 | #define FDDI_K_OUI_LEN 3 | 81 | #define FDDI_K_OUI_LEN 3 |
@@ -85,7 +85,7 @@ struct fddi_snap_hdr { | |||
85 | __u8 ctrl; /* always 0x03 */ | 85 | __u8 ctrl; /* always 0x03 */ |
86 | __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ | 86 | __u8 oui[FDDI_K_OUI_LEN]; /* organizational universal id */ |
87 | __be16 ethertype; /* packet type ID field */ | 87 | __be16 ethertype; /* packet type ID field */ |
88 | } __packed; | 88 | } __attribute__((packed)); |
89 | 89 | ||
90 | /* Define FDDI LLC frame header */ | 90 | /* Define FDDI LLC frame header */ |
91 | struct fddihdr { | 91 | struct fddihdr { |
@@ -98,7 +98,7 @@ struct fddihdr { | |||
98 | struct fddi_8022_2_hdr llc_8022_2; | 98 | struct fddi_8022_2_hdr llc_8022_2; |
99 | struct fddi_snap_hdr llc_snap; | 99 | struct fddi_snap_hdr llc_snap; |
100 | } hdr; | 100 | } hdr; |
101 | } __packed; | 101 | } __attribute__((packed)); |
102 | 102 | ||
103 | #ifdef __KERNEL__ | 103 | #ifdef __KERNEL__ |
104 | #include <linux/netdevice.h> | 104 | #include <linux/netdevice.h> |
diff --git a/include/linux/if_hippi.h b/include/linux/if_hippi.h index 5fe5f307c6f5..cdc049f1829a 100644 --- a/include/linux/if_hippi.h +++ b/include/linux/if_hippi.h | |||
@@ -104,7 +104,7 @@ struct hippi_fp_hdr { | |||
104 | __be32 fixed; | 104 | __be32 fixed; |
105 | #endif | 105 | #endif |
106 | __be32 d2_size; | 106 | __be32 d2_size; |
107 | } __packed; | 107 | } __attribute__((packed)); |
108 | 108 | ||
109 | struct hippi_le_hdr { | 109 | struct hippi_le_hdr { |
110 | #if defined (__BIG_ENDIAN_BITFIELD) | 110 | #if defined (__BIG_ENDIAN_BITFIELD) |
@@ -129,7 +129,7 @@ struct hippi_le_hdr { | |||
129 | __u8 daddr[HIPPI_ALEN]; | 129 | __u8 daddr[HIPPI_ALEN]; |
130 | __u16 locally_administered; | 130 | __u16 locally_administered; |
131 | __u8 saddr[HIPPI_ALEN]; | 131 | __u8 saddr[HIPPI_ALEN]; |
132 | } __packed; | 132 | } __attribute__((packed)); |
133 | 133 | ||
134 | #define HIPPI_OUI_LEN 3 | 134 | #define HIPPI_OUI_LEN 3 |
135 | /* | 135 | /* |
@@ -142,12 +142,12 @@ struct hippi_snap_hdr { | |||
142 | __u8 ctrl; /* always 0x03 */ | 142 | __u8 ctrl; /* always 0x03 */ |
143 | __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ | 143 | __u8 oui[HIPPI_OUI_LEN]; /* organizational universal id (zero)*/ |
144 | __be16 ethertype; /* packet type ID field */ | 144 | __be16 ethertype; /* packet type ID field */ |
145 | } __packed; | 145 | } __attribute__((packed)); |
146 | 146 | ||
147 | struct hippi_hdr { | 147 | struct hippi_hdr { |
148 | struct hippi_fp_hdr fp; | 148 | struct hippi_fp_hdr fp; |
149 | struct hippi_le_hdr le; | 149 | struct hippi_le_hdr le; |
150 | struct hippi_snap_hdr snap; | 150 | struct hippi_snap_hdr snap; |
151 | } __packed; | 151 | } __attribute__((packed)); |
152 | 152 | ||
153 | #endif /* _LINUX_IF_HIPPI_H */ | 153 | #endif /* _LINUX_IF_HIPPI_H */ |
diff --git a/include/linux/if_infiniband.h b/include/linux/if_infiniband.h index 3e659ec7dfdd..7d958475d4ac 100644 --- a/include/linux/if_infiniband.h +++ b/include/linux/if_infiniband.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD | 5 | * <http://www.fsf.org/copyleft/gpl.html>, or the OpenIB.org BSD |
6 | * license, available in the LICENSE.TXT file accompanying this | 6 | * license, available in the LICENSE.TXT file accompanying this |
7 | * software. These details are also available at | 7 | * software. These details are also available at |
8 | * <http://openib.org/license.html>. | 8 | * <http://www.openfabrics.org/software_license.htm>. |
9 | * | 9 | * |
10 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | 10 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, |
11 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | 11 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF |
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index 35280b302290..8a2fd66a8b5f 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
@@ -40,6 +40,12 @@ struct macvlan_rx_stats { | |||
40 | unsigned long rx_errors; | 40 | unsigned long rx_errors; |
41 | }; | 41 | }; |
42 | 42 | ||
43 | /* | ||
44 | * Maximum times a macvtap device can be opened. This can be used to | ||
45 | * configure the number of receive queues, e.g. for multiqueue virtio. | ||
46 | */ | ||
47 | #define MAX_MACVTAP_QUEUES (NR_CPUS < 16 ? NR_CPUS : 16) | ||
48 | |||
43 | struct macvlan_dev { | 49 | struct macvlan_dev { |
44 | struct net_device *dev; | 50 | struct net_device *dev; |
45 | struct list_head list; | 51 | struct list_head list; |
@@ -50,7 +56,8 @@ struct macvlan_dev { | |||
50 | enum macvlan_mode mode; | 56 | enum macvlan_mode mode; |
51 | int (*receive)(struct sk_buff *skb); | 57 | int (*receive)(struct sk_buff *skb); |
52 | int (*forward)(struct net_device *dev, struct sk_buff *skb); | 58 | int (*forward)(struct net_device *dev, struct sk_buff *skb); |
53 | struct macvtap_queue *tap; | 59 | struct macvtap_queue *taps[MAX_MACVTAP_QUEUES]; |
60 | int numvtaps; | ||
54 | }; | 61 | }; |
55 | 62 | ||
56 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, | 63 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, |
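A hypothetical sketch of how a flow hash could be mapped onto the taps[] array above to select one of numvtaps receive queues; the helper name is made up, and the real driver additionally dereferences the chosen slot under RCU:

/* Sketch only: spread flows across the available macvtap queues. */
static struct macvtap_queue *pick_queue_sketch(const struct macvlan_dev *vlan,
					       u32 rxhash)
{
	if (!vlan->numvtaps)
		return NULL;
	return vlan->taps[rxhash % vlan->numvtaps];
}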
diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 1925e0c3f162..397921b09ef9 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h | |||
@@ -40,26 +40,36 @@ | |||
40 | * PPPoE addressing definition | 40 | * PPPoE addressing definition |
41 | */ | 41 | */ |
42 | typedef __be16 sid_t; | 42 | typedef __be16 sid_t; |
43 | struct pppoe_addr{ | 43 | struct pppoe_addr { |
44 | sid_t sid; /* Session identifier */ | 44 | sid_t sid; /* Session identifier */ |
45 | unsigned char remote[ETH_ALEN]; /* Remote address */ | 45 | unsigned char remote[ETH_ALEN]; /* Remote address */ |
46 | char dev[IFNAMSIZ]; /* Local device to use */ | 46 | char dev[IFNAMSIZ]; /* Local device to use */ |
47 | }; | 47 | }; |
48 | 48 | ||
49 | /************************************************************************ | 49 | /************************************************************************ |
50 | * Protocols supported by AF_PPPOX | 50 | * PPTP addressing definition |
51 | */ | 51 | */ |
52 | struct pptp_addr { | ||
53 | __be16 call_id; | ||
54 | struct in_addr sin_addr; | ||
55 | }; | ||
56 | |||
57 | /************************************************************************ | ||
58 | * Protocols supported by AF_PPPOX | ||
59 | */ | ||
52 | #define PX_PROTO_OE 0 /* Currently just PPPoE */ | 60 | #define PX_PROTO_OE 0 /* Currently just PPPoE */ |
53 | #define PX_PROTO_OL2TP 1 /* Now L2TP also */ | 61 | #define PX_PROTO_OL2TP 1 /* Now L2TP also */ |
54 | #define PX_MAX_PROTO 2 | 62 | #define PX_PROTO_PPTP 2 |
63 | #define PX_MAX_PROTO 3 | ||
55 | 64 | ||
56 | struct sockaddr_pppox { | 65 | struct sockaddr_pppox { |
57 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 66 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
58 | unsigned int sa_protocol; /* protocol identifier */ | 67 | unsigned int sa_protocol; /* protocol identifier */ |
59 | union{ | 68 | union { |
60 | struct pppoe_addr pppoe; | 69 | struct pppoe_addr pppoe; |
61 | }sa_addr; | 70 | struct pptp_addr pptp; |
62 | } __packed; | 71 | } sa_addr; |
72 | } __attribute__((packed)); | ||
63 | 73 | ||
64 | /* The use of the above union isn't viable because the size of this | 74 | /* The use of the above union isn't viable because the size of this |
65 | * struct must stay fixed over time -- applications use sizeof(struct | 75 | * struct must stay fixed over time -- applications use sizeof(struct |
@@ -70,7 +80,7 @@ struct sockaddr_pppol2tp { | |||
70 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 80 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
71 | unsigned int sa_protocol; /* protocol identifier */ | 81 | unsigned int sa_protocol; /* protocol identifier */ |
72 | struct pppol2tp_addr pppol2tp; | 82 | struct pppol2tp_addr pppol2tp; |
73 | } __packed; | 83 | } __attribute__((packed)); |
74 | 84 | ||
75 | /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 | 85 | /* The L2TPv3 protocol changes tunnel and session ids from 16 to 32 |
76 | * bits. So we need a different sockaddr structure. | 86 | * bits. So we need a different sockaddr structure. |
@@ -79,7 +89,7 @@ struct sockaddr_pppol2tpv3 { | |||
79 | sa_family_t sa_family; /* address family, AF_PPPOX */ | 89 | sa_family_t sa_family; /* address family, AF_PPPOX */ |
80 | unsigned int sa_protocol; /* protocol identifier */ | 90 | unsigned int sa_protocol; /* protocol identifier */ |
81 | struct pppol2tpv3_addr pppol2tp; | 91 | struct pppol2tpv3_addr pppol2tp; |
82 | } __packed; | 92 | } __attribute__((packed)); |
83 | 93 | ||
84 | /********************************************************************* | 94 | /********************************************************************* |
85 | * | 95 | * |
@@ -101,7 +111,7 @@ struct pppoe_tag { | |||
101 | __be16 tag_type; | 111 | __be16 tag_type; |
102 | __be16 tag_len; | 112 | __be16 tag_len; |
103 | char tag_data[0]; | 113 | char tag_data[0]; |
104 | } __attribute ((packed)); | 114 | } __attribute__ ((packed)); |
105 | 115 | ||
106 | /* Tag identifiers */ | 116 | /* Tag identifiers */ |
107 | #define PTT_EOL __cpu_to_be16(0x0000) | 117 | #define PTT_EOL __cpu_to_be16(0x0000) |
@@ -129,7 +139,7 @@ struct pppoe_hdr { | |||
129 | __be16 sid; | 139 | __be16 sid; |
130 | __be16 length; | 140 | __be16 length; |
131 | struct pppoe_tag tag[0]; | 141 | struct pppoe_tag tag[0]; |
132 | } __packed; | 142 | } __attribute__((packed)); |
133 | 143 | ||
134 | /* Length of entire PPPoE + PPP header */ | 144 | /* Length of entire PPPoE + PPP header */ |
135 | #define PPPOE_SES_HLEN 8 | 145 | #define PPPOE_SES_HLEN 8 |
@@ -150,15 +160,23 @@ struct pppoe_opt { | |||
150 | relayed to (PPPoE relaying) */ | 160 | relayed to (PPPoE relaying) */ |
151 | }; | 161 | }; |
152 | 162 | ||
163 | struct pptp_opt { | ||
164 | struct pptp_addr src_addr; | ||
165 | struct pptp_addr dst_addr; | ||
166 | u32 ack_sent, ack_recv; | ||
167 | u32 seq_sent, seq_recv; | ||
168 | int ppp_flags; | ||
169 | }; | ||
153 | #include <net/sock.h> | 170 | #include <net/sock.h> |
154 | 171 | ||
155 | struct pppox_sock { | 172 | struct pppox_sock { |
156 | /* struct sock must be the first member of pppox_sock */ | 173 | /* struct sock must be the first member of pppox_sock */ |
157 | struct sock sk; | 174 | struct sock sk; |
158 | struct ppp_channel chan; | 175 | struct ppp_channel chan; |
159 | struct pppox_sock *next; /* for hash table */ | 176 | struct pppox_sock *next; /* for hash table */ |
160 | union { | 177 | union { |
161 | struct pppoe_opt pppoe; | 178 | struct pppoe_opt pppoe; |
179 | struct pptp_opt pptp; | ||
162 | } proto; | 180 | } proto; |
163 | __be16 num; | 181 | __be16 num; |
164 | }; | 182 | }; |
@@ -186,7 +204,7 @@ struct pppox_proto { | |||
186 | struct module *owner; | 204 | struct module *owner; |
187 | }; | 205 | }; |
188 | 206 | ||
189 | extern int register_pppox_proto(int proto_num, struct pppox_proto *pp); | 207 | extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp); |
190 | extern void unregister_pppox_proto(int proto_num); | 208 | extern void unregister_pppox_proto(int proto_num); |
191 | extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ | 209 | extern void pppox_unbind_sock(struct sock *sk);/* delete ppp-channel binding */ |
192 | extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); | 210 | extern int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
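For the new PX_PROTO_PPTP protocol, the following user-space sketch shows how the extended sa_addr union might be filled; the call id and the 192.0.2.1 local address are placeholders, and the driver's exact bind semantics are not asserted here.

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <linux/if_pppox.h>

static int open_pptp_socket(void)
{
	struct sockaddr_pppox sp;
	int fd = socket(AF_PPPOX, SOCK_STREAM, PX_PROTO_PPTP);

	if (fd < 0)
		return -1;

	memset(&sp, 0, sizeof(sp));
	sp.sa_family = AF_PPPOX;
	sp.sa_protocol = PX_PROTO_PPTP;
	sp.sa_addr.pptp.call_id = 0;                       /* placeholder call id */
	inet_aton("192.0.2.1", &sp.sa_addr.pptp.sin_addr); /* placeholder local address */

	if (bind(fd, (struct sockaddr *)&sp, sizeof(sp)) < 0) {
		close(fd);
		return -1;
	}
	return fd;
}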
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 3d870fda8c4f..c2f3a72712ce 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
17 | #include <linux/netdevice.h> | 17 | #include <linux/netdevice.h> |
18 | #include <linux/etherdevice.h> | 18 | #include <linux/etherdevice.h> |
19 | #include <linux/rtnetlink.h> | ||
19 | 20 | ||
20 | #define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) | 21 | #define VLAN_HLEN 4 /* The additional bytes (on top of the Ethernet header) |
21 | * that VLAN requires. | 22 | * that VLAN requires. |
@@ -68,6 +69,7 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) | |||
68 | #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ | 69 | #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator */ |
69 | #define VLAN_TAG_PRESENT VLAN_CFI_MASK | 70 | #define VLAN_TAG_PRESENT VLAN_CFI_MASK |
70 | #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ | 71 | #define VLAN_VID_MASK 0x0fff /* VLAN Identifier */ |
72 | #define VLAN_N_VID 4096 | ||
71 | 73 | ||
72 | /* found in socket.c */ | 74 | /* found in socket.c */ |
73 | extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); | 75 | extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); |
@@ -76,9 +78,8 @@ extern void vlan_ioctl_set(int (*hook)(struct net *, void __user *)); | |||
76 | * depends on completely exhausting the VLAN identifier space. Thus | 78 | * depends on completely exhausting the VLAN identifier space. Thus |
77 | * it gives constant time look-up, but in many cases it wastes memory. | 79 | * it gives constant time look-up, but in many cases it wastes memory. |
78 | */ | 80 | */ |
79 | #define VLAN_GROUP_ARRAY_LEN 4096 | ||
80 | #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 | 81 | #define VLAN_GROUP_ARRAY_SPLIT_PARTS 8 |
81 | #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_GROUP_ARRAY_LEN/VLAN_GROUP_ARRAY_SPLIT_PARTS) | 82 | #define VLAN_GROUP_ARRAY_PART_LEN (VLAN_N_VID/VLAN_GROUP_ARRAY_SPLIT_PARTS) |
82 | 83 | ||
83 | struct vlan_group { | 84 | struct vlan_group { |
84 | struct net_device *real_dev; /* The ethernet(like) device | 85 | struct net_device *real_dev; /* The ethernet(like) device |
@@ -114,12 +115,24 @@ static inline void vlan_group_set_device(struct vlan_group *vg, | |||
114 | #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) | 115 | #define vlan_tx_tag_get(__skb) ((__skb)->vlan_tci & ~VLAN_TAG_PRESENT) |
115 | 116 | ||
116 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | 117 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) |
118 | /* Must be invoked with rcu_read_lock or with RTNL. */ | ||
119 | static inline struct net_device *vlan_find_dev(struct net_device *real_dev, | ||
120 | u16 vlan_id) | ||
121 | { | ||
122 | struct vlan_group *grp = rcu_dereference_rtnl(real_dev->vlgrp); | ||
123 | |||
124 | if (grp) | ||
125 | return vlan_group_get_device(grp, vlan_id); | ||
126 | |||
127 | return NULL; | ||
128 | } | ||
129 | |||
117 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); | 130 | extern struct net_device *vlan_dev_real_dev(const struct net_device *dev); |
118 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); | 131 | extern u16 vlan_dev_vlan_id(const struct net_device *dev); |
119 | 132 | ||
120 | extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | 133 | extern int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, |
121 | u16 vlan_tci, int polling); | 134 | u16 vlan_tci, int polling); |
122 | extern int vlan_hwaccel_do_receive(struct sk_buff *skb); | 135 | extern bool vlan_hwaccel_do_receive(struct sk_buff **skb); |
123 | extern gro_result_t | 136 | extern gro_result_t |
124 | vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, | 137 | vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp, |
125 | unsigned int vlan_tci, struct sk_buff *skb); | 138 | unsigned int vlan_tci, struct sk_buff *skb); |
@@ -128,6 +141,12 @@ vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp, | |||
128 | unsigned int vlan_tci); | 141 | unsigned int vlan_tci); |
129 | 142 | ||
130 | #else | 143 | #else |
144 | static inline struct net_device *vlan_find_dev(struct net_device *real_dev, | ||
145 | u16 vlan_id) | ||
146 | { | ||
147 | return NULL; | ||
148 | } | ||
149 | |||
131 | static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) | 150 | static inline struct net_device *vlan_dev_real_dev(const struct net_device *dev) |
132 | { | 151 | { |
133 | BUG(); | 152 | BUG(); |
@@ -147,9 +166,11 @@ static inline int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp, | |||
147 | return NET_XMIT_SUCCESS; | 166 | return NET_XMIT_SUCCESS; |
148 | } | 167 | } |
149 | 168 | ||
150 | static inline int vlan_hwaccel_do_receive(struct sk_buff *skb) | 169 | static inline bool vlan_hwaccel_do_receive(struct sk_buff **skb) |
151 | { | 170 | { |
152 | return 0; | 171 | if ((*skb)->vlan_tci & VLAN_VID_MASK) |
172 | (*skb)->pkt_type = PACKET_OTHERHOST; | ||
173 | return false; | ||
153 | } | 174 | } |
154 | 175 | ||
155 | static inline gro_result_t | 176 | static inline gro_result_t |
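The new vlan_find_dev() helper must be called under rcu_read_lock() or RTNL, as its comment states. A minimal sketch of a caller (the function name is hypothetical):

#include <linux/if_vlan.h>
#include <linux/rcupdate.h>

/* Sketch: check whether a VLAN id is already configured on real_dev. */
static bool vlan_vid_in_use(struct net_device *real_dev, u16 vid)
{
	struct net_device *vlan_dev;
	bool in_use;

	rcu_read_lock();
	vlan_dev = vlan_find_dev(real_dev, vid);
	in_use = vlan_dev != NULL;
	rcu_read_unlock();

	return in_use;
}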
diff --git a/include/linux/in.h b/include/linux/in.h index 41d88a4689af..beeb6dee2b49 100644 --- a/include/linux/in.h +++ b/include/linux/in.h | |||
@@ -250,6 +250,25 @@ struct sockaddr_in { | |||
250 | 250 | ||
251 | #ifdef __KERNEL__ | 251 | #ifdef __KERNEL__ |
252 | 252 | ||
253 | #include <linux/errno.h> | ||
254 | |||
255 | static inline int proto_ports_offset(int proto) | ||
256 | { | ||
257 | switch (proto) { | ||
258 | case IPPROTO_TCP: | ||
259 | case IPPROTO_UDP: | ||
260 | case IPPROTO_DCCP: | ||
261 | case IPPROTO_ESP: /* SPI */ | ||
262 | case IPPROTO_SCTP: | ||
263 | case IPPROTO_UDPLITE: | ||
264 | return 0; | ||
265 | case IPPROTO_AH: /* SPI */ | ||
266 | return 4; | ||
267 | default: | ||
268 | return -EINVAL; | ||
269 | } | ||
270 | } | ||
271 | |||
253 | static inline bool ipv4_is_loopback(__be32 addr) | 272 | static inline bool ipv4_is_loopback(__be32 addr) |
254 | { | 273 | { |
255 | return (addr & htonl(0xff000000)) == htonl(0x7f000000); | 274 | return (addr & htonl(0xff000000)) == htonl(0x7f000000); |
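A sketch of a consumer for the new proto_ports_offset() helper, e.g. a flow hash that wants the 32 bits (port pair or SPI) at the returned offset past the transport header; skb_get_ports() is a hypothetical name.

#include <linux/in.h>
#include <linux/ip.h>
#include <linux/skbuff.h>

static __be32 skb_get_ports(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	int poff = proto_ports_offset(iph->protocol);

	if (poff < 0 ||
	    !pskb_may_pull(skb, skb_transport_offset(skb) + poff + 4))
		return 0;

	/* For TCP/UDP/SCTP this is the source/destination port pair. */
	return *(__be32 *)(skb_transport_header(skb) + poff);
}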
diff --git a/include/linux/in6.h b/include/linux/in6.h index c4bf46f764bf..097a34b55560 100644 --- a/include/linux/in6.h +++ b/include/linux/in6.h | |||
@@ -268,6 +268,10 @@ struct in6_flowlabel_req { | |||
268 | /* RFC5082: Generalized Ttl Security Mechanism */ | 268 | /* RFC5082: Generalized Ttl Security Mechanism */ |
269 | #define IPV6_MINHOPCOUNT 73 | 269 | #define IPV6_MINHOPCOUNT 73 |
270 | 270 | ||
271 | #define IPV6_ORIGDSTADDR 74 | ||
272 | #define IPV6_RECVORIGDSTADDR IPV6_ORIGDSTADDR | ||
273 | #define IPV6_TRANSPARENT 75 | ||
274 | |||
271 | /* | 275 | /* |
272 | * Multicast Routing: | 276 | * Multicast Routing: |
273 | * see include/linux/mroute6.h. | 277 | * see include/linux/mroute6.h. |
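A user-space sketch of enabling the two new socket options; the numeric fallbacks mirror the values added above, and IPV6_TRANSPARENT typically requires CAP_NET_ADMIN. Error handling is trimmed.

#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPV6_RECVORIGDSTADDR
#define IPV6_RECVORIGDSTADDR 74	/* value added above */
#endif
#ifndef IPV6_TRANSPARENT
#define IPV6_TRANSPARENT 75	/* value added above */
#endif

static int make_tproxy_socket(void)
{
	int one = 1;
	int fd = socket(AF_INET6, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_IPV6, IPV6_TRANSPARENT, &one, sizeof(one));
	setsockopt(fd, SOL_IPV6, IPV6_RECVORIGDSTADDR, &one, sizeof(one));
	return fd;
}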
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 2be1a1a2beb9..ccd5b07d678d 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <linux/rcupdate.h> | 9 | #include <linux/rcupdate.h> |
10 | #include <linux/timer.h> | 10 | #include <linux/timer.h> |
11 | #include <linux/sysctl.h> | 11 | #include <linux/sysctl.h> |
12 | #include <linux/rtnetlink.h> | ||
12 | 13 | ||
13 | enum | 14 | enum |
14 | { | 15 | { |
@@ -158,7 +159,12 @@ struct in_ifaddr { | |||
158 | extern int register_inetaddr_notifier(struct notifier_block *nb); | 159 | extern int register_inetaddr_notifier(struct notifier_block *nb); |
159 | extern int unregister_inetaddr_notifier(struct notifier_block *nb); | 160 | extern int unregister_inetaddr_notifier(struct notifier_block *nb); |
160 | 161 | ||
161 | extern struct net_device *ip_dev_find(struct net *net, __be32 addr); | 162 | extern struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref); |
163 | static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) | ||
164 | { | ||
165 | return __ip_dev_find(net, addr, true); | ||
166 | } | ||
167 | |||
162 | extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); | 168 | extern int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); |
163 | extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); | 169 | extern int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); |
164 | extern void devinet_init(void); | 170 | extern void devinet_init(void); |
@@ -198,14 +204,10 @@ static __inline__ int bad_mask(__be32 mask, __be32 addr) | |||
198 | 204 | ||
199 | static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) | 205 | static inline struct in_device *__in_dev_get_rcu(const struct net_device *dev) |
200 | { | 206 | { |
201 | struct in_device *in_dev = dev->ip_ptr; | 207 | return rcu_dereference(dev->ip_ptr); |
202 | if (in_dev) | ||
203 | in_dev = rcu_dereference(in_dev); | ||
204 | return in_dev; | ||
205 | } | 208 | } |
206 | 209 | ||
207 | static __inline__ struct in_device * | 210 | static inline struct in_device *in_dev_get(const struct net_device *dev) |
208 | in_dev_get(const struct net_device *dev) | ||
209 | { | 211 | { |
210 | struct in_device *in_dev; | 212 | struct in_device *in_dev; |
211 | 213 | ||
@@ -217,10 +219,9 @@ in_dev_get(const struct net_device *dev) | |||
217 | return in_dev; | 219 | return in_dev; |
218 | } | 220 | } |
219 | 221 | ||
220 | static __inline__ struct in_device * | 222 | static inline struct in_device *__in_dev_get_rtnl(const struct net_device *dev) |
221 | __in_dev_get_rtnl(const struct net_device *dev) | ||
222 | { | 223 | { |
223 | return (struct in_device*)dev->ip_ptr; | 224 | return rcu_dereference_check(dev->ip_ptr, lockdep_rtnl_is_held()); |
224 | } | 225 | } |
225 | 226 | ||
226 | extern void in_dev_finish_destroy(struct in_device *idev); | 227 | extern void in_dev_finish_destroy(struct in_device *idev); |
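The new devref argument lets RCU readers skip the device reference that the ip_dev_find() wrapper still takes. A sketch, assuming the caller only needs a yes/no answer and never uses the pointer outside the RCU section:

#include <linux/inetdevice.h>
#include <linux/rcupdate.h>

static bool addr_is_local(struct net *net, __be32 addr)
{
	struct net_device *dev;
	bool local;

	rcu_read_lock();
	dev = __ip_dev_find(net, addr, false);	/* no dev_hold() taken */
	local = dev != NULL;
	rcu_read_unlock();

	return local;
}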
diff --git a/include/linux/init.h b/include/linux/init.h index de994304e0bb..577671c55153 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -46,16 +46,23 @@ | |||
46 | #define __exitdata __section(.exit.data) | 46 | #define __exitdata __section(.exit.data) |
47 | #define __exit_call __used __section(.exitcall.exit) | 47 | #define __exit_call __used __section(.exitcall.exit) |
48 | 48 | ||
49 | /* modpost check for section mismatches during the kernel build. | 49 | /* |
50 | * modpost check for section mismatches during the kernel build. | ||
50 | * A section mismatch happens when there are references from a | 51 | * A section mismatch happens when there are references from a |
51 | * code or data section to an init section (both code or data). | 52 | * code or data section to an init section (both code or data). |
52 | * The init sections are (for most archs) discarded by the kernel | 53 | * The init sections are (for most archs) discarded by the kernel |
53 | * when early init has completed so all such references are potential bugs. | 54 | * when early init has completed so all such references are potential bugs. |
54 | * For exit sections the same issue exists. | 55 | * For exit sections the same issue exists. |
56 | * | ||
55 | * The following markers are used for the cases where the reference to | 57 | * The following markers are used for the cases where the reference to |
56 | * the *init / *exit section (code or data) is valid and will teach | 58 | * the *init / *exit section (code or data) is valid and will teach |
57 | * modpost not to issue a warning. | 59 | * modpost not to issue a warning. Intended semantics is that a code or |
58 | * The markers follow same syntax rules as __init / __initdata. */ | 60 | * data tagged __ref* can reference code or data from init section without |
61 | * producing a warning (of course, no warning does not mean code is | ||
62 | * correct, so optimally document why the __ref is needed and why it's OK). | ||
63 | * | ||
64 | * The markers follow same syntax rules as __init / __initdata. | ||
65 | */ | ||
59 | #define __ref __section(.ref.text) noinline | 66 | #define __ref __section(.ref.text) noinline |
60 | #define __refdata __section(.ref.data) | 67 | #define __refdata __section(.ref.data) |
61 | #define __refconst __section(.ref.rodata) | 68 | #define __refconst __section(.ref.rodata) |
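A minimal illustration of the documented intent: a __ref function that knowingly calls __init code, with a comment recording why modpost should not warn. The names and the placeholder computation are hypothetical.

#include <linux/init.h>

static int __init compute_boot_default(void)
{
	return 42;	/* placeholder computation */
}

/*
 * Only reachable from early boot paths that run before the init
 * sections are freed, hence __ref rather than __init.
 */
static int __ref get_boot_default(void)
{
	return compute_boot_default();
}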
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index 1f43fa56f600..2fea6c8ef6ba 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -82,11 +82,17 @@ extern struct group_info init_groups; | |||
82 | # define CAP_INIT_BSET CAP_FULL_SET | 82 | # define CAP_INIT_BSET CAP_FULL_SET |
83 | 83 | ||
84 | #ifdef CONFIG_TREE_PREEMPT_RCU | 84 | #ifdef CONFIG_TREE_PREEMPT_RCU |
85 | #define INIT_TASK_RCU_TREE_PREEMPT() \ | ||
86 | .rcu_blocked_node = NULL, | ||
87 | #else | ||
88 | #define INIT_TASK_RCU_TREE_PREEMPT(tsk) | ||
89 | #endif | ||
90 | #ifdef CONFIG_PREEMPT_RCU | ||
85 | #define INIT_TASK_RCU_PREEMPT(tsk) \ | 91 | #define INIT_TASK_RCU_PREEMPT(tsk) \ |
86 | .rcu_read_lock_nesting = 0, \ | 92 | .rcu_read_lock_nesting = 0, \ |
87 | .rcu_read_unlock_special = 0, \ | 93 | .rcu_read_unlock_special = 0, \ |
88 | .rcu_blocked_node = NULL, \ | 94 | .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), \ |
89 | .rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry), | 95 | INIT_TASK_RCU_TREE_PREEMPT() |
90 | #else | 96 | #else |
91 | #define INIT_TASK_RCU_PREEMPT(tsk) | 97 | #define INIT_TASK_RCU_PREEMPT(tsk) |
92 | #endif | 98 | #endif |
@@ -137,8 +143,8 @@ extern struct cred init_cred; | |||
137 | .children = LIST_HEAD_INIT(tsk.children), \ | 143 | .children = LIST_HEAD_INIT(tsk.children), \ |
138 | .sibling = LIST_HEAD_INIT(tsk.sibling), \ | 144 | .sibling = LIST_HEAD_INIT(tsk.sibling), \ |
139 | .group_leader = &tsk, \ | 145 | .group_leader = &tsk, \ |
140 | .real_cred = &init_cred, \ | 146 | RCU_INIT_POINTER(.real_cred, &init_cred), \ |
141 | .cred = &init_cred, \ | 147 | RCU_INIT_POINTER(.cred, &init_cred), \ |
142 | .cred_guard_mutex = \ | 148 | .cred_guard_mutex = \ |
143 | __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ | 149 | __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \ |
144 | .comm = "swapper", \ | 150 | .comm = "swapper", \ |
diff --git a/include/linux/input.h b/include/linux/input.h index 0057698fd975..51af441f3a21 100644 --- a/include/linux/input.h +++ b/include/linux/input.h | |||
@@ -1227,7 +1227,7 @@ struct input_dev { | |||
1227 | int (*flush)(struct input_dev *dev, struct file *file); | 1227 | int (*flush)(struct input_dev *dev, struct file *file); |
1228 | int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); | 1228 | int (*event)(struct input_dev *dev, unsigned int type, unsigned int code, int value); |
1229 | 1229 | ||
1230 | struct input_handle *grab; | 1230 | struct input_handle __rcu *grab; |
1231 | 1231 | ||
1232 | spinlock_t event_lock; | 1232 | spinlock_t event_lock; |
1233 | struct mutex mutex; | 1233 | struct mutex mutex; |
diff --git a/include/linux/intel-gtt.h b/include/linux/intel-gtt.h new file mode 100644 index 000000000000..1d19ab2afa39 --- /dev/null +++ b/include/linux/intel-gtt.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Common Intel AGPGART and GTT definitions. | ||
3 | */ | ||
4 | #ifndef _INTEL_GTT_H | ||
5 | #define _INTEL_GTT_H | ||
6 | |||
7 | #include <linux/agp_backend.h> | ||
8 | |||
9 | /* This is for Intel-only GTT controls. | ||
10 | * | ||
11 | * Sandybridge: AGP_USER_CACHED_MEMORY defaults to LLC only | ||
12 | */ | ||
13 | |||
14 | #define AGP_USER_CACHED_MEMORY_LLC_MLC (AGP_USER_TYPES + 2) | ||
15 | #define AGP_USER_UNCACHED_MEMORY (AGP_USER_TYPES + 4) | ||
16 | |||
17 | /* flag for GFDT type */ | ||
18 | #define AGP_USER_CACHED_MEMORY_GFDT (1 << 3) | ||
19 | |||
20 | #endif | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index a0384a4d1e6f..01b281646251 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <asm/atomic.h> | 18 | #include <asm/atomic.h> |
19 | #include <asm/ptrace.h> | 19 | #include <asm/ptrace.h> |
20 | #include <asm/system.h> | 20 | #include <asm/system.h> |
21 | #include <trace/events/irq.h> | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * These correspond to the IORESOURCE_IRQ_* defines in | 24 | * These correspond to the IORESOURCE_IRQ_* defines in |
@@ -407,10 +408,14 @@ asmlinkage void do_softirq(void); | |||
407 | asmlinkage void __do_softirq(void); | 408 | asmlinkage void __do_softirq(void); |
408 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); | 409 | extern void open_softirq(int nr, void (*action)(struct softirq_action *)); |
409 | extern void softirq_init(void); | 410 | extern void softirq_init(void); |
410 | #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) | 411 | static inline void __raise_softirq_irqoff(unsigned int nr) |
412 | { | ||
413 | trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL); | ||
414 | or_softirq_pending(1UL << nr); | ||
415 | } | ||
416 | |||
411 | extern void raise_softirq_irqoff(unsigned int nr); | 417 | extern void raise_softirq_irqoff(unsigned int nr); |
412 | extern void raise_softirq(unsigned int nr); | 418 | extern void raise_softirq(unsigned int nr); |
413 | extern void wakeup_softirqd(void); | ||
414 | 419 | ||
415 | /* This is the worklist that queues up per-cpu softirq work. | 420 | /* This is the worklist that queues up per-cpu softirq work. |
416 | * | 421 | * |
@@ -641,11 +646,8 @@ static inline void init_irq_proc(void) | |||
641 | struct seq_file; | 646 | struct seq_file; |
642 | int show_interrupts(struct seq_file *p, void *v); | 647 | int show_interrupts(struct seq_file *p, void *v); |
643 | 648 | ||
644 | struct irq_desc; | ||
645 | |||
646 | extern int early_irq_init(void); | 649 | extern int early_irq_init(void); |
647 | extern int arch_probe_nr_irqs(void); | 650 | extern int arch_probe_nr_irqs(void); |
648 | extern int arch_early_irq_init(void); | 651 | extern int arch_early_irq_init(void); |
649 | extern int arch_init_chip_data(struct irq_desc *desc, int node); | ||
650 | 652 | ||
651 | #endif | 653 | #endif |
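With __raise_softirq_irqoff() now an inline that also fires the softirq_raise tracepoint, a sketch of a raiser looks like the following; MY_SOFTIRQ is a placeholder for a softirq number the caller actually owns.

#include <linux/interrupt.h>

/* Placeholder: substitute the softirq nr registered by your subsystem. */
#define MY_SOFTIRQ	NET_RX_SOFTIRQ

static void kick_my_softirq(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__raise_softirq_irqoff(MY_SOFTIRQ);	/* irqs must be disabled here */
	local_irq_restore(flags);
}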
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 0a6b3d5c490c..7fb592793738 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
@@ -79,7 +79,7 @@ io_mapping_free(struct io_mapping *mapping) | |||
79 | } | 79 | } |
80 | 80 | ||
81 | /* Atomic map/unmap */ | 81 | /* Atomic map/unmap */ |
82 | static inline void * | 82 | static inline void __iomem * |
83 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 83 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
84 | unsigned long offset, | 84 | unsigned long offset, |
85 | int slot) | 85 | int slot) |
@@ -94,12 +94,12 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping, | |||
94 | } | 94 | } |
95 | 95 | ||
96 | static inline void | 96 | static inline void |
97 | io_mapping_unmap_atomic(void *vaddr, int slot) | 97 | io_mapping_unmap_atomic(void __iomem *vaddr, int slot) |
98 | { | 98 | { |
99 | iounmap_atomic(vaddr, slot); | 99 | iounmap_atomic(vaddr, slot); |
100 | } | 100 | } |
101 | 101 | ||
102 | static inline void * | 102 | static inline void __iomem * |
103 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | 103 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) |
104 | { | 104 | { |
105 | resource_size_t phys_addr; | 105 | resource_size_t phys_addr; |
@@ -111,7 +111,7 @@ io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static inline void | 113 | static inline void |
114 | io_mapping_unmap(void *vaddr) | 114 | io_mapping_unmap(void __iomem *vaddr) |
115 | { | 115 | { |
116 | iounmap(vaddr); | 116 | iounmap(vaddr); |
117 | } | 117 | } |
@@ -125,38 +125,38 @@ struct io_mapping; | |||
125 | static inline struct io_mapping * | 125 | static inline struct io_mapping * |
126 | io_mapping_create_wc(resource_size_t base, unsigned long size) | 126 | io_mapping_create_wc(resource_size_t base, unsigned long size) |
127 | { | 127 | { |
128 | return (struct io_mapping *) ioremap_wc(base, size); | 128 | return (struct io_mapping __force *) ioremap_wc(base, size); |
129 | } | 129 | } |
130 | 130 | ||
131 | static inline void | 131 | static inline void |
132 | io_mapping_free(struct io_mapping *mapping) | 132 | io_mapping_free(struct io_mapping *mapping) |
133 | { | 133 | { |
134 | iounmap(mapping); | 134 | iounmap((void __force __iomem *) mapping); |
135 | } | 135 | } |
136 | 136 | ||
137 | /* Atomic map/unmap */ | 137 | /* Atomic map/unmap */ |
138 | static inline void * | 138 | static inline void __iomem * |
139 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 139 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
140 | unsigned long offset, | 140 | unsigned long offset, |
141 | int slot) | 141 | int slot) |
142 | { | 142 | { |
143 | return ((char *) mapping) + offset; | 143 | return ((char __force __iomem *) mapping) + offset; |
144 | } | 144 | } |
145 | 145 | ||
146 | static inline void | 146 | static inline void |
147 | io_mapping_unmap_atomic(void *vaddr, int slot) | 147 | io_mapping_unmap_atomic(void __iomem *vaddr, int slot) |
148 | { | 148 | { |
149 | } | 149 | } |
150 | 150 | ||
151 | /* Non-atomic map/unmap */ | 151 | /* Non-atomic map/unmap */ |
152 | static inline void * | 152 | static inline void __iomem * |
153 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) | 153 | io_mapping_map_wc(struct io_mapping *mapping, unsigned long offset) |
154 | { | 154 | { |
155 | return ((char *) mapping) + offset; | 155 | return ((char __force __iomem *) mapping) + offset; |
156 | } | 156 | } |
157 | 157 | ||
158 | static inline void | 158 | static inline void |
159 | io_mapping_unmap(void *vaddr) | 159 | io_mapping_unmap(void __iomem *vaddr) |
160 | { | 160 | { |
161 | } | 161 | } |
162 | 162 | ||
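With the __iomem annotations above, callers receive and return annotated pointers and should stick to the io accessors. A sketch, assuming a register write through a WC mapping; the offset is a placeholder and KM_USER0 is used as the kmap slot.

#include <linux/io-mapping.h>
#include <linux/io.h>
#include <linux/highmem.h>

static void poke_register(struct io_mapping *map, unsigned long offset, u32 val)
{
	void __iomem *vaddr;

	vaddr = io_mapping_map_atomic_wc(map, offset, KM_USER0);
	iowrite32(val, vaddr);
	io_mapping_unmap_atomic(vaddr, KM_USER0);
}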
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h index 64d529133031..3e70b21884a9 100644 --- a/include/linux/iocontext.h +++ b/include/linux/iocontext.h | |||
@@ -53,7 +53,7 @@ struct io_context { | |||
53 | 53 | ||
54 | struct radix_tree_root radix_root; | 54 | struct radix_tree_root radix_root; |
55 | struct hlist_head cic_list; | 55 | struct hlist_head cic_list; |
56 | void *ioc_data; | 56 | void __rcu *ioc_data; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | static inline struct io_context *ioc_task_link(struct io_context *ioc) | 59 | static inline struct io_context *ioc_task_link(struct io_context *ioc) |
diff --git a/include/linux/ip_vs.h b/include/linux/ip_vs.h index 9708de265bb1..5f43a3b2e3ad 100644 --- a/include/linux/ip_vs.h +++ b/include/linux/ip_vs.h | |||
@@ -70,6 +70,7 @@ | |||
70 | 70 | ||
71 | /* | 71 | /* |
72 | * IPVS Connection Flags | 72 | * IPVS Connection Flags |
73 | * Only flags 0..15 are sent to backup server | ||
73 | */ | 74 | */ |
74 | #define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ | 75 | #define IP_VS_CONN_F_FWD_MASK 0x0007 /* mask for the fwd methods */ |
75 | #define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ | 76 | #define IP_VS_CONN_F_MASQ 0x0000 /* masquerading/NAT */ |
@@ -88,9 +89,20 @@ | |||
88 | #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ | 89 | #define IP_VS_CONN_F_TEMPLATE 0x1000 /* template, not connection */ |
89 | #define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ | 90 | #define IP_VS_CONN_F_ONE_PACKET 0x2000 /* forward only one packet */ |
90 | 91 | ||
92 | /* Flags that are not sent to backup server start from bit 16 */ | ||
93 | #define IP_VS_CONN_F_NFCT (1 << 16) /* use netfilter conntrack */ | ||
94 | |||
95 | /* Connection flags from destination that can be changed by user space */ | ||
96 | #define IP_VS_CONN_F_DEST_MASK (IP_VS_CONN_F_FWD_MASK | \ | ||
97 | IP_VS_CONN_F_ONE_PACKET | \ | ||
98 | IP_VS_CONN_F_NFCT | \ | ||
99 | 0) | ||
100 | |||
91 | #define IP_VS_SCHEDNAME_MAXLEN 16 | 101 | #define IP_VS_SCHEDNAME_MAXLEN 16 |
102 | #define IP_VS_PENAME_MAXLEN 16 | ||
92 | #define IP_VS_IFNAME_MAXLEN 16 | 103 | #define IP_VS_IFNAME_MAXLEN 16 |
93 | 104 | ||
105 | #define IP_VS_PEDATA_MAXLEN 255 | ||
94 | 106 | ||
95 | /* | 107 | /* |
96 | * The struct ip_vs_service_user and struct ip_vs_dest_user are | 108 | * The struct ip_vs_service_user and struct ip_vs_dest_user are |
@@ -324,6 +336,9 @@ enum { | |||
324 | IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ | 336 | IPVS_SVC_ATTR_NETMASK, /* persistent netmask */ |
325 | 337 | ||
326 | IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ | 338 | IPVS_SVC_ATTR_STATS, /* nested attribute for service stats */ |
339 | |||
340 | IPVS_SVC_ATTR_PE_NAME, /* name of ct retriever */ | ||
341 | |||
327 | __IPVS_SVC_ATTR_MAX, | 342 | __IPVS_SVC_ATTR_MAX, |
328 | }; | 343 | }; |
329 | 344 | ||
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index ab9e9e89e407..8e429d0e0405 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
@@ -58,7 +58,7 @@ struct ipv6_opt_hdr { | |||
58 | /* | 58 | /* |
59 | * TLV encoded option data follows. | 59 | * TLV encoded option data follows. |
60 | */ | 60 | */ |
61 | } __packed; /* required for some archs */ | 61 | } __attribute__((packed)); /* required for some archs */ |
62 | 62 | ||
63 | #define ipv6_destopt_hdr ipv6_opt_hdr | 63 | #define ipv6_destopt_hdr ipv6_opt_hdr |
64 | #define ipv6_hopopt_hdr ipv6_opt_hdr | 64 | #define ipv6_hopopt_hdr ipv6_opt_hdr |
@@ -99,7 +99,7 @@ struct ipv6_destopt_hao { | |||
99 | __u8 type; | 99 | __u8 type; |
100 | __u8 length; | 100 | __u8 length; |
101 | struct in6_addr addr; | 101 | struct in6_addr addr; |
102 | } __packed; | 102 | } __attribute__((packed)); |
103 | 103 | ||
104 | /* | 104 | /* |
105 | * IPv6 fixed header | 105 | * IPv6 fixed header |
@@ -341,7 +341,9 @@ struct ipv6_pinfo { | |||
341 | odstopts:1, | 341 | odstopts:1, |
342 | rxflow:1, | 342 | rxflow:1, |
343 | rxtclass:1, | 343 | rxtclass:1, |
344 | rxpmtu:1; | 344 | rxpmtu:1, |
345 | rxorigdstaddr:1; | ||
346 | /* 2 bits hole */ | ||
345 | } bits; | 347 | } bits; |
346 | __u16 all; | 348 | __u16 all; |
347 | } rxopt; | 349 | } rxopt; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index c03243ad84b4..e9639115dff1 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
@@ -72,6 +72,10 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ | 72 | #define IRQ_ONESHOT 0x08000000 /* IRQ is not unmasked after hardirq */ |
73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ | 73 | #define IRQ_NESTED_THREAD 0x10000000 /* IRQ is nested into another, no own handler thread */ |
74 | 74 | ||
75 | #define IRQF_MODIFY_MASK \ | ||
76 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ | ||
77 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL) | ||
78 | |||
75 | #ifdef CONFIG_IRQ_PER_CPU | 79 | #ifdef CONFIG_IRQ_PER_CPU |
76 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) | 80 | # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU) |
77 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) | 81 | # define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
@@ -80,36 +84,77 @@ typedef void (*irq_flow_handler_t)(unsigned int irq, | |||
80 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING | 84 | # define IRQ_NO_BALANCING_MASK IRQ_NO_BALANCING |
81 | #endif | 85 | #endif |
82 | 86 | ||
83 | struct proc_dir_entry; | ||
84 | struct msi_desc; | 87 | struct msi_desc; |
85 | 88 | ||
86 | /** | 89 | /** |
90 | * struct irq_data - per irq and irq chip data passed down to chip functions | ||
91 | * @irq: interrupt number | ||
92 | * @node: node index useful for balancing | ||
93 | * @chip: low level interrupt hardware access | ||
94 | * @handler_data: per-IRQ data for the irq_chip methods | ||
95 | * @chip_data: platform-specific per-chip private data for the chip | ||
96 | * methods, to allow shared chip implementations | ||
97 | * @msi_desc: MSI descriptor | ||
98 | * @affinity: IRQ affinity on SMP | ||
99 | * | ||
100 | * The fields here need to overlay the ones in irq_desc until we | ||
101 | * cleaned up the direct references and switched everything over to | ||
102 | * irq_data. | ||
103 | */ | ||
104 | struct irq_data { | ||
105 | unsigned int irq; | ||
106 | unsigned int node; | ||
107 | struct irq_chip *chip; | ||
108 | void *handler_data; | ||
109 | void *chip_data; | ||
110 | struct msi_desc *msi_desc; | ||
111 | #ifdef CONFIG_SMP | ||
112 | cpumask_var_t affinity; | ||
113 | #endif | ||
114 | }; | ||
115 | |||
116 | /** | ||
87 | * struct irq_chip - hardware interrupt chip descriptor | 117 | * struct irq_chip - hardware interrupt chip descriptor |
88 | * | 118 | * |
89 | * @name: name for /proc/interrupts | 119 | * @name: name for /proc/interrupts |
90 | * @startup: start up the interrupt (defaults to ->enable if NULL) | 120 | * @startup: deprecated, replaced by irq_startup |
91 | * @shutdown: shut down the interrupt (defaults to ->disable if NULL) | 121 | * @shutdown: deprecated, replaced by irq_shutdown |
92 | * @enable: enable the interrupt (defaults to chip->unmask if NULL) | 122 | * @enable: deprecated, replaced by irq_enable |
93 | * @disable: disable the interrupt | 123 | * @disable: deprecated, replaced by irq_disable |
94 | * @ack: start of a new interrupt | 124 | * @ack: deprecated, replaced by irq_ack |
95 | * @mask: mask an interrupt source | 125 | * @mask: deprecated, replaced by irq_mask |
96 | * @mask_ack: ack and mask an interrupt source | 126 | * @mask_ack: deprecated, replaced by irq_mask_ack |
97 | * @unmask: unmask an interrupt source | 127 | * @unmask: deprecated, replaced by irq_unmask |
98 | * @eoi: end of interrupt - chip level | 128 | * @eoi: deprecated, replaced by irq_eoi |
99 | * @end: end of interrupt - flow level | 129 | * @end: deprecated, will go away with __do_IRQ() |
100 | * @set_affinity: set the CPU affinity on SMP machines | 130 | * @set_affinity: deprecated, replaced by irq_set_affinity |
101 | * @retrigger: resend an IRQ to the CPU | 131 | * @retrigger: deprecated, replaced by irq_retrigger |
102 | * @set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | 132 | * @set_type: deprecated, replaced by irq_set_type |
103 | * @set_wake: enable/disable power-management wake-on of an IRQ | 133 | * @set_wake: deprecated, replaced by irq_set_wake |
134 | * @bus_lock: deprecated, replaced by irq_bus_lock | ||
135 | * @bus_sync_unlock: deprecated, replaced by irq_bus_sync_unlock | ||
104 | * | 136 | * |
105 | * @bus_lock: function to lock access to slow bus (i2c) chips | 137 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
106 | * @bus_sync_unlock: function to sync and unlock slow bus (i2c) chips | 138 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
139 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) | ||
140 | * @irq_disable: disable the interrupt | ||
141 | * @irq_ack: start of a new interrupt | ||
142 | * @irq_mask: mask an interrupt source | ||
143 | * @irq_mask_ack: ack and mask an interrupt source | ||
144 | * @irq_unmask: unmask an interrupt source | ||
145 | * @irq_eoi: end of interrupt | ||
146 | * @irq_set_affinity: set the CPU affinity on SMP machines | ||
147 | * @irq_retrigger: resend an IRQ to the CPU | ||
148 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ | ||
149 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ | ||
150 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips | ||
151 | * @irq_bus_sync_unlock: function to sync and unlock slow bus (i2c) chips | ||
107 | * | 152 | * |
108 | * @release: release function solely used by UML | 153 | * @release: release function solely used by UML |
109 | * @typename: obsoleted by name, kept as migration helper | ||
110 | */ | 154 | */ |
111 | struct irq_chip { | 155 | struct irq_chip { |
112 | const char *name; | 156 | const char *name; |
157 | #ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
113 | unsigned int (*startup)(unsigned int irq); | 158 | unsigned int (*startup)(unsigned int irq); |
114 | void (*shutdown)(unsigned int irq); | 159 | void (*shutdown)(unsigned int irq); |
115 | void (*enable)(unsigned int irq); | 160 | void (*enable)(unsigned int irq); |
@@ -130,154 +175,66 @@ struct irq_chip { | |||
130 | 175 | ||
131 | void (*bus_lock)(unsigned int irq); | 176 | void (*bus_lock)(unsigned int irq); |
132 | void (*bus_sync_unlock)(unsigned int irq); | 177 | void (*bus_sync_unlock)(unsigned int irq); |
178 | #endif | ||
179 | unsigned int (*irq_startup)(struct irq_data *data); | ||
180 | void (*irq_shutdown)(struct irq_data *data); | ||
181 | void (*irq_enable)(struct irq_data *data); | ||
182 | void (*irq_disable)(struct irq_data *data); | ||
183 | |||
184 | void (*irq_ack)(struct irq_data *data); | ||
185 | void (*irq_mask)(struct irq_data *data); | ||
186 | void (*irq_mask_ack)(struct irq_data *data); | ||
187 | void (*irq_unmask)(struct irq_data *data); | ||
188 | void (*irq_eoi)(struct irq_data *data); | ||
189 | |||
190 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); | ||
191 | int (*irq_retrigger)(struct irq_data *data); | ||
192 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); | ||
193 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); | ||
194 | |||
195 | void (*irq_bus_lock)(struct irq_data *data); | ||
196 | void (*irq_bus_sync_unlock)(struct irq_data *data); | ||
133 | 197 | ||
134 | /* Currently used only by UML, might disappear one day.*/ | 198 | /* Currently used only by UML, might disappear one day.*/ |
135 | #ifdef CONFIG_IRQ_RELEASE_METHOD | 199 | #ifdef CONFIG_IRQ_RELEASE_METHOD |
136 | void (*release)(unsigned int irq, void *dev_id); | 200 | void (*release)(unsigned int irq, void *dev_id); |
137 | #endif | 201 | #endif |
138 | /* | ||
139 | * For compatibility, ->typename is copied into ->name. | ||
140 | * Will disappear. | ||
141 | */ | ||
142 | const char *typename; | ||
143 | }; | 202 | }; |
144 | 203 | ||
145 | struct timer_rand_state; | 204 | /* This include will go away once we isolated irq_desc usage to core code */ |
146 | struct irq_2_iommu; | 205 | #include <linux/irqdesc.h> |
147 | /** | ||
148 | * struct irq_desc - interrupt descriptor | ||
149 | * @irq: interrupt number for this descriptor | ||
150 | * @timer_rand_state: pointer to timer rand state struct | ||
151 | * @kstat_irqs: irq stats per cpu | ||
152 | * @irq_2_iommu: iommu with this irq | ||
153 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | ||
154 | * @chip: low level interrupt hardware access | ||
155 | * @msi_desc: MSI descriptor | ||
156 | * @handler_data: per-IRQ data for the irq_chip methods | ||
157 | * @chip_data: platform-specific per-chip private data for the chip | ||
158 | * methods, to allow shared chip implementations | ||
159 | * @action: the irq action chain | ||
160 | * @status: status information | ||
161 | * @depth: disable-depth, for nested irq_disable() calls | ||
162 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | ||
163 | * @irq_count: stats field to detect stalled irqs | ||
164 | * @last_unhandled: aging timer for unhandled count | ||
165 | * @irqs_unhandled: stats field for spurious unhandled interrupts | ||
166 | * @lock: locking for SMP | ||
167 | * @affinity: IRQ affinity on SMP | ||
168 | * @node: node index useful for balancing | ||
169 | * @pending_mask: pending rebalanced interrupts | ||
170 | * @threads_active: number of irqaction threads currently running | ||
171 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
172 | * @dir: /proc/irq/ procfs entry | ||
173 | * @name: flow handler name for /proc/interrupts output | ||
174 | */ | ||
175 | struct irq_desc { | ||
176 | unsigned int irq; | ||
177 | struct timer_rand_state *timer_rand_state; | ||
178 | unsigned int *kstat_irqs; | ||
179 | #ifdef CONFIG_INTR_REMAP | ||
180 | struct irq_2_iommu *irq_2_iommu; | ||
181 | #endif | ||
182 | irq_flow_handler_t handle_irq; | ||
183 | struct irq_chip *chip; | ||
184 | struct msi_desc *msi_desc; | ||
185 | void *handler_data; | ||
186 | void *chip_data; | ||
187 | struct irqaction *action; /* IRQ action list */ | ||
188 | unsigned int status; /* IRQ status */ | ||
189 | |||
190 | unsigned int depth; /* nested irq disables */ | ||
191 | unsigned int wake_depth; /* nested wake enables */ | ||
192 | unsigned int irq_count; /* For detecting broken IRQs */ | ||
193 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | ||
194 | unsigned int irqs_unhandled; | ||
195 | raw_spinlock_t lock; | ||
196 | #ifdef CONFIG_SMP | ||
197 | cpumask_var_t affinity; | ||
198 | const struct cpumask *affinity_hint; | ||
199 | unsigned int node; | ||
200 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
201 | cpumask_var_t pending_mask; | ||
202 | #endif | ||
203 | #endif | ||
204 | atomic_t threads_active; | ||
205 | wait_queue_head_t wait_for_threads; | ||
206 | #ifdef CONFIG_PROC_FS | ||
207 | struct proc_dir_entry *dir; | ||
208 | #endif | ||
209 | const char *name; | ||
210 | } ____cacheline_internodealigned_in_smp; | ||
211 | 206 | ||
212 | extern void arch_init_copy_chip_data(struct irq_desc *old_desc, | 207 | /* |
213 | struct irq_desc *desc, int node); | 208 | * Pick up the arch-dependent methods: |
214 | extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc); | 209 | */ |
210 | #include <asm/hw_irq.h> | ||
215 | 211 | ||
216 | #ifndef CONFIG_SPARSE_IRQ | 212 | #ifndef NR_IRQS_LEGACY |
217 | extern struct irq_desc irq_desc[NR_IRQS]; | 213 | # define NR_IRQS_LEGACY 0 |
218 | #endif | 214 | #endif |
219 | 215 | ||
220 | #ifdef CONFIG_NUMA_IRQ_DESC | 216 | #ifndef ARCH_IRQ_INIT_FLAGS |
221 | extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node); | 217 | # define ARCH_IRQ_INIT_FLAGS 0 |
222 | #else | ||
223 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
224 | { | ||
225 | return desc; | ||
226 | } | ||
227 | #endif | 218 | #endif |
228 | 219 | ||
229 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | 220 | #define IRQ_DEFAULT_INIT_FLAGS (IRQ_DISABLED | ARCH_IRQ_INIT_FLAGS) |
230 | |||
231 | /* | ||
232 | * Pick up the arch-dependent methods: | ||
233 | */ | ||
234 | #include <asm/hw_irq.h> | ||
235 | 221 | ||
222 | struct irqaction; | ||
236 | extern int setup_irq(unsigned int irq, struct irqaction *new); | 223 | extern int setup_irq(unsigned int irq, struct irqaction *new); |
237 | extern void remove_irq(unsigned int irq, struct irqaction *act); | 224 | extern void remove_irq(unsigned int irq, struct irqaction *act); |
238 | 225 | ||
239 | #ifdef CONFIG_GENERIC_HARDIRQS | 226 | #ifdef CONFIG_GENERIC_HARDIRQS |
240 | 227 | ||
241 | #ifdef CONFIG_SMP | 228 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
242 | |||
243 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
244 | |||
245 | void move_native_irq(int irq); | 229 | void move_native_irq(int irq); |
246 | void move_masked_irq(int irq); | 230 | void move_masked_irq(int irq); |
247 | 231 | #else | |
248 | #else /* CONFIG_GENERIC_PENDING_IRQ */ | 232 | static inline void move_native_irq(int irq) { } |
249 | 233 | static inline void move_masked_irq(int irq) { } | |
250 | static inline void move_irq(int irq) | 234 | #endif |
251 | { | ||
252 | } | ||
253 | |||
254 | static inline void move_native_irq(int irq) | ||
255 | { | ||
256 | } | ||
257 | |||
258 | static inline void move_masked_irq(int irq) | ||
259 | { | ||
260 | } | ||
261 | |||
262 | #endif /* CONFIG_GENERIC_PENDING_IRQ */ | ||
263 | |||
264 | #else /* CONFIG_SMP */ | ||
265 | |||
266 | #define move_native_irq(x) | ||
267 | #define move_masked_irq(x) | ||
268 | |||
269 | #endif /* CONFIG_SMP */ | ||
270 | 235 | ||
271 | extern int no_irq_affinity; | 236 | extern int no_irq_affinity; |
272 | 237 | ||
273 | static inline int irq_balancing_disabled(unsigned int irq) | ||
274 | { | ||
275 | struct irq_desc *desc; | ||
276 | |||
277 | desc = irq_to_desc(irq); | ||
278 | return desc->status & IRQ_NO_BALANCING_MASK; | ||
279 | } | ||
280 | |||
281 | /* Handle irq action chains: */ | 238 | /* Handle irq action chains: */ |
282 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); | 239 | extern irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action); |
283 | 240 | ||
@@ -293,42 +250,10 @@ extern void handle_percpu_irq(unsigned int irq, struct irq_desc *desc); | |||
293 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); | 250 | extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); |
294 | extern void handle_nested_irq(unsigned int irq); | 251 | extern void handle_nested_irq(unsigned int irq); |
295 | 252 | ||
296 | /* | ||
297 | * Monolithic do_IRQ implementation. | ||
298 | */ | ||
299 | #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
300 | extern unsigned int __do_IRQ(unsigned int irq); | ||
301 | #endif | ||
302 | |||
303 | /* | ||
304 | * Architectures call this to let the generic IRQ layer | ||
305 | * handle an interrupt. If the descriptor is attached to an | ||
306 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
307 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
308 | */ | ||
309 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
310 | { | ||
311 | #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
312 | desc->handle_irq(irq, desc); | ||
313 | #else | ||
314 | if (likely(desc->handle_irq)) | ||
315 | desc->handle_irq(irq, desc); | ||
316 | else | ||
317 | __do_IRQ(irq); | ||
318 | #endif | ||
319 | } | ||
320 | |||
321 | static inline void generic_handle_irq(unsigned int irq) | ||
322 | { | ||
323 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
324 | } | ||
325 | |||
326 | /* Handling of unhandled and spurious interrupts: */ | 253 | /* Handling of unhandled and spurious interrupts: */ |
327 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, | 254 | extern void note_interrupt(unsigned int irq, struct irq_desc *desc, |
328 | irqreturn_t action_ret); | 255 | irqreturn_t action_ret); |
329 | 256 | ||
330 | /* Resending of interrupts :*/ | ||
331 | void check_irq_resend(struct irq_desc *desc, unsigned int irq); | ||
332 | 257 | ||
333 | /* Enable/disable irq debugging output: */ | 258 | /* Enable/disable irq debugging output: */ |
334 | extern int noirqdebug_setup(char *str); | 259 | extern int noirqdebug_setup(char *str); |
@@ -351,16 +276,6 @@ extern void | |||
351 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, | 276 | __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
352 | const char *name); | 277 | const char *name); |
353 | 278 | ||
354 | /* caller has locked the irq_desc and both params are valid */ | ||
355 | static inline void __set_irq_handler_unlocked(int irq, | ||
356 | irq_flow_handler_t handler) | ||
357 | { | ||
358 | struct irq_desc *desc; | ||
359 | |||
360 | desc = irq_to_desc(irq); | ||
361 | desc->handle_irq = handler; | ||
362 | } | ||
363 | |||
364 | /* | 279 | /* |
365 | * Set a highlevel flow handler for a given IRQ: | 280 | * Set a highlevel flow handler for a given IRQ: |
366 | */ | 281 | */ |
@@ -384,141 +299,121 @@ set_irq_chained_handler(unsigned int irq, | |||
384 | 299 | ||
385 | extern void set_irq_nested_thread(unsigned int irq, int nest); | 300 | extern void set_irq_nested_thread(unsigned int irq, int nest); |
386 | 301 | ||
387 | extern void set_irq_noprobe(unsigned int irq); | 302 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
388 | extern void set_irq_probe(unsigned int irq); | 303 | |
304 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) | ||
305 | { | ||
306 | irq_modify_status(irq, 0, set); | ||
307 | } | ||
308 | |||
309 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) | ||
310 | { | ||
311 | irq_modify_status(irq, clr, 0); | ||
312 | } | ||
313 | |||
314 | static inline void set_irq_noprobe(unsigned int irq) | ||
315 | { | ||
316 | irq_modify_status(irq, 0, IRQ_NOPROBE); | ||
317 | } | ||
318 | |||
319 | static inline void set_irq_probe(unsigned int irq) | ||
320 | { | ||
321 | irq_modify_status(irq, IRQ_NOPROBE, 0); | ||
322 | } | ||
389 | 323 | ||
390 | /* Handle dynamic irq creation and destruction */ | 324 | /* Handle dynamic irq creation and destruction */ |
391 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); | 325 | extern unsigned int create_irq_nr(unsigned int irq_want, int node); |
392 | extern int create_irq(void); | 326 | extern int create_irq(void); |
393 | extern void destroy_irq(unsigned int irq); | 327 | extern void destroy_irq(unsigned int irq); |
394 | 328 | ||
395 | /* Test to see if a driver has successfully requested an irq */ | 329 | /* |
396 | static inline int irq_has_action(unsigned int irq) | 330 | * Dynamic irq helper functions. Obsolete. Use irq_alloc_desc* and |
331 | * irq_free_desc instead. | ||
332 | */ | ||
333 | extern void dynamic_irq_cleanup(unsigned int irq); | ||
334 | static inline void dynamic_irq_init(unsigned int irq) | ||
397 | { | 335 | { |
398 | struct irq_desc *desc = irq_to_desc(irq); | 336 | dynamic_irq_cleanup(irq); |
399 | return desc->action != NULL; | ||
400 | } | 337 | } |
401 | 338 | ||
402 | /* Dynamic irq helper functions */ | ||
403 | extern void dynamic_irq_init(unsigned int irq); | ||
404 | void dynamic_irq_init_keep_chip_data(unsigned int irq); | ||
405 | extern void dynamic_irq_cleanup(unsigned int irq); | ||
406 | void dynamic_irq_cleanup_keep_chip_data(unsigned int irq); | ||
407 | |||
408 | /* Set/get chip/data for an IRQ: */ | 339 | /* Set/get chip/data for an IRQ: */ |
409 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); | 340 | extern int set_irq_chip(unsigned int irq, struct irq_chip *chip); |
410 | extern int set_irq_data(unsigned int irq, void *data); | 341 | extern int set_irq_data(unsigned int irq, void *data); |
411 | extern int set_irq_chip_data(unsigned int irq, void *data); | 342 | extern int set_irq_chip_data(unsigned int irq, void *data); |
412 | extern int set_irq_type(unsigned int irq, unsigned int type); | 343 | extern int set_irq_type(unsigned int irq, unsigned int type); |
413 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); | 344 | extern int set_irq_msi(unsigned int irq, struct msi_desc *entry); |
345 | extern struct irq_data *irq_get_irq_data(unsigned int irq); | ||
414 | 346 | ||
415 | #define get_irq_chip(irq) (irq_to_desc(irq)->chip) | 347 | static inline struct irq_chip *get_irq_chip(unsigned int irq) |
416 | #define get_irq_chip_data(irq) (irq_to_desc(irq)->chip_data) | ||
417 | #define get_irq_data(irq) (irq_to_desc(irq)->handler_data) | ||
418 | #define get_irq_msi(irq) (irq_to_desc(irq)->msi_desc) | ||
419 | |||
420 | #define get_irq_desc_chip(desc) ((desc)->chip) | ||
421 | #define get_irq_desc_chip_data(desc) ((desc)->chip_data) | ||
422 | #define get_irq_desc_data(desc) ((desc)->handler_data) | ||
423 | #define get_irq_desc_msi(desc) ((desc)->msi_desc) | ||
424 | |||
425 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
426 | |||
427 | #endif /* !CONFIG_S390 */ | ||
428 | |||
429 | #ifdef CONFIG_SMP | ||
430 | /** | ||
431 | * alloc_desc_masks - allocate cpumasks for irq_desc | ||
432 | * @desc: pointer to irq_desc struct | ||
433 | * @node: node which will be handling the cpumasks | ||
434 | * @boot: true if need bootmem | ||
435 | * | ||
436 | * Allocates affinity and pending_mask cpumask if required. | ||
437 | * Returns true if successful (or not required). | ||
438 | */ | ||
439 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | ||
440 | bool boot) | ||
441 | { | 348 | { |
442 | gfp_t gfp = GFP_ATOMIC; | 349 | struct irq_data *d = irq_get_irq_data(irq); |
443 | 350 | return d ? d->chip : NULL; | |
444 | if (boot) | 351 | } |
445 | gfp = GFP_NOWAIT; | ||
446 | |||
447 | #ifdef CONFIG_CPUMASK_OFFSTACK | ||
448 | if (!alloc_cpumask_var_node(&desc->affinity, gfp, node)) | ||
449 | return false; | ||
450 | 352 | ||
451 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 353 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) |
452 | if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { | 354 | { |
453 | free_cpumask_var(desc->affinity); | 355 | return d->chip; |
454 | return false; | ||
455 | } | ||
456 | #endif | ||
457 | #endif | ||
458 | return true; | ||
459 | } | 356 | } |
460 | 357 | ||
461 | static inline void init_desc_masks(struct irq_desc *desc) | 358 | static inline void *get_irq_chip_data(unsigned int irq) |
462 | { | 359 | { |
463 | cpumask_setall(desc->affinity); | 360 | struct irq_data *d = irq_get_irq_data(irq); |
464 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 361 | return d ? d->chip_data : NULL; |
465 | cpumask_clear(desc->pending_mask); | ||
466 | #endif | ||
467 | } | 362 | } |
468 | 363 | ||
469 | /** | 364 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) |
470 | * init_copy_desc_masks - copy cpumasks for irq_desc | 365 | { |
471 | * @old_desc: pointer to old irq_desc struct | 366 | return d->chip_data; |
472 | * @new_desc: pointer to new irq_desc struct | 367 | } |
473 | * | ||
474 | * Insures affinity and pending_masks are copied to new irq_desc. | ||
475 | * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the | ||
476 | * irq_desc struct so the copy is redundant. | ||
477 | */ | ||
478 | 368 | ||
479 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 369 | static inline void *get_irq_data(unsigned int irq) |
480 | struct irq_desc *new_desc) | ||
481 | { | 370 | { |
482 | #ifdef CONFIG_CPUMASK_OFFSTACK | 371 | struct irq_data *d = irq_get_irq_data(irq); |
483 | cpumask_copy(new_desc->affinity, old_desc->affinity); | 372 | return d ? d->handler_data : NULL; |
373 | } | ||
484 | 374 | ||
485 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 375 | static inline void *irq_data_get_irq_data(struct irq_data *d) |
486 | cpumask_copy(new_desc->pending_mask, old_desc->pending_mask); | 376 | { |
487 | #endif | 377 | return d->handler_data; |
488 | #endif | ||
489 | } | 378 | } |
490 | 379 | ||
491 | static inline void free_desc_masks(struct irq_desc *old_desc, | 380 | static inline struct msi_desc *get_irq_msi(unsigned int irq) |
492 | struct irq_desc *new_desc) | ||
493 | { | 381 | { |
494 | free_cpumask_var(old_desc->affinity); | 382 | struct irq_data *d = irq_get_irq_data(irq); |
383 | return d ? d->msi_desc : NULL; | ||
384 | } | ||
495 | 385 | ||
496 | #ifdef CONFIG_GENERIC_PENDING_IRQ | 386 | static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) |
497 | free_cpumask_var(old_desc->pending_mask); | 387 | { |
498 | #endif | 388 | return d->msi_desc; |
499 | } | 389 | } |
500 | 390 | ||
501 | #else /* !CONFIG_SMP */ | 391 | int irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node); |
392 | void irq_free_descs(unsigned int irq, unsigned int cnt); | ||
393 | int irq_reserve_irqs(unsigned int from, unsigned int cnt); | ||
502 | 394 | ||
503 | static inline bool alloc_desc_masks(struct irq_desc *desc, int node, | 395 | static inline int irq_alloc_desc(int node) |
504 | bool boot) | ||
505 | { | 396 | { |
506 | return true; | 397 | return irq_alloc_descs(-1, 0, 1, node); |
507 | } | 398 | } |
508 | 399 | ||
509 | static inline void init_desc_masks(struct irq_desc *desc) | 400 | static inline int irq_alloc_desc_at(unsigned int at, int node) |
510 | { | 401 | { |
402 | return irq_alloc_descs(at, at, 1, node); | ||
511 | } | 403 | } |
512 | 404 | ||
513 | static inline void init_copy_desc_masks(struct irq_desc *old_desc, | 405 | static inline int irq_alloc_desc_from(unsigned int from, int node) |
514 | struct irq_desc *new_desc) | ||
515 | { | 406 | { |
407 | return irq_alloc_descs(-1, from, 1, node); | ||
516 | } | 408 | } |
517 | 409 | ||
518 | static inline void free_desc_masks(struct irq_desc *old_desc, | 410 | static inline void irq_free_desc(unsigned int irq) |
519 | struct irq_desc *new_desc) | ||
520 | { | 411 | { |
412 | irq_free_descs(irq, 1); | ||
521 | } | 413 | } |
522 | #endif /* CONFIG_SMP */ | 414 | |
415 | #endif /* CONFIG_GENERIC_HARDIRQS */ | ||
416 | |||
417 | #endif /* !CONFIG_S390 */ | ||
523 | 418 | ||
524 | #endif /* _LINUX_IRQ_H */ | 419 | #endif /* _LINUX_IRQ_H */ |
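The irq.h hunk above trades the old SMP cpumask helpers for a small descriptor allocator (irq_alloc_descs() plus the irq_alloc_desc*()/irq_free_desc() wrappers). A minimal sketch of how an interrupt-controller setup path might use it follows; the range, node and function names are invented for illustration.

        /* Hypothetical sketch: reserve 8 consecutive irq descriptors at or above 64. */
        static int example_setup_irq_range(int node)
        {
                int base = irq_alloc_descs(-1, 64, 8, node);    /* -1: place anywhere >= 64 */

                if (base < 0)
                        return base;            /* allocation failed */

                /* ... install chip and flow handlers for base..base+7 ... */
                return base;
        }

        static void example_teardown_irq_range(unsigned int base)
        {
                irq_free_descs(base, 8);        /* release the whole range */
        }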
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h new file mode 100644 index 000000000000..4fa09d4d0b71 --- /dev/null +++ b/include/linux/irq_work.h | |||
@@ -0,0 +1,20 @@ | |||
1 | #ifndef _LINUX_IRQ_WORK_H | ||
2 | #define _LINUX_IRQ_WORK_H | ||
3 | |||
4 | struct irq_work { | ||
5 | struct irq_work *next; | ||
6 | void (*func)(struct irq_work *); | ||
7 | }; | ||
8 | |||
9 | static inline | ||
10 | void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *)) | ||
11 | { | ||
12 | entry->next = NULL; | ||
13 | entry->func = func; | ||
14 | } | ||
15 | |||
16 | bool irq_work_queue(struct irq_work *entry); | ||
17 | void irq_work_run(void); | ||
18 | void irq_work_sync(struct irq_work *entry); | ||
19 | |||
20 | #endif /* _LINUX_IRQ_WORK_H */ | ||
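irq_work.h is new, so a hedged usage sketch may be useful: the work item is queued from a context where almost nothing is allowed (an NMI, for example) and the callback runs later, once the core invokes irq_work_run() from a safe interrupt context. The callback and trigger below are invented.

        #include <linux/irq_work.h>

        static void example_irq_work_func(struct irq_work *work)
        {
                /* runs later, from ordinary hard-irq context */
                pr_info("deferred work executed\n");
        }

        static struct irq_work example_work;

        static void example_init(void)
        {
                init_irq_work(&example_work, example_irq_work_func);
        }

        /* may be called from NMI or other contexts where workqueues are off limits */
        static void example_trigger(void)
        {
                irq_work_queue(&example_work);  /* no-op (returns false) if already queued */
        }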
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h new file mode 100644 index 000000000000..979c68cc7458 --- /dev/null +++ b/include/linux/irqdesc.h | |||
@@ -0,0 +1,159 @@ | |||
1 | #ifndef _LINUX_IRQDESC_H | ||
2 | #define _LINUX_IRQDESC_H | ||
3 | |||
4 | /* | ||
5 | * Core internal functions to deal with irq descriptors | ||
6 | * | ||
7 | * This include will move to kernel/irq once we cleaned up the tree. | ||
8 | * For now it's included from <linux/irq.h> | ||
9 | */ | ||
10 | |||
11 | struct proc_dir_entry; | ||
12 | struct timer_rand_state; | ||
13 | /** | ||
14 | * struct irq_desc - interrupt descriptor | ||
15 | * @irq_data: per irq and chip data passed down to chip functions | ||
16 | * @timer_rand_state: pointer to timer rand state struct | ||
17 | * @kstat_irqs: irq stats per cpu | ||
18 | * @handle_irq: highlevel irq-events handler [if NULL, __do_IRQ()] | ||
19 | * @action: the irq action chain | ||
20 | * @status: status information | ||
21 | * @depth: disable-depth, for nested irq_disable() calls | ||
22 | * @wake_depth: enable depth, for multiple set_irq_wake() callers | ||
23 | * @irq_count: stats field to detect stalled irqs | ||
24 | * @last_unhandled: aging timer for unhandled count | ||
25 | * @irqs_unhandled: stats field for spurious unhandled interrupts | ||
26 | * @lock: locking for SMP | ||
27 | * @pending_mask: pending rebalanced interrupts | ||
28 | * @threads_active: number of irqaction threads currently running | ||
29 | * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers | ||
30 | * @dir: /proc/irq/ procfs entry | ||
31 | * @name: flow handler name for /proc/interrupts output | ||
32 | */ | ||
33 | struct irq_desc { | ||
34 | |||
35 | #ifdef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED | ||
36 | struct irq_data irq_data; | ||
37 | #else | ||
38 | /* | ||
39 | * This union will go away, once we fixed the direct access to | ||
40 | * irq_desc all over the place. The direct fields are a 1:1 | ||
41 | * overlay of irq_data. | ||
42 | */ | ||
43 | union { | ||
44 | struct irq_data irq_data; | ||
45 | struct { | ||
46 | unsigned int irq; | ||
47 | unsigned int node; | ||
48 | struct irq_chip *chip; | ||
49 | void *handler_data; | ||
50 | void *chip_data; | ||
51 | struct msi_desc *msi_desc; | ||
52 | #ifdef CONFIG_SMP | ||
53 | cpumask_var_t affinity; | ||
54 | #endif | ||
55 | }; | ||
56 | }; | ||
57 | #endif | ||
58 | |||
59 | struct timer_rand_state *timer_rand_state; | ||
60 | unsigned int *kstat_irqs; | ||
61 | irq_flow_handler_t handle_irq; | ||
62 | struct irqaction *action; /* IRQ action list */ | ||
63 | unsigned int status; /* IRQ status */ | ||
64 | |||
65 | unsigned int depth; /* nested irq disables */ | ||
66 | unsigned int wake_depth; /* nested wake enables */ | ||
67 | unsigned int irq_count; /* For detecting broken IRQs */ | ||
68 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | ||
69 | unsigned int irqs_unhandled; | ||
70 | raw_spinlock_t lock; | ||
71 | #ifdef CONFIG_SMP | ||
72 | const struct cpumask *affinity_hint; | ||
73 | #ifdef CONFIG_GENERIC_PENDING_IRQ | ||
74 | cpumask_var_t pending_mask; | ||
75 | #endif | ||
76 | #endif | ||
77 | atomic_t threads_active; | ||
78 | wait_queue_head_t wait_for_threads; | ||
79 | #ifdef CONFIG_PROC_FS | ||
80 | struct proc_dir_entry *dir; | ||
81 | #endif | ||
82 | const char *name; | ||
83 | } ____cacheline_internodealigned_in_smp; | ||
84 | |||
85 | #ifndef CONFIG_SPARSE_IRQ | ||
86 | extern struct irq_desc irq_desc[NR_IRQS]; | ||
87 | #endif | ||
88 | |||
89 | /* Will be removed once the last users in power and sh are gone */ | ||
90 | extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node); | ||
91 | static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node) | ||
92 | { | ||
93 | return desc; | ||
94 | } | ||
95 | |||
96 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
97 | |||
98 | #define get_irq_desc_chip(desc) ((desc)->irq_data.chip) | ||
99 | #define get_irq_desc_chip_data(desc) ((desc)->irq_data.chip_data) | ||
100 | #define get_irq_desc_data(desc) ((desc)->irq_data.handler_data) | ||
101 | #define get_irq_desc_msi(desc) ((desc)->irq_data.msi_desc) | ||
102 | |||
103 | /* | ||
104 | * Monolithic do_IRQ implementation. | ||
105 | */ | ||
106 | #ifndef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
107 | extern unsigned int __do_IRQ(unsigned int irq); | ||
108 | #endif | ||
109 | |||
110 | /* | ||
111 | * Architectures call this to let the generic IRQ layer | ||
112 | * handle an interrupt. If the descriptor is attached to an | ||
113 | * irqchip-style controller then we call the ->handle_irq() handler, | ||
114 | * and it calls __do_IRQ() if it's attached to an irqtype-style controller. | ||
115 | */ | ||
116 | static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) | ||
117 | { | ||
118 | #ifdef CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ | ||
119 | desc->handle_irq(irq, desc); | ||
120 | #else | ||
121 | if (likely(desc->handle_irq)) | ||
122 | desc->handle_irq(irq, desc); | ||
123 | else | ||
124 | __do_IRQ(irq); | ||
125 | #endif | ||
126 | } | ||
127 | |||
128 | static inline void generic_handle_irq(unsigned int irq) | ||
129 | { | ||
130 | generic_handle_irq_desc(irq, irq_to_desc(irq)); | ||
131 | } | ||
132 | |||
133 | /* Test to see if a driver has successfully requested an irq */ | ||
134 | static inline int irq_has_action(unsigned int irq) | ||
135 | { | ||
136 | struct irq_desc *desc = irq_to_desc(irq); | ||
137 | return desc->action != NULL; | ||
138 | } | ||
139 | |||
140 | static inline int irq_balancing_disabled(unsigned int irq) | ||
141 | { | ||
142 | struct irq_desc *desc; | ||
143 | |||
144 | desc = irq_to_desc(irq); | ||
145 | return desc->status & IRQ_NO_BALANCING_MASK; | ||
146 | } | ||
147 | |||
148 | /* caller has locked the irq_desc and both params are valid */ | ||
149 | static inline void __set_irq_handler_unlocked(int irq, | ||
150 | irq_flow_handler_t handler) | ||
151 | { | ||
152 | struct irq_desc *desc; | ||
153 | |||
154 | desc = irq_to_desc(irq); | ||
155 | desc->handle_irq = handler; | ||
156 | } | ||
157 | #endif | ||
158 | |||
159 | #endif | ||
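A typical consumer of the helpers gathered in irqdesc.h is a chained demultiplexing handler: the parent interrupt's flow handler reads a status register and feeds each pending child interrupt back into the core with generic_handle_irq(). The status-read helper and irq base below are assumptions, not part of this patch.

        /* Hypothetical flow handler for a cascaded secondary interrupt controller. */
        static void example_demux_handler(unsigned int irq, struct irq_desc *desc)
        {
                unsigned long pending = example_read_pending_reg();     /* assumed helper */
                int bit;

                for_each_set_bit(bit, &pending, 32)
                        generic_handle_irq(EXAMPLE_IRQ_BASE + bit);     /* assumed irq base */
        }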
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h index 006bf45eae30..d176d658fe25 100644 --- a/include/linux/irqflags.h +++ b/include/linux/irqflags.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #define _LINUX_TRACE_IRQFLAGS_H | 12 | #define _LINUX_TRACE_IRQFLAGS_H |
13 | 13 | ||
14 | #include <linux/typecheck.h> | 14 | #include <linux/typecheck.h> |
15 | #include <asm/irqflags.h> | ||
15 | 16 | ||
16 | #ifdef CONFIG_TRACE_IRQFLAGS | 17 | #ifdef CONFIG_TRACE_IRQFLAGS |
17 | extern void trace_softirqs_on(unsigned long ip); | 18 | extern void trace_softirqs_on(unsigned long ip); |
@@ -52,17 +53,45 @@ | |||
52 | # define start_critical_timings() do { } while (0) | 53 | # define start_critical_timings() do { } while (0) |
53 | #endif | 54 | #endif |
54 | 55 | ||
55 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 56 | /* |
56 | 57 | * Wrap the arch provided IRQ routines to provide appropriate checks. | |
57 | #include <asm/irqflags.h> | 58 | */ |
59 | #define raw_local_irq_disable() arch_local_irq_disable() | ||
60 | #define raw_local_irq_enable() arch_local_irq_enable() | ||
61 | #define raw_local_irq_save(flags) \ | ||
62 | do { \ | ||
63 | typecheck(unsigned long, flags); \ | ||
64 | flags = arch_local_irq_save(); \ | ||
65 | } while (0) | ||
66 | #define raw_local_irq_restore(flags) \ | ||
67 | do { \ | ||
68 | typecheck(unsigned long, flags); \ | ||
69 | arch_local_irq_restore(flags); \ | ||
70 | } while (0) | ||
71 | #define raw_local_save_flags(flags) \ | ||
72 | do { \ | ||
73 | typecheck(unsigned long, flags); \ | ||
74 | flags = arch_local_save_flags(); \ | ||
75 | } while (0) | ||
76 | #define raw_irqs_disabled_flags(flags) \ | ||
77 | ({ \ | ||
78 | typecheck(unsigned long, flags); \ | ||
79 | arch_irqs_disabled_flags(flags); \ | ||
80 | }) | ||
81 | #define raw_irqs_disabled() (arch_irqs_disabled()) | ||
82 | #define raw_safe_halt() arch_safe_halt() | ||
58 | 83 | ||
84 | /* | ||
85 | * The local_irq_*() APIs are equal to the raw_local_irq*() | ||
86 | * if !TRACE_IRQFLAGS. | ||
87 | */ | ||
88 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | ||
59 | #define local_irq_enable() \ | 89 | #define local_irq_enable() \ |
60 | do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) | 90 | do { trace_hardirqs_on(); raw_local_irq_enable(); } while (0) |
61 | #define local_irq_disable() \ | 91 | #define local_irq_disable() \ |
62 | do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) | 92 | do { raw_local_irq_disable(); trace_hardirqs_off(); } while (0) |
63 | #define local_irq_save(flags) \ | 93 | #define local_irq_save(flags) \ |
64 | do { \ | 94 | do { \ |
65 | typecheck(unsigned long, flags); \ | ||
66 | raw_local_irq_save(flags); \ | 95 | raw_local_irq_save(flags); \ |
67 | trace_hardirqs_off(); \ | 96 | trace_hardirqs_off(); \ |
68 | } while (0) | 97 | } while (0) |
@@ -70,7 +99,6 @@ | |||
70 | 99 | ||
71 | #define local_irq_restore(flags) \ | 100 | #define local_irq_restore(flags) \ |
72 | do { \ | 101 | do { \ |
73 | typecheck(unsigned long, flags); \ | ||
74 | if (raw_irqs_disabled_flags(flags)) { \ | 102 | if (raw_irqs_disabled_flags(flags)) { \ |
75 | raw_local_irq_restore(flags); \ | 103 | raw_local_irq_restore(flags); \ |
76 | trace_hardirqs_off(); \ | 104 | trace_hardirqs_off(); \ |
@@ -79,51 +107,44 @@ | |||
79 | raw_local_irq_restore(flags); \ | 107 | raw_local_irq_restore(flags); \ |
80 | } \ | 108 | } \ |
81 | } while (0) | 109 | } while (0) |
82 | #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ | 110 | #define local_save_flags(flags) \ |
83 | /* | ||
84 | * The local_irq_*() APIs are equal to the raw_local_irq*() | ||
85 | * if !TRACE_IRQFLAGS. | ||
86 | */ | ||
87 | # define raw_local_irq_disable() local_irq_disable() | ||
88 | # define raw_local_irq_enable() local_irq_enable() | ||
89 | # define raw_local_irq_save(flags) \ | ||
90 | do { \ | ||
91 | typecheck(unsigned long, flags); \ | ||
92 | local_irq_save(flags); \ | ||
93 | } while (0) | ||
94 | # define raw_local_irq_restore(flags) \ | ||
95 | do { \ | 111 | do { \ |
96 | typecheck(unsigned long, flags); \ | 112 | raw_local_save_flags(flags); \ |
97 | local_irq_restore(flags); \ | ||
98 | } while (0) | 113 | } while (0) |
99 | #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ | ||
100 | 114 | ||
101 | #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT | 115 | #define irqs_disabled_flags(flags) \ |
102 | #define safe_halt() \ | 116 | ({ \ |
103 | do { \ | 117 | raw_irqs_disabled_flags(flags); \ |
104 | trace_hardirqs_on(); \ | 118 | }) |
105 | raw_safe_halt(); \ | ||
106 | } while (0) | ||
107 | 119 | ||
108 | #define local_save_flags(flags) \ | 120 | #define irqs_disabled() \ |
109 | do { \ | 121 | ({ \ |
110 | typecheck(unsigned long, flags); \ | 122 | unsigned long _flags; \ |
111 | raw_local_save_flags(flags); \ | 123 | raw_local_save_flags(_flags); \ |
124 | raw_irqs_disabled_flags(_flags); \ | ||
125 | }) | ||
126 | |||
127 | #define safe_halt() \ | ||
128 | do { \ | ||
129 | trace_hardirqs_on(); \ | ||
130 | raw_safe_halt(); \ | ||
112 | } while (0) | 131 | } while (0) |
113 | 132 | ||
114 | #define irqs_disabled() \ | ||
115 | ({ \ | ||
116 | unsigned long _flags; \ | ||
117 | \ | ||
118 | raw_local_save_flags(_flags); \ | ||
119 | raw_irqs_disabled_flags(_flags); \ | ||
120 | }) | ||
121 | 133 | ||
122 | #define irqs_disabled_flags(flags) \ | 134 | #else /* !CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
123 | ({ \ | 135 | |
124 | typecheck(unsigned long, flags); \ | 136 | #define local_irq_enable() do { raw_local_irq_enable(); } while (0) |
125 | raw_irqs_disabled_flags(flags); \ | 137 | #define local_irq_disable() do { raw_local_irq_disable(); } while (0) |
126 | }) | 138 | #define local_irq_save(flags) \ |
139 | do { \ | ||
140 | raw_local_irq_save(flags); \ | ||
141 | } while (0) | ||
142 | #define local_irq_restore(flags) do { raw_local_irq_restore(flags); } while (0) | ||
143 | #define local_save_flags(flags) do { raw_local_save_flags(flags); } while (0) | ||
144 | #define irqs_disabled() (raw_irqs_disabled()) | ||
145 | #define irqs_disabled_flags(flags) (raw_irqs_disabled_flags(flags)) | ||
146 | #define safe_halt() do { raw_safe_halt(); } while (0) | ||
147 | |||
127 | #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ | 148 | #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */ |
128 | 149 | ||
129 | #endif | 150 | #endif |
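None of the reshuffling above changes the calling convention: drivers keep using the local_irq_*() family; the typecheck() simply moved down into the raw_* layer, which now wraps the arch_* primitives directly. The canonical pattern, unchanged by this patch:

        #include <linux/irqflags.h>

        static void example_critical_section(void)
        {
                unsigned long flags;            /* must be unsigned long; typecheck() enforces it */

                local_irq_save(flags);          /* disable irqs, remember previous state */
                /* ... short critical section ... */
                local_irq_restore(flags);       /* put the previous state back */
        }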
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h index 7bf89bc8cbca..05aa8c23483f 100644 --- a/include/linux/irqnr.h +++ b/include/linux/irqnr.h | |||
@@ -25,6 +25,7 @@ | |||
25 | 25 | ||
26 | extern int nr_irqs; | 26 | extern int nr_irqs; |
27 | extern struct irq_desc *irq_to_desc(unsigned int irq); | 27 | extern struct irq_desc *irq_to_desc(unsigned int irq); |
28 | unsigned int irq_get_next_irq(unsigned int offset); | ||
28 | 29 | ||
29 | # define for_each_irq_desc(irq, desc) \ | 30 | # define for_each_irq_desc(irq, desc) \ |
30 | for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ | 31 | for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \ |
@@ -47,6 +48,10 @@ extern struct irq_desc *irq_to_desc(unsigned int irq); | |||
47 | #define irq_node(irq) 0 | 48 | #define irq_node(irq) 0 |
48 | #endif | 49 | #endif |
49 | 50 | ||
51 | # define for_each_active_irq(irq) \ | ||
52 | for (irq = irq_get_next_irq(0); irq < nr_irqs; \ | ||
53 | irq = irq_get_next_irq(irq + 1)) | ||
54 | |||
50 | #endif /* CONFIG_GENERIC_HARDIRQS */ | 55 | #endif /* CONFIG_GENERIC_HARDIRQS */ |
51 | 56 | ||
52 | #define for_each_irq_nr(irq) \ | 57 | #define for_each_irq_nr(irq) \ |
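The new for_each_active_irq() iterator walks only the interrupts that actually have a descriptor, using irq_get_next_irq() to skip the holes left by sparse irq allocation. A small sketch (the counting function is invented):

        /* Sketch: count the active irqs that currently have a handler installed. */
        static unsigned int example_count_requested_irqs(void)
        {
                unsigned int irq, count = 0;

                for_each_active_irq(irq) {
                        if (irq_has_action(irq))        /* helper from <linux/irqdesc.h> */
                                count++;
                }
                return count;
        }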
diff --git a/include/linux/jhash.h b/include/linux/jhash.h index 2a2f99fbcb16..ced1159fa4f2 100644 --- a/include/linux/jhash.h +++ b/include/linux/jhash.h | |||
@@ -116,7 +116,7 @@ static inline u32 jhash2(const u32 *k, u32 length, u32 initval) | |||
116 | /* A special ultra-optimized versions that knows they are hashing exactly | 116 | /* A special ultra-optimized versions that knows they are hashing exactly |
117 | * 3, 2 or 1 word(s). | 117 | * 3, 2 or 1 word(s). |
118 | * | 118 | * |
119 | * NOTE: In partilar the "c += length; __jhash_mix(a,b,c);" normally | 119 | * NOTE: In particular the "c += length; __jhash_mix(a,b,c);" normally |
120 | * done at the end is not done here. | 120 | * done at the end is not done here. |
121 | */ | 121 | */ |
122 | static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) | 122 | static inline u32 jhash_3words(u32 a, u32 b, u32 c, u32 initval) |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h new file mode 100644 index 000000000000..b67cb180e6e9 --- /dev/null +++ b/include/linux/jump_label.h | |||
@@ -0,0 +1,74 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_H | ||
2 | #define _LINUX_JUMP_LABEL_H | ||
3 | |||
4 | #if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_HAVE_ARCH_JUMP_LABEL) | ||
5 | # include <asm/jump_label.h> | ||
6 | # define HAVE_JUMP_LABEL | ||
7 | #endif | ||
8 | |||
9 | enum jump_label_type { | ||
10 | JUMP_LABEL_ENABLE, | ||
11 | JUMP_LABEL_DISABLE | ||
12 | }; | ||
13 | |||
14 | struct module; | ||
15 | |||
16 | #ifdef HAVE_JUMP_LABEL | ||
17 | |||
18 | extern struct jump_entry __start___jump_table[]; | ||
19 | extern struct jump_entry __stop___jump_table[]; | ||
20 | |||
21 | extern void arch_jump_label_transform(struct jump_entry *entry, | ||
22 | enum jump_label_type type); | ||
23 | extern void arch_jump_label_text_poke_early(jump_label_t addr); | ||
24 | extern void jump_label_update(unsigned long key, enum jump_label_type type); | ||
25 | extern void jump_label_apply_nops(struct module *mod); | ||
26 | extern int jump_label_text_reserved(void *start, void *end); | ||
27 | |||
28 | #define jump_label_enable(key) \ | ||
29 | jump_label_update((unsigned long)key, JUMP_LABEL_ENABLE); | ||
30 | |||
31 | #define jump_label_disable(key) \ | ||
32 | jump_label_update((unsigned long)key, JUMP_LABEL_DISABLE); | ||
33 | |||
34 | #else | ||
35 | |||
36 | #define JUMP_LABEL(key, label) \ | ||
37 | do { \ | ||
38 | if (unlikely(*key)) \ | ||
39 | goto label; \ | ||
40 | } while (0) | ||
41 | |||
42 | #define jump_label_enable(cond_var) \ | ||
43 | do { \ | ||
44 | *(cond_var) = 1; \ | ||
45 | } while (0) | ||
46 | |||
47 | #define jump_label_disable(cond_var) \ | ||
48 | do { \ | ||
49 | *(cond_var) = 0; \ | ||
50 | } while (0) | ||
51 | |||
52 | static inline int jump_label_apply_nops(struct module *mod) | ||
53 | { | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static inline int jump_label_text_reserved(void *start, void *end) | ||
58 | { | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | #endif | ||
63 | |||
64 | #define COND_STMT(key, stmt) \ | ||
65 | do { \ | ||
66 | __label__ jl_enabled; \ | ||
67 | JUMP_LABEL(key, jl_enabled); \ | ||
68 | if (0) { \ | ||
69 | jl_enabled: \ | ||
70 | stmt; \ | ||
71 | } \ | ||
72 | } while (0) | ||
73 | |||
74 | #endif | ||
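jump_label.h reads best with a consumer in mind: a rarely-taken path guarded by a key, compiled down to a patchable no-op branch when the toolchain supports asm goto and falling back to an ordinary unlikely() test otherwise. The key name and the guarded statement below are invented for illustration.

        static int example_debug_key;   /* the key; its address identifies the branch sites */

        static inline void example_fast_path(void)
        {
                /* single nop/patched jump with HAVE_JUMP_LABEL,
                 * plain if (unlikely(example_debug_key)) otherwise */
                COND_STMT(&example_debug_key, pr_info("debug path taken\n"));
        }

        static void example_toggle_debug(bool on)
        {
                if (on)
                        jump_label_enable(&example_debug_key);
                else
                        jump_label_disable(&example_debug_key);
        }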
diff --git a/include/linux/jump_label_ref.h b/include/linux/jump_label_ref.h new file mode 100644 index 000000000000..e5d012ad92c6 --- /dev/null +++ b/include/linux/jump_label_ref.h | |||
@@ -0,0 +1,44 @@ | |||
1 | #ifndef _LINUX_JUMP_LABEL_REF_H | ||
2 | #define _LINUX_JUMP_LABEL_REF_H | ||
3 | |||
4 | #include <linux/jump_label.h> | ||
5 | #include <asm/atomic.h> | ||
6 | |||
7 | #ifdef HAVE_JUMP_LABEL | ||
8 | |||
9 | static inline void jump_label_inc(atomic_t *key) | ||
10 | { | ||
11 | if (atomic_add_return(1, key) == 1) | ||
12 | jump_label_enable(key); | ||
13 | } | ||
14 | |||
15 | static inline void jump_label_dec(atomic_t *key) | ||
16 | { | ||
17 | if (atomic_dec_and_test(key)) | ||
18 | jump_label_disable(key); | ||
19 | } | ||
20 | |||
21 | #else /* !HAVE_JUMP_LABEL */ | ||
22 | |||
23 | static inline void jump_label_inc(atomic_t *key) | ||
24 | { | ||
25 | atomic_inc(key); | ||
26 | } | ||
27 | |||
28 | static inline void jump_label_dec(atomic_t *key) | ||
29 | { | ||
30 | atomic_dec(key); | ||
31 | } | ||
32 | |||
33 | #undef JUMP_LABEL | ||
34 | #define JUMP_LABEL(key, label) \ | ||
35 | do { \ | ||
36 | if (unlikely(__builtin_choose_expr( \ | ||
37 | __builtin_types_compatible_p(typeof(key), atomic_t *), \ | ||
38 | atomic_read((atomic_t *)(key)), *(key)))) \ | ||
39 | goto label; \ | ||
40 | } while (0) | ||
41 | |||
42 | #endif /* HAVE_JUMP_LABEL */ | ||
43 | |||
44 | #endif /* _LINUX_JUMP_LABEL_REF_H */ | ||
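jump_label_ref.h layers reference counting on top of the plain enable/disable interface: the branch is switched on when the first user arrives and off when the last one goes away, which is the pattern event subsystems are expected to use. A sketch with hypothetical names:

        static atomic_t example_users = ATOMIC_INIT(0);

        static void example_get(void)
        {
                jump_label_inc(&example_users);         /* 0 -> 1 enables the branch */
        }

        static void example_put(void)
        {
                jump_label_dec(&example_users);         /* 1 -> 0 disables it again */
        }

        static void example_hot_path(void)
        {
                JUMP_LABEL(&example_users, have_users); /* falls through while the count is 0 */
                return;
        have_users:
                pr_info("at least one user registered\n");
        }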
diff --git a/include/linux/kdb.h b/include/linux/kdb.h index ea6e5244ed3f..aadff7cc2b84 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h | |||
@@ -28,6 +28,41 @@ extern int kdb_poll_idx; | |||
28 | extern int kdb_initial_cpu; | 28 | extern int kdb_initial_cpu; |
29 | extern atomic_t kdb_event; | 29 | extern atomic_t kdb_event; |
30 | 30 | ||
31 | /* Types and messages used for dynamically added kdb shell commands */ | ||
32 | |||
33 | #define KDB_MAXARGS 16 /* Maximum number of arguments to a function */ | ||
34 | |||
35 | typedef enum { | ||
36 | KDB_REPEAT_NONE = 0, /* Do not repeat this command */ | ||
37 | KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ | ||
38 | KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ | ||
39 | } kdb_repeat_t; | ||
40 | |||
41 | typedef int (*kdb_func_t)(int, const char **); | ||
42 | |||
43 | /* KDB return codes from a command or internal kdb function */ | ||
44 | #define KDB_NOTFOUND (-1) | ||
45 | #define KDB_ARGCOUNT (-2) | ||
46 | #define KDB_BADWIDTH (-3) | ||
47 | #define KDB_BADRADIX (-4) | ||
48 | #define KDB_NOTENV (-5) | ||
49 | #define KDB_NOENVVALUE (-6) | ||
50 | #define KDB_NOTIMP (-7) | ||
51 | #define KDB_ENVFULL (-8) | ||
52 | #define KDB_ENVBUFFULL (-9) | ||
53 | #define KDB_TOOMANYBPT (-10) | ||
54 | #define KDB_TOOMANYDBREGS (-11) | ||
55 | #define KDB_DUPBPT (-12) | ||
56 | #define KDB_BPTNOTFOUND (-13) | ||
57 | #define KDB_BADMODE (-14) | ||
58 | #define KDB_BADINT (-15) | ||
59 | #define KDB_INVADDRFMT (-16) | ||
60 | #define KDB_BADREG (-17) | ||
61 | #define KDB_BADCPUNUM (-18) | ||
62 | #define KDB_BADLENGTH (-19) | ||
63 | #define KDB_NOBP (-20) | ||
64 | #define KDB_BADADDR (-21) | ||
65 | |||
31 | /* | 66 | /* |
32 | * kdb_diemsg | 67 | * kdb_diemsg |
33 | * | 68 | * |
@@ -104,10 +139,26 @@ int kdb_process_cpu(const struct task_struct *p) | |||
104 | 139 | ||
105 | /* kdb access to register set for stack dumping */ | 140 | /* kdb access to register set for stack dumping */ |
106 | extern struct pt_regs *kdb_current_regs; | 141 | extern struct pt_regs *kdb_current_regs; |
142 | #ifdef CONFIG_KALLSYMS | ||
143 | extern const char *kdb_walk_kallsyms(loff_t *pos); | ||
144 | #else /* ! CONFIG_KALLSYMS */ | ||
145 | static inline const char *kdb_walk_kallsyms(loff_t *pos) | ||
146 | { | ||
147 | return NULL; | ||
148 | } | ||
149 | #endif /* ! CONFIG_KALLSYMS */ | ||
107 | 150 | ||
151 | /* Dynamic kdb shell command registration */ | ||
152 | extern int kdb_register(char *, kdb_func_t, char *, char *, short); | ||
153 | extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, | ||
154 | short, kdb_repeat_t); | ||
155 | extern int kdb_unregister(char *); | ||
108 | #else /* ! CONFIG_KGDB_KDB */ | 156 | #else /* ! CONFIG_KGDB_KDB */ |
109 | #define kdb_printf(...) | 157 | #define kdb_printf(...) |
110 | #define kdb_init(x) | 158 | #define kdb_init(x) |
159 | #define kdb_register(...) | ||
160 | #define kdb_register_repeat(...) | ||
161 | #define kdb_unregister(x) | ||
111 | #endif /* CONFIG_KGDB_KDB */ | 162 | #endif /* CONFIG_KGDB_KDB */ |
112 | enum { | 163 | enum { |
113 | KDB_NOT_INITIALIZED, | 164 | KDB_NOT_INITIALIZED, |
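With registration now declared in kdb.h, debugger extensions can add their own shell commands. A hedged sketch (command name, help text and behaviour are made up; error handling would use the KDB_* codes listed above):

        #include <linux/kdb.h>

        /* kdb_func_t: argc/argv style, returns 0 or one of the KDB_* error codes */
        static int kdb_hello(int argc, const char **argv)
        {
                kdb_printf("hello from kdb\n");
                return 0;
        }

        static int __init example_kdb_init(void)
        {
                /* usage string, help string, minimum abbreviation, repeat behaviour */
                return kdb_register_repeat("hello", kdb_hello, "",
                                           "Print a greeting", 0, KDB_REPEAT_NONE);
        }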
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 2b0a35e6bc69..edef168a0406 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
@@ -58,7 +58,18 @@ extern const char linux_proc_banner[]; | |||
58 | 58 | ||
59 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) | 59 | #define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) |
60 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) | 60 | #define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) |
61 | #define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) | 61 | #define roundup(x, y) ( \ |
62 | { \ | ||
63 | typeof(y) __y = y; \ | ||
64 | (((x) + (__y - 1)) / __y) * __y; \ | ||
65 | } \ | ||
66 | ) | ||
67 | #define rounddown(x, y) ( \ | ||
68 | { \ | ||
69 | typeof(x) __x = (x); \ | ||
70 | __x - (__x % (y)); \ | ||
71 | } \ | ||
72 | ) | ||
62 | #define DIV_ROUND_CLOSEST(x, divisor)( \ | 73 | #define DIV_ROUND_CLOSEST(x, divisor)( \ |
63 | { \ | 74 | { \ |
64 | typeof(divisor) __divisor = divisor; \ | 75 | typeof(divisor) __divisor = divisor; \ |
@@ -641,6 +652,16 @@ static inline void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { } | |||
641 | _max1 > _max2 ? _max1 : _max2; }) | 652 | _max1 > _max2 ? _max1 : _max2; }) |
642 | 653 | ||
643 | /** | 654 | /** |
655 | * min_not_zero - return the minimum that is _not_ zero, unless both are zero | ||
656 | * @x: value1 | ||
657 | * @y: value2 | ||
658 | */ | ||
659 | #define min_not_zero(x, y) ({ \ | ||
660 | typeof(x) __x = (x); \ | ||
661 | typeof(y) __y = (y); \ | ||
662 | __x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); }) | ||
663 | |||
664 | /** | ||
644 | * clamp - return a value clamped to a given range with strict typechecking | 665 | * clamp - return a value clamped to a given range with strict typechecking |
645 | * @val: current value | 666 | * @val: current value |
646 | * @min: minimum allowable value | 667 | * @min: minimum allowable value |
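The rewritten roundup() now evaluates y only once, the new rounddown() evaluates x only once, and min_not_zero() prefers whichever argument is non-zero. A few worked values, assuming plain integer arguments:

        roundup(10, 4);         /* == 12: smallest multiple of 4 that is >= 10 */
        roundup(12, 4);         /* == 12: already a multiple */
        rounddown(10, 4);       /* ==  8: largest multiple of 4 that is <= 10 */

        min_not_zero(0, 5);     /* == 5: the zero argument is ignored */
        min_not_zero(3, 5);     /* == 3: behaves like min() when both are non-zero */
        min_not_zero(0, 0);     /* == 0: only when both are zero */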
diff --git a/include/linux/key.h b/include/linux/key.h index cd50dfa1d4c2..3db0adce1fda 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -178,8 +178,9 @@ struct key { | |||
178 | */ | 178 | */ |
179 | union { | 179 | union { |
180 | unsigned long value; | 180 | unsigned long value; |
181 | void __rcu *rcudata; | ||
181 | void *data; | 182 | void *data; |
182 | struct keyring_list *subscriptions; | 183 | struct keyring_list __rcu *subscriptions; |
183 | } payload; | 184 | } payload; |
184 | }; | 185 | }; |
185 | 186 | ||
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 311f8753d713..62dbee554f60 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
@@ -214,7 +214,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
214 | */ | 214 | */ |
215 | #define kfifo_reset(fifo) \ | 215 | #define kfifo_reset(fifo) \ |
216 | (void)({ \ | 216 | (void)({ \ |
217 | typeof(fifo + 1) __tmp = (fifo); \ | 217 | typeof((fifo) + 1) __tmp = (fifo); \ |
218 | __tmp->kfifo.in = __tmp->kfifo.out = 0; \ | 218 | __tmp->kfifo.in = __tmp->kfifo.out = 0; \ |
219 | }) | 219 | }) |
220 | 220 | ||
@@ -228,7 +228,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
228 | */ | 228 | */ |
229 | #define kfifo_reset_out(fifo) \ | 229 | #define kfifo_reset_out(fifo) \ |
230 | (void)({ \ | 230 | (void)({ \ |
231 | typeof(fifo + 1) __tmp = (fifo); \ | 231 | typeof((fifo) + 1) __tmp = (fifo); \ |
232 | __tmp->kfifo.out = __tmp->kfifo.in; \ | 232 | __tmp->kfifo.out = __tmp->kfifo.in; \ |
233 | }) | 233 | }) |
234 | 234 | ||
@@ -238,7 +238,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
238 | */ | 238 | */ |
239 | #define kfifo_len(fifo) \ | 239 | #define kfifo_len(fifo) \ |
240 | ({ \ | 240 | ({ \ |
241 | typeof(fifo + 1) __tmpl = (fifo); \ | 241 | typeof((fifo) + 1) __tmpl = (fifo); \ |
242 | __tmpl->kfifo.in - __tmpl->kfifo.out; \ | 242 | __tmpl->kfifo.in - __tmpl->kfifo.out; \ |
243 | }) | 243 | }) |
244 | 244 | ||
@@ -248,7 +248,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
248 | */ | 248 | */ |
249 | #define kfifo_is_empty(fifo) \ | 249 | #define kfifo_is_empty(fifo) \ |
250 | ({ \ | 250 | ({ \ |
251 | typeof(fifo + 1) __tmpq = (fifo); \ | 251 | typeof((fifo) + 1) __tmpq = (fifo); \ |
252 | __tmpq->kfifo.in == __tmpq->kfifo.out; \ | 252 | __tmpq->kfifo.in == __tmpq->kfifo.out; \ |
253 | }) | 253 | }) |
254 | 254 | ||
@@ -258,7 +258,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
258 | */ | 258 | */ |
259 | #define kfifo_is_full(fifo) \ | 259 | #define kfifo_is_full(fifo) \ |
260 | ({ \ | 260 | ({ \ |
261 | typeof(fifo + 1) __tmpq = (fifo); \ | 261 | typeof((fifo) + 1) __tmpq = (fifo); \ |
262 | kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ | 262 | kfifo_len(__tmpq) > __tmpq->kfifo.mask; \ |
263 | }) | 263 | }) |
264 | 264 | ||
@@ -269,7 +269,7 @@ __kfifo_must_check_helper(unsigned int val) | |||
269 | #define kfifo_avail(fifo) \ | 269 | #define kfifo_avail(fifo) \ |
270 | __kfifo_must_check_helper( \ | 270 | __kfifo_must_check_helper( \ |
271 | ({ \ | 271 | ({ \ |
272 | typeof(fifo + 1) __tmpq = (fifo); \ | 272 | typeof((fifo) + 1) __tmpq = (fifo); \ |
273 | const size_t __recsize = sizeof(*__tmpq->rectype); \ | 273 | const size_t __recsize = sizeof(*__tmpq->rectype); \ |
274 | unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ | 274 | unsigned int __avail = kfifo_size(__tmpq) - kfifo_len(__tmpq); \ |
275 | (__recsize) ? ((__avail <= __recsize) ? 0 : \ | 275 | (__recsize) ? ((__avail <= __recsize) ? 0 : \ |
@@ -284,7 +284,7 @@ __kfifo_must_check_helper( \ | |||
284 | */ | 284 | */ |
285 | #define kfifo_skip(fifo) \ | 285 | #define kfifo_skip(fifo) \ |
286 | (void)({ \ | 286 | (void)({ \ |
287 | typeof(fifo + 1) __tmp = (fifo); \ | 287 | typeof((fifo) + 1) __tmp = (fifo); \ |
288 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 288 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
289 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 289 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
290 | if (__recsize) \ | 290 | if (__recsize) \ |
@@ -302,7 +302,7 @@ __kfifo_must_check_helper( \ | |||
302 | #define kfifo_peek_len(fifo) \ | 302 | #define kfifo_peek_len(fifo) \ |
303 | __kfifo_must_check_helper( \ | 303 | __kfifo_must_check_helper( \ |
304 | ({ \ | 304 | ({ \ |
305 | typeof(fifo + 1) __tmp = (fifo); \ | 305 | typeof((fifo) + 1) __tmp = (fifo); \ |
306 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 306 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
307 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 307 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
308 | (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ | 308 | (!__recsize) ? kfifo_len(__tmp) * sizeof(*__tmp->type) : \ |
@@ -325,7 +325,7 @@ __kfifo_must_check_helper( \ | |||
325 | #define kfifo_alloc(fifo, size, gfp_mask) \ | 325 | #define kfifo_alloc(fifo, size, gfp_mask) \ |
326 | __kfifo_must_check_helper( \ | 326 | __kfifo_must_check_helper( \ |
327 | ({ \ | 327 | ({ \ |
328 | typeof(fifo + 1) __tmp = (fifo); \ | 328 | typeof((fifo) + 1) __tmp = (fifo); \ |
329 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 329 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
330 | __is_kfifo_ptr(__tmp) ? \ | 330 | __is_kfifo_ptr(__tmp) ? \ |
331 | __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ | 331 | __kfifo_alloc(__kfifo, size, sizeof(*__tmp->type), gfp_mask) : \ |
@@ -339,7 +339,7 @@ __kfifo_must_check_helper( \ | |||
339 | */ | 339 | */ |
340 | #define kfifo_free(fifo) \ | 340 | #define kfifo_free(fifo) \ |
341 | ({ \ | 341 | ({ \ |
342 | typeof(fifo + 1) __tmp = (fifo); \ | 342 | typeof((fifo) + 1) __tmp = (fifo); \ |
343 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 343 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
344 | if (__is_kfifo_ptr(__tmp)) \ | 344 | if (__is_kfifo_ptr(__tmp)) \ |
345 | __kfifo_free(__kfifo); \ | 345 | __kfifo_free(__kfifo); \ |
@@ -358,7 +358,7 @@ __kfifo_must_check_helper( \ | |||
358 | */ | 358 | */ |
359 | #define kfifo_init(fifo, buffer, size) \ | 359 | #define kfifo_init(fifo, buffer, size) \ |
360 | ({ \ | 360 | ({ \ |
361 | typeof(fifo + 1) __tmp = (fifo); \ | 361 | typeof((fifo) + 1) __tmp = (fifo); \ |
362 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 362 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
363 | __is_kfifo_ptr(__tmp) ? \ | 363 | __is_kfifo_ptr(__tmp) ? \ |
364 | __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ | 364 | __kfifo_init(__kfifo, buffer, size, sizeof(*__tmp->type)) : \ |
@@ -379,8 +379,8 @@ __kfifo_must_check_helper( \ | |||
379 | */ | 379 | */ |
380 | #define kfifo_put(fifo, val) \ | 380 | #define kfifo_put(fifo, val) \ |
381 | ({ \ | 381 | ({ \ |
382 | typeof(fifo + 1) __tmp = (fifo); \ | 382 | typeof((fifo) + 1) __tmp = (fifo); \ |
383 | typeof(val + 1) __val = (val); \ | 383 | typeof((val) + 1) __val = (val); \ |
384 | unsigned int __ret; \ | 384 | unsigned int __ret; \ |
385 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 385 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
386 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 386 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -421,8 +421,8 @@ __kfifo_must_check_helper( \ | |||
421 | #define kfifo_get(fifo, val) \ | 421 | #define kfifo_get(fifo, val) \ |
422 | __kfifo_must_check_helper( \ | 422 | __kfifo_must_check_helper( \ |
423 | ({ \ | 423 | ({ \ |
424 | typeof(fifo + 1) __tmp = (fifo); \ | 424 | typeof((fifo) + 1) __tmp = (fifo); \ |
425 | typeof(val + 1) __val = (val); \ | 425 | typeof((val) + 1) __val = (val); \ |
426 | unsigned int __ret; \ | 426 | unsigned int __ret; \ |
427 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 427 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
428 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 428 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -462,8 +462,8 @@ __kfifo_must_check_helper( \ | |||
462 | #define kfifo_peek(fifo, val) \ | 462 | #define kfifo_peek(fifo, val) \ |
463 | __kfifo_must_check_helper( \ | 463 | __kfifo_must_check_helper( \ |
464 | ({ \ | 464 | ({ \ |
465 | typeof(fifo + 1) __tmp = (fifo); \ | 465 | typeof((fifo) + 1) __tmp = (fifo); \ |
466 | typeof(val + 1) __val = (val); \ | 466 | typeof((val) + 1) __val = (val); \ |
467 | unsigned int __ret; \ | 467 | unsigned int __ret; \ |
468 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 468 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
469 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 469 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -501,8 +501,8 @@ __kfifo_must_check_helper( \ | |||
501 | */ | 501 | */ |
502 | #define kfifo_in(fifo, buf, n) \ | 502 | #define kfifo_in(fifo, buf, n) \ |
503 | ({ \ | 503 | ({ \ |
504 | typeof(fifo + 1) __tmp = (fifo); \ | 504 | typeof((fifo) + 1) __tmp = (fifo); \ |
505 | typeof(buf + 1) __buf = (buf); \ | 505 | typeof((buf) + 1) __buf = (buf); \ |
506 | unsigned long __n = (n); \ | 506 | unsigned long __n = (n); \ |
507 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 507 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
508 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 508 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -554,8 +554,8 @@ __kfifo_must_check_helper( \ | |||
554 | #define kfifo_out(fifo, buf, n) \ | 554 | #define kfifo_out(fifo, buf, n) \ |
555 | __kfifo_must_check_helper( \ | 555 | __kfifo_must_check_helper( \ |
556 | ({ \ | 556 | ({ \ |
557 | typeof(fifo + 1) __tmp = (fifo); \ | 557 | typeof((fifo) + 1) __tmp = (fifo); \ |
558 | typeof(buf + 1) __buf = (buf); \ | 558 | typeof((buf) + 1) __buf = (buf); \ |
559 | unsigned long __n = (n); \ | 559 | unsigned long __n = (n); \ |
560 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 560 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
561 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 561 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -611,7 +611,7 @@ __kfifo_must_check_helper( \ | |||
611 | #define kfifo_from_user(fifo, from, len, copied) \ | 611 | #define kfifo_from_user(fifo, from, len, copied) \ |
612 | __kfifo_must_check_helper( \ | 612 | __kfifo_must_check_helper( \ |
613 | ({ \ | 613 | ({ \ |
614 | typeof(fifo + 1) __tmp = (fifo); \ | 614 | typeof((fifo) + 1) __tmp = (fifo); \ |
615 | const void __user *__from = (from); \ | 615 | const void __user *__from = (from); \ |
616 | unsigned int __len = (len); \ | 616 | unsigned int __len = (len); \ |
617 | unsigned int *__copied = (copied); \ | 617 | unsigned int *__copied = (copied); \ |
@@ -639,7 +639,7 @@ __kfifo_must_check_helper( \ | |||
639 | #define kfifo_to_user(fifo, to, len, copied) \ | 639 | #define kfifo_to_user(fifo, to, len, copied) \ |
640 | __kfifo_must_check_helper( \ | 640 | __kfifo_must_check_helper( \ |
641 | ({ \ | 641 | ({ \ |
642 | typeof(fifo + 1) __tmp = (fifo); \ | 642 | typeof((fifo) + 1) __tmp = (fifo); \ |
643 | void __user *__to = (to); \ | 643 | void __user *__to = (to); \ |
644 | unsigned int __len = (len); \ | 644 | unsigned int __len = (len); \ |
645 | unsigned int *__copied = (copied); \ | 645 | unsigned int *__copied = (copied); \ |
@@ -666,7 +666,7 @@ __kfifo_must_check_helper( \ | |||
666 | */ | 666 | */ |
667 | #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ | 667 | #define kfifo_dma_in_prepare(fifo, sgl, nents, len) \ |
668 | ({ \ | 668 | ({ \ |
669 | typeof(fifo + 1) __tmp = (fifo); \ | 669 | typeof((fifo) + 1) __tmp = (fifo); \ |
670 | struct scatterlist *__sgl = (sgl); \ | 670 | struct scatterlist *__sgl = (sgl); \ |
671 | int __nents = (nents); \ | 671 | int __nents = (nents); \ |
672 | unsigned int __len = (len); \ | 672 | unsigned int __len = (len); \ |
@@ -690,7 +690,7 @@ __kfifo_must_check_helper( \ | |||
690 | */ | 690 | */ |
691 | #define kfifo_dma_in_finish(fifo, len) \ | 691 | #define kfifo_dma_in_finish(fifo, len) \ |
692 | (void)({ \ | 692 | (void)({ \ |
693 | typeof(fifo + 1) __tmp = (fifo); \ | 693 | typeof((fifo) + 1) __tmp = (fifo); \ |
694 | unsigned int __len = (len); \ | 694 | unsigned int __len = (len); \ |
695 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 695 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
696 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 696 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -717,7 +717,7 @@ __kfifo_must_check_helper( \ | |||
717 | */ | 717 | */ |
718 | #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ | 718 | #define kfifo_dma_out_prepare(fifo, sgl, nents, len) \ |
719 | ({ \ | 719 | ({ \ |
720 | typeof(fifo + 1) __tmp = (fifo); \ | 720 | typeof((fifo) + 1) __tmp = (fifo); \ |
721 | struct scatterlist *__sgl = (sgl); \ | 721 | struct scatterlist *__sgl = (sgl); \ |
722 | int __nents = (nents); \ | 722 | int __nents = (nents); \ |
723 | unsigned int __len = (len); \ | 723 | unsigned int __len = (len); \ |
@@ -741,7 +741,7 @@ __kfifo_must_check_helper( \ | |||
741 | */ | 741 | */ |
742 | #define kfifo_dma_out_finish(fifo, len) \ | 742 | #define kfifo_dma_out_finish(fifo, len) \ |
743 | (void)({ \ | 743 | (void)({ \ |
744 | typeof(fifo + 1) __tmp = (fifo); \ | 744 | typeof((fifo) + 1) __tmp = (fifo); \ |
745 | unsigned int __len = (len); \ | 745 | unsigned int __len = (len); \ |
746 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 746 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
747 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 747 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -766,8 +766,8 @@ __kfifo_must_check_helper( \ | |||
766 | #define kfifo_out_peek(fifo, buf, n) \ | 766 | #define kfifo_out_peek(fifo, buf, n) \ |
767 | __kfifo_must_check_helper( \ | 767 | __kfifo_must_check_helper( \ |
768 | ({ \ | 768 | ({ \ |
769 | typeof(fifo + 1) __tmp = (fifo); \ | 769 | typeof((fifo) + 1) __tmp = (fifo); \ |
770 | typeof(buf + 1) __buf = (buf); \ | 770 | typeof((buf) + 1) __buf = (buf); \ |
771 | unsigned long __n = (n); \ | 771 | unsigned long __n = (n); \ |
772 | const size_t __recsize = sizeof(*__tmp->rectype); \ | 772 | const size_t __recsize = sizeof(*__tmp->rectype); \ |
773 | struct __kfifo *__kfifo = &__tmp->kfifo; \ | 773 | struct __kfifo *__kfifo = &__tmp->kfifo; \ |
@@ -836,6 +836,8 @@ extern void __kfifo_dma_out_finish_r(struct __kfifo *fifo, size_t recsize); | |||
836 | 836 | ||
837 | extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); | 837 | extern unsigned int __kfifo_len_r(struct __kfifo *fifo, size_t recsize); |
838 | 838 | ||
839 | extern void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize); | ||
840 | |||
839 | extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, | 841 | extern unsigned int __kfifo_out_peek_r(struct __kfifo *fifo, |
840 | void *buf, unsigned int len, size_t recsize); | 842 | void *buf, unsigned int len, size_t recsize); |
841 | 843 | ||
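All of the kfifo.h hunks make one change: parentheses around the macro argument inside typeof(), so expressions (not just plain identifiers) can safely be passed as the fifo, val or buf argument. Basic usage is untouched; a minimal sketch in the element-pointer style this API uses:

        #include <linux/kfifo.h>

        DEFINE_KFIFO(example_fifo, int, 16);            /* 16-element fifo of int */

        static void example_kfifo_use(void)
        {
                int in = 42, out;

                kfifo_put(&example_fifo, &in);          /* copy one element in */

                if (kfifo_get(&example_fifo, &out))     /* copy one element out */
                        pr_info("got %d, %u left\n", out, kfifo_len(&example_fifo));
        }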
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index cf343a852534..8f6d12151048 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/compiler.h> | 22 | #include <linux/compiler.h> |
23 | #include <linux/spinlock.h> | 23 | #include <linux/spinlock.h> |
24 | #include <linux/kref.h> | 24 | #include <linux/kref.h> |
25 | #include <linux/kobject_ns.h> | ||
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/wait.h> | 27 | #include <linux/wait.h> |
27 | #include <asm/atomic.h> | 28 | #include <asm/atomic.h> |
@@ -136,42 +137,8 @@ struct kobj_attribute { | |||
136 | 137 | ||
137 | extern const struct sysfs_ops kobj_sysfs_ops; | 138 | extern const struct sysfs_ops kobj_sysfs_ops; |
138 | 139 | ||
139 | /* | ||
140 | * Namespace types which are used to tag kobjects and sysfs entries. | ||
141 | * Network namespace will likely be the first. | ||
142 | */ | ||
143 | enum kobj_ns_type { | ||
144 | KOBJ_NS_TYPE_NONE = 0, | ||
145 | KOBJ_NS_TYPE_NET, | ||
146 | KOBJ_NS_TYPES | ||
147 | }; | ||
148 | |||
149 | struct sock; | 140 | struct sock; |
150 | 141 | ||
151 | /* | ||
152 | * Callbacks so sysfs can determine namespaces | ||
153 | * @current_ns: return calling task's namespace | ||
154 | * @netlink_ns: return namespace to which a sock belongs (right?) | ||
155 | * @initial_ns: return the initial namespace (i.e. init_net_ns) | ||
156 | */ | ||
157 | struct kobj_ns_type_operations { | ||
158 | enum kobj_ns_type type; | ||
159 | const void *(*current_ns)(void); | ||
160 | const void *(*netlink_ns)(struct sock *sk); | ||
161 | const void *(*initial_ns)(void); | ||
162 | }; | ||
163 | |||
164 | int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); | ||
165 | int kobj_ns_type_registered(enum kobj_ns_type type); | ||
166 | const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); | ||
167 | const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); | ||
168 | |||
169 | const void *kobj_ns_current(enum kobj_ns_type type); | ||
170 | const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); | ||
171 | const void *kobj_ns_initial(enum kobj_ns_type type); | ||
172 | void kobj_ns_exit(enum kobj_ns_type type, const void *ns); | ||
173 | |||
174 | |||
175 | /** | 142 | /** |
176 | * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. | 143 | * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem. |
177 | * | 144 | * |
@@ -224,6 +191,8 @@ static inline struct kobj_type *get_ktype(struct kobject *kobj) | |||
224 | } | 191 | } |
225 | 192 | ||
226 | extern struct kobject *kset_find_obj(struct kset *, const char *); | 193 | extern struct kobject *kset_find_obj(struct kset *, const char *); |
194 | extern struct kobject *kset_find_obj_hinted(struct kset *, const char *, | ||
195 | struct kobject *); | ||
227 | 196 | ||
228 | /* The global /sys/kernel/ kobject for people to chain off of */ | 197 | /* The global /sys/kernel/ kobject for people to chain off of */ |
229 | extern struct kobject *kernel_kobj; | 198 | extern struct kobject *kernel_kobj; |
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h new file mode 100644 index 000000000000..82cb5bf461fb --- /dev/null +++ b/include/linux/kobject_ns.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* Kernel object name space definitions | ||
2 | * | ||
3 | * Copyright (c) 2002-2003 Patrick Mochel | ||
4 | * Copyright (c) 2002-2003 Open Source Development Labs | ||
5 | * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> | ||
6 | * Copyright (c) 2006-2008 Novell Inc. | ||
7 | * | ||
8 | * Split from kobject.h by David Howells (dhowells@redhat.com) | ||
9 | * | ||
10 | * This file is released under the GPLv2. | ||
11 | * | ||
12 | * Please read Documentation/kobject.txt before using the kobject | ||
13 | * interface, ESPECIALLY the parts about reference counts and object | ||
14 | * destructors. | ||
15 | */ | ||
16 | |||
17 | #ifndef _LINUX_KOBJECT_NS_H | ||
18 | #define _LINUX_KOBJECT_NS_H | ||
19 | |||
20 | struct sock; | ||
21 | struct kobject; | ||
22 | |||
23 | /* | ||
24 | * Namespace types which are used to tag kobjects and sysfs entries. | ||
25 | * Network namespace will likely be the first. | ||
26 | */ | ||
27 | enum kobj_ns_type { | ||
28 | KOBJ_NS_TYPE_NONE = 0, | ||
29 | KOBJ_NS_TYPE_NET, | ||
30 | KOBJ_NS_TYPES | ||
31 | }; | ||
32 | |||
33 | /* | ||
34 | * Callbacks so sysfs can determine namespaces | ||
35 | * @current_ns: return calling task's namespace | ||
36 | * @netlink_ns: return namespace to which a sock belongs (right?) | ||
37 | * @initial_ns: return the initial namespace (i.e. init_net_ns) | ||
38 | */ | ||
39 | struct kobj_ns_type_operations { | ||
40 | enum kobj_ns_type type; | ||
41 | const void *(*current_ns)(void); | ||
42 | const void *(*netlink_ns)(struct sock *sk); | ||
43 | const void *(*initial_ns)(void); | ||
44 | }; | ||
45 | |||
46 | int kobj_ns_type_register(const struct kobj_ns_type_operations *ops); | ||
47 | int kobj_ns_type_registered(enum kobj_ns_type type); | ||
48 | const struct kobj_ns_type_operations *kobj_child_ns_ops(struct kobject *parent); | ||
49 | const struct kobj_ns_type_operations *kobj_ns_ops(struct kobject *kobj); | ||
50 | |||
51 | const void *kobj_ns_current(enum kobj_ns_type type); | ||
52 | const void *kobj_ns_netlink(enum kobj_ns_type type, struct sock *sk); | ||
53 | const void *kobj_ns_initial(enum kobj_ns_type type); | ||
54 | void kobj_ns_exit(enum kobj_ns_type type, const void *ns); | ||
55 | |||
56 | #endif /* _LINUX_KOBJECT_NS_H */ | ||
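Functionally nothing changes here; the namespace hooks simply move into their own header so low-level sysfs code can reach them without pulling in all of kobject.h. For orientation, a tag provider registers itself roughly as in the hypothetical sketch below (mirroring what the network namespace code does; init_example_ns and the callbacks are invented):

        static int init_example_ns;     /* stand-in for the real namespace object */

        static const void *example_current_ns(void)            { return &init_example_ns; }
        static const void *example_netlink_ns(struct sock *sk) { return &init_example_ns; }
        static const void *example_initial_ns(void)            { return &init_example_ns; }

        static const struct kobj_ns_type_operations example_ns_ops = {
                .type       = KOBJ_NS_TYPE_NET,         /* the only type defined so far */
                .current_ns = example_current_ns,
                .netlink_ns = example_netlink_ns,
                .initial_ns = example_initial_ns,
        };

        static int __init example_ns_init(void)
        {
                return kobj_ns_type_register(&example_ns_ops);
        }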
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 74d691ee9121..3319a6967626 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
@@ -16,6 +16,9 @@ | |||
16 | struct stable_node; | 16 | struct stable_node; |
17 | struct mem_cgroup; | 17 | struct mem_cgroup; |
18 | 18 | ||
19 | struct page *ksm_does_need_to_copy(struct page *page, | ||
20 | struct vm_area_struct *vma, unsigned long address); | ||
21 | |||
19 | #ifdef CONFIG_KSM | 22 | #ifdef CONFIG_KSM |
20 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | 23 | int ksm_madvise(struct vm_area_struct *vma, unsigned long start, |
21 | unsigned long end, int advice, unsigned long *vm_flags); | 24 | unsigned long end, int advice, unsigned long *vm_flags); |
@@ -70,19 +73,14 @@ static inline void set_page_stable_node(struct page *page, | |||
70 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, | 73 | * We'd like to make this conditional on vma->vm_flags & VM_MERGEABLE, |
71 | * but what if the vma was unmerged while the page was swapped out? | 74 | * but what if the vma was unmerged while the page was swapped out? |
72 | */ | 75 | */ |
73 | struct page *ksm_does_need_to_copy(struct page *page, | 76 | static inline int ksm_might_need_to_copy(struct page *page, |
74 | struct vm_area_struct *vma, unsigned long address); | ||
75 | static inline struct page *ksm_might_need_to_copy(struct page *page, | ||
76 | struct vm_area_struct *vma, unsigned long address) | 77 | struct vm_area_struct *vma, unsigned long address) |
77 | { | 78 | { |
78 | struct anon_vma *anon_vma = page_anon_vma(page); | 79 | struct anon_vma *anon_vma = page_anon_vma(page); |
79 | 80 | ||
80 | if (!anon_vma || | 81 | return anon_vma && |
81 | (anon_vma->root == vma->anon_vma->root && | 82 | (anon_vma->root != vma->anon_vma->root || |
82 | page->index == linear_page_index(vma, address))) | 83 | page->index != linear_page_index(vma, address)); |
83 | return page; | ||
84 | |||
85 | return ksm_does_need_to_copy(page, vma, address); | ||
86 | } | 84 | } |
87 | 85 | ||
88 | int page_referenced_ksm(struct page *page, | 86 | int page_referenced_ksm(struct page *page, |
@@ -115,10 +113,10 @@ static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start, | |||
115 | return 0; | 113 | return 0; |
116 | } | 114 | } |
117 | 115 | ||
118 | static inline struct page *ksm_might_need_to_copy(struct page *page, | 116 | static inline int ksm_might_need_to_copy(struct page *page, |
119 | struct vm_area_struct *vma, unsigned long address) | 117 | struct vm_area_struct *vma, unsigned long address) |
120 | { | 118 | { |
121 | return page; | 119 | return 0; |
122 | } | 120 | } |
123 | 121 | ||
124 | static inline int page_referenced_ksm(struct page *page, | 122 | static inline int page_referenced_ksm(struct page *page, |
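Since ksm_might_need_to_copy() now only answers yes or no, the caller is expected to perform the copy itself via the (now unconditionally declared) ksm_does_need_to_copy(). A sketch of the resulting pattern in a swap-in fault path, simplified from what a do_swap_page()-style caller would do:

        /* page, vma and address come from the surrounding fault-handling code */
        if (ksm_might_need_to_copy(page, vma, address)) {
                struct page *copy = ksm_does_need_to_copy(page, vma, address);

                if (unlikely(!copy))
                        return VM_FAULT_OOM;    /* breaking COW needed a page we couldn't get */
                page = copy;
        }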
diff --git a/include/linux/kvm.h b/include/linux/kvm.h index 636fc381c897..919ae53adc5c 100644 --- a/include/linux/kvm.h +++ b/include/linux/kvm.h | |||
@@ -414,6 +414,14 @@ struct kvm_enable_cap { | |||
414 | __u8 pad[64]; | 414 | __u8 pad[64]; |
415 | }; | 415 | }; |
416 | 416 | ||
417 | /* for KVM_PPC_GET_PVINFO */ | ||
418 | struct kvm_ppc_pvinfo { | ||
419 | /* out */ | ||
420 | __u32 flags; | ||
421 | __u32 hcall[4]; | ||
422 | __u8 pad[108]; | ||
423 | }; | ||
424 | |||
417 | #define KVMIO 0xAE | 425 | #define KVMIO 0xAE |
418 | 426 | ||
419 | /* | 427 | /* |
@@ -530,6 +538,8 @@ struct kvm_enable_cap { | |||
530 | #ifdef __KVM_HAVE_XCRS | 538 | #ifdef __KVM_HAVE_XCRS |
531 | #define KVM_CAP_XCRS 56 | 539 | #define KVM_CAP_XCRS 56 |
532 | #endif | 540 | #endif |
541 | #define KVM_CAP_PPC_GET_PVINFO 57 | ||
542 | #define KVM_CAP_PPC_IRQ_LEVEL 58 | ||
533 | 543 | ||
534 | #ifdef KVM_CAP_IRQ_ROUTING | 544 | #ifdef KVM_CAP_IRQ_ROUTING |
535 | 545 | ||
@@ -664,6 +674,8 @@ struct kvm_clock_data { | |||
664 | /* Available with KVM_CAP_PIT_STATE2 */ | 674 | /* Available with KVM_CAP_PIT_STATE2 */ |
665 | #define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2) | 675 | #define KVM_GET_PIT2 _IOR(KVMIO, 0x9f, struct kvm_pit_state2) |
666 | #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) | 676 | #define KVM_SET_PIT2 _IOW(KVMIO, 0xa0, struct kvm_pit_state2) |
677 | /* Available with KVM_CAP_PPC_GET_PVINFO */ | ||
678 | #define KVM_PPC_GET_PVINFO _IOW(KVMIO, 0xa1, struct kvm_ppc_pvinfo) | ||
667 | 679 | ||
668 | /* | 680 | /* |
669 | * ioctls for vcpu fds | 681 | * ioctls for vcpu fds |
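KVM_PPC_GET_PVINFO is a VM-level ioctl; userspace is expected to probe the capability first and then read back the hypercall template. A hedged userspace sketch (kvm_fd is the /dev/kvm descriptor, vm_fd an already-created VM descriptor; both are assumed to exist):

        #include <linux/kvm.h>
        #include <sys/ioctl.h>
        #include <stdio.h>

        static int example_get_pvinfo(int kvm_fd, int vm_fd)
        {
                struct kvm_ppc_pvinfo pvinfo;

                if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_GET_PVINFO) <= 0)
                        return -1;              /* capability not offered by this kernel */

                if (ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo) < 0)
                        return -1;

                printf("flags=%u hcall=%08x %08x %08x %08x\n", pvinfo.flags,
                       pvinfo.hcall[0], pvinfo.hcall[1], pvinfo.hcall[2], pvinfo.hcall[3]);
                return 0;
        }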
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c13cc48697aa..a0557422715e 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
@@ -36,9 +36,10 @@ | |||
36 | #define KVM_REQ_PENDING_TIMER 5 | 36 | #define KVM_REQ_PENDING_TIMER 5 |
37 | #define KVM_REQ_UNHALT 6 | 37 | #define KVM_REQ_UNHALT 6 |
38 | #define KVM_REQ_MMU_SYNC 7 | 38 | #define KVM_REQ_MMU_SYNC 7 |
39 | #define KVM_REQ_KVMCLOCK_UPDATE 8 | 39 | #define KVM_REQ_CLOCK_UPDATE 8 |
40 | #define KVM_REQ_KICK 9 | 40 | #define KVM_REQ_KICK 9 |
41 | #define KVM_REQ_DEACTIVATE_FPU 10 | 41 | #define KVM_REQ_DEACTIVATE_FPU 10 |
42 | #define KVM_REQ_EVENT 11 | ||
42 | 43 | ||
43 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 | 44 | #define KVM_USERSPACE_IRQ_SOURCE_ID 0 |
44 | 45 | ||
@@ -205,7 +206,7 @@ struct kvm { | |||
205 | 206 | ||
206 | struct mutex irq_lock; | 207 | struct mutex irq_lock; |
207 | #ifdef CONFIG_HAVE_KVM_IRQCHIP | 208 | #ifdef CONFIG_HAVE_KVM_IRQCHIP |
208 | struct kvm_irq_routing_table *irq_routing; | 209 | struct kvm_irq_routing_table __rcu *irq_routing; |
209 | struct hlist_head mask_notifier_list; | 210 | struct hlist_head mask_notifier_list; |
210 | struct hlist_head irq_ack_notifier_list; | 211 | struct hlist_head irq_ack_notifier_list; |
211 | #endif | 212 | #endif |
@@ -289,6 +290,9 @@ void kvm_arch_commit_memory_region(struct kvm *kvm, | |||
289 | void kvm_disable_largepages(void); | 290 | void kvm_disable_largepages(void); |
290 | void kvm_arch_flush_shadow(struct kvm *kvm); | 291 | void kvm_arch_flush_shadow(struct kvm *kvm); |
291 | 292 | ||
293 | int gfn_to_page_many_atomic(struct kvm *kvm, gfn_t gfn, struct page **pages, | ||
294 | int nr_pages); | ||
295 | |||
292 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); | 296 | struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn); |
293 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); | 297 | unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn); |
294 | void kvm_release_page_clean(struct page *page); | 298 | void kvm_release_page_clean(struct page *page); |
@@ -296,6 +300,8 @@ void kvm_release_page_dirty(struct page *page); | |||
296 | void kvm_set_page_dirty(struct page *page); | 300 | void kvm_set_page_dirty(struct page *page); |
297 | void kvm_set_page_accessed(struct page *page); | 301 | void kvm_set_page_accessed(struct page *page); |
298 | 302 | ||
303 | pfn_t hva_to_pfn_atomic(struct kvm *kvm, unsigned long addr); | ||
304 | pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn); | ||
299 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); | 305 | pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn); |
300 | pfn_t gfn_to_pfn_memslot(struct kvm *kvm, | 306 | pfn_t gfn_to_pfn_memslot(struct kvm *kvm, |
301 | struct kvm_memory_slot *slot, gfn_t gfn); | 307 | struct kvm_memory_slot *slot, gfn_t gfn); |
@@ -477,8 +483,7 @@ int kvm_deassign_device(struct kvm *kvm, | |||
477 | struct kvm_assigned_dev_kernel *assigned_dev); | 483 | struct kvm_assigned_dev_kernel *assigned_dev); |
478 | #else /* CONFIG_IOMMU_API */ | 484 | #else /* CONFIG_IOMMU_API */ |
479 | static inline int kvm_iommu_map_pages(struct kvm *kvm, | 485 | static inline int kvm_iommu_map_pages(struct kvm *kvm, |
480 | gfn_t base_gfn, | 486 | struct kvm_memory_slot *slot) |
481 | unsigned long npages) | ||
482 | { | 487 | { |
483 | return 0; | 488 | return 0; |
484 | } | 489 | } |
@@ -518,11 +523,22 @@ static inline void kvm_guest_exit(void) | |||
518 | current->flags &= ~PF_VCPU; | 523 | current->flags &= ~PF_VCPU; |
519 | } | 524 | } |
520 | 525 | ||
526 | static inline unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, | ||
527 | gfn_t gfn) | ||
528 | { | ||
529 | return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE; | ||
530 | } | ||
531 | |||
521 | static inline gpa_t gfn_to_gpa(gfn_t gfn) | 532 | static inline gpa_t gfn_to_gpa(gfn_t gfn) |
522 | { | 533 | { |
523 | return (gpa_t)gfn << PAGE_SHIFT; | 534 | return (gpa_t)gfn << PAGE_SHIFT; |
524 | } | 535 | } |
525 | 536 | ||
537 | static inline gfn_t gpa_to_gfn(gpa_t gpa) | ||
538 | { | ||
539 | return (gfn_t)(gpa >> PAGE_SHIFT); | ||
540 | } | ||
541 | |||
526 | static inline hpa_t pfn_to_hpa(pfn_t pfn) | 542 | static inline hpa_t pfn_to_hpa(pfn_t pfn) |
527 | { | 543 | { |
528 | return (hpa_t)pfn << PAGE_SHIFT; | 544 | return (hpa_t)pfn << PAGE_SHIFT; |
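The new gpa_to_gfn() and gfn_to_hva_memslot() helpers are pure arithmetic; composed, they translate a guest physical address into the host virtual address backing it. A short sketch (slot is assumed to be a valid memslot covering the gfn):

        static unsigned long example_gpa_to_hva(struct kvm_memory_slot *slot, gpa_t gpa)
        {
                gfn_t gfn = gpa_to_gfn(gpa);            /* drop the in-page offset */

                /* hva of the page plus the offset recovered from the gpa */
                return gfn_to_hva_memslot(slot, gfn) + (gpa - gfn_to_gpa(gfn));
        }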
diff --git a/include/linux/kvm_para.h b/include/linux/kvm_para.h index d73109243fda..47a070b0520e 100644 --- a/include/linux/kvm_para.h +++ b/include/linux/kvm_para.h | |||
@@ -17,6 +17,8 @@ | |||
17 | 17 | ||
18 | #define KVM_HC_VAPIC_POLL_IRQ 1 | 18 | #define KVM_HC_VAPIC_POLL_IRQ 1 |
19 | #define KVM_HC_MMU_OP 2 | 19 | #define KVM_HC_MMU_OP 2 |
20 | #define KVM_HC_FEATURES 3 | ||
21 | #define KVM_HC_PPC_MAP_MAGIC_PAGE 4 | ||
20 | 22 | ||
21 | /* | 23 | /* |
22 | * hypercalls use architecture specific | 24 | * hypercalls use architecture specific |
@@ -24,11 +26,6 @@ | |||
24 | #include <asm/kvm_para.h> | 26 | #include <asm/kvm_para.h> |
25 | 27 | ||
26 | #ifdef __KERNEL__ | 28 | #ifdef __KERNEL__ |
27 | #ifdef CONFIG_KVM_GUEST | ||
28 | void __init kvm_guest_init(void); | ||
29 | #else | ||
30 | #define kvm_guest_init() do { } while (0) | ||
31 | #endif | ||
32 | 29 | ||
33 | static inline int kvm_para_has_feature(unsigned int feature) | 30 | static inline int kvm_para_has_feature(unsigned int feature) |
34 | { | 31 | { |
diff --git a/include/linux/lglock.h b/include/linux/lglock.h new file mode 100644 index 000000000000..f549056fb20b --- /dev/null +++ b/include/linux/lglock.h | |||
@@ -0,0 +1,172 @@ | |||
1 | /* | ||
2 | * Specialised local-global spinlock. Can only be declared as global variables | ||
3 | * to avoid overhead and keep things simple (and we don't want to start using | ||
4 | * these inside dynamically allocated structures). | ||
5 | * | ||
6 | * "local/global locks" (lglocks) can be used to: | ||
7 | * | ||
8 | * - Provide fast exclusive access to per-CPU data, with exclusive access to | ||
9 | * another CPU's data allowed but possibly subject to contention, and to | ||
10 | * provide very slow exclusive access to all per-CPU data. | ||
11 | * - Or to provide very fast and scalable read serialisation, and to provide | ||
12 | * very slow exclusive serialisation of data (not necessarily per-CPU data). | ||
13 | * | ||
14 | * Brlocks are also implemented as a short-hand notation for the latter use | ||
15 | * case. | ||
16 | * | ||
17 | * Copyright 2009, 2010, Nick Piggin, Novell Inc. | ||
18 | */ | ||
19 | #ifndef __LINUX_LGLOCK_H | ||
20 | #define __LINUX_LGLOCK_H | ||
21 | |||
22 | #include <linux/spinlock.h> | ||
23 | #include <linux/lockdep.h> | ||
24 | #include <linux/percpu.h> | ||
25 | |||
26 | /* can make br locks by using local lock for read side, global lock for write */ | ||
27 | #define br_lock_init(name) name##_lock_init() | ||
28 | #define br_read_lock(name) name##_local_lock() | ||
29 | #define br_read_unlock(name) name##_local_unlock() | ||
30 | #define br_write_lock(name) name##_global_lock_online() | ||
31 | #define br_write_unlock(name) name##_global_unlock_online() | ||
32 | |||
33 | #define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name) | ||
34 | #define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name) | ||
35 | |||
36 | |||
37 | #define lg_lock_init(name) name##_lock_init() | ||
38 | #define lg_local_lock(name) name##_local_lock() | ||
39 | #define lg_local_unlock(name) name##_local_unlock() | ||
40 | #define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu) | ||
41 | #define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu) | ||
42 | #define lg_global_lock(name) name##_global_lock() | ||
43 | #define lg_global_unlock(name) name##_global_unlock() | ||
44 | #define lg_global_lock_online(name) name##_global_lock_online() | ||
45 | #define lg_global_unlock_online(name) name##_global_unlock_online() | ||
46 | |||
47 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
48 | #define LOCKDEP_INIT_MAP lockdep_init_map | ||
49 | |||
50 | #define DEFINE_LGLOCK_LOCKDEP(name) \ | ||
51 | struct lock_class_key name##_lock_key; \ | ||
52 | struct lockdep_map name##_lock_dep_map; \ | ||
53 | EXPORT_SYMBOL(name##_lock_dep_map) | ||
54 | |||
55 | #else | ||
56 | #define LOCKDEP_INIT_MAP(a, b, c, d) | ||
57 | |||
58 | #define DEFINE_LGLOCK_LOCKDEP(name) | ||
59 | #endif | ||
60 | |||
61 | |||
62 | #define DECLARE_LGLOCK(name) \ | ||
63 | extern void name##_lock_init(void); \ | ||
64 | extern void name##_local_lock(void); \ | ||
65 | extern void name##_local_unlock(void); \ | ||
66 | extern void name##_local_lock_cpu(int cpu); \ | ||
67 | extern void name##_local_unlock_cpu(int cpu); \ | ||
68 | extern void name##_global_lock(void); \ | ||
69 | extern void name##_global_unlock(void); \ | ||
70 | extern void name##_global_lock_online(void); \ | ||
71 | extern void name##_global_unlock_online(void); \ | ||
72 | |||
73 | #define DEFINE_LGLOCK(name) \ | ||
74 | \ | ||
75 | DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ | ||
76 | DEFINE_LGLOCK_LOCKDEP(name); \ | ||
77 | \ | ||
78 | void name##_lock_init(void) { \ | ||
79 | int i; \ | ||
80 | LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \ | ||
81 | for_each_possible_cpu(i) { \ | ||
82 | arch_spinlock_t *lock; \ | ||
83 | lock = &per_cpu(name##_lock, i); \ | ||
84 | *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \ | ||
85 | } \ | ||
86 | } \ | ||
87 | EXPORT_SYMBOL(name##_lock_init); \ | ||
88 | \ | ||
89 | void name##_local_lock(void) { \ | ||
90 | arch_spinlock_t *lock; \ | ||
91 | preempt_disable(); \ | ||
92 | rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ | ||
93 | lock = &__get_cpu_var(name##_lock); \ | ||
94 | arch_spin_lock(lock); \ | ||
95 | } \ | ||
96 | EXPORT_SYMBOL(name##_local_lock); \ | ||
97 | \ | ||
98 | void name##_local_unlock(void) { \ | ||
99 | arch_spinlock_t *lock; \ | ||
100 | rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ | ||
101 | lock = &__get_cpu_var(name##_lock); \ | ||
102 | arch_spin_unlock(lock); \ | ||
103 | preempt_enable(); \ | ||
104 | } \ | ||
105 | EXPORT_SYMBOL(name##_local_unlock); \ | ||
106 | \ | ||
107 | void name##_local_lock_cpu(int cpu) { \ | ||
108 | arch_spinlock_t *lock; \ | ||
109 | preempt_disable(); \ | ||
110 | rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \ | ||
111 | lock = &per_cpu(name##_lock, cpu); \ | ||
112 | arch_spin_lock(lock); \ | ||
113 | } \ | ||
114 | EXPORT_SYMBOL(name##_local_lock_cpu); \ | ||
115 | \ | ||
116 | void name##_local_unlock_cpu(int cpu) { \ | ||
117 | arch_spinlock_t *lock; \ | ||
118 | rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \ | ||
119 | lock = &per_cpu(name##_lock, cpu); \ | ||
120 | arch_spin_unlock(lock); \ | ||
121 | preempt_enable(); \ | ||
122 | } \ | ||
123 | EXPORT_SYMBOL(name##_local_unlock_cpu); \ | ||
124 | \ | ||
125 | void name##_global_lock_online(void) { \ | ||
126 | int i; \ | ||
127 | preempt_disable(); \ | ||
128 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ | ||
129 | for_each_online_cpu(i) { \ | ||
130 | arch_spinlock_t *lock; \ | ||
131 | lock = &per_cpu(name##_lock, i); \ | ||
132 | arch_spin_lock(lock); \ | ||
133 | } \ | ||
134 | } \ | ||
135 | EXPORT_SYMBOL(name##_global_lock_online); \ | ||
136 | \ | ||
137 | void name##_global_unlock_online(void) { \ | ||
138 | int i; \ | ||
139 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ | ||
140 | for_each_online_cpu(i) { \ | ||
141 | arch_spinlock_t *lock; \ | ||
142 | lock = &per_cpu(name##_lock, i); \ | ||
143 | arch_spin_unlock(lock); \ | ||
144 | } \ | ||
145 | preempt_enable(); \ | ||
146 | } \ | ||
147 | EXPORT_SYMBOL(name##_global_unlock_online); \ | ||
148 | \ | ||
149 | void name##_global_lock(void) { \ | ||
150 | int i; \ | ||
151 | preempt_disable(); \ | ||
152 | rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \ | ||
153 | for_each_possible_cpu(i) { \ | ||
154 | arch_spinlock_t *lock; \ | ||
155 | lock = &per_cpu(name##_lock, i); \ | ||
156 | arch_spin_lock(lock); \ | ||
157 | } \ | ||
158 | } \ | ||
159 | EXPORT_SYMBOL(name##_global_lock); \ | ||
160 | \ | ||
161 | void name##_global_unlock(void) { \ | ||
162 | int i; \ | ||
163 | rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \ | ||
164 | for_each_possible_cpu(i) { \ | ||
165 | arch_spinlock_t *lock; \ | ||
166 | lock = &per_cpu(name##_lock, i); \ | ||
167 | arch_spin_unlock(lock); \ | ||
168 | } \ | ||
169 | preempt_enable(); \ | ||
170 | } \ | ||
171 | EXPORT_SYMBOL(name##_global_unlock); | ||
172 | #endif | ||
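A hedged usage sketch of the macros above; my_files_lock and the surrounding functions are hypothetical names, not existing kernel symbols. The point is the split between the cheap per-CPU fast path and the expensive all-CPUs slow path:

#include <linux/lglock.h>

DEFINE_LGLOCK(my_files_lock);		/* per-CPU arch_spinlock_t plus helpers */

static int __init my_subsys_init(void)
{
	lg_lock_init(my_files_lock);	/* expands to my_files_lock_lock_init() */
	return 0;
}

static void my_fast_path(void)
{
	lg_local_lock(my_files_lock);	/* takes only this CPU's spinlock */
	/* ... touch this CPU's data ... */
	lg_local_unlock(my_files_lock);
}

static void my_slow_path(void)
{
	lg_global_lock_online(my_files_lock);	/* takes every online CPU's lock */
	/* ... walk all per-CPU data exclusively ... */
	lg_global_unlock_online(my_files_lock);
}

The brlock aliases map onto the same generated functions: br_read_lock() is the local (per-CPU) lock and br_write_lock() is the online-CPUs global lock, which is what makes reads fast and writes slow.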
diff --git a/include/linux/libata.h b/include/linux/libata.h index f010f18a0f86..15b77b8dc7e1 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <scsi/scsi_host.h> | 37 | #include <scsi/scsi_host.h> |
38 | #include <linux/acpi.h> | 38 | #include <linux/acpi.h> |
39 | #include <linux/cdrom.h> | 39 | #include <linux/cdrom.h> |
40 | #include <linux/sched.h> | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * Define if arch has non-standard setup. This is a _PCI_ standard | 43 | * Define if arch has non-standard setup. This is a _PCI_ standard |
@@ -172,6 +173,7 @@ enum { | |||
172 | ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ | 173 | ATA_LFLAG_NO_RETRY = (1 << 5), /* don't retry this link */ |
173 | ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ | 174 | ATA_LFLAG_DISABLED = (1 << 6), /* link is disabled */ |
174 | ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ | 175 | ATA_LFLAG_SW_ACTIVITY = (1 << 7), /* keep activity stats */ |
176 | ATA_LFLAG_NO_LPM = (1 << 8), /* disable LPM on this link */ | ||
175 | 177 | ||
176 | /* struct ata_port flags */ | 178 | /* struct ata_port flags */ |
177 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ | 179 | ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */ |
@@ -196,7 +198,7 @@ enum { | |||
196 | ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ | 198 | ATA_FLAG_ACPI_SATA = (1 << 17), /* need native SATA ACPI layout */ |
197 | ATA_FLAG_AN = (1 << 18), /* controller supports AN */ | 199 | ATA_FLAG_AN = (1 << 18), /* controller supports AN */ |
198 | ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ | 200 | ATA_FLAG_PMP = (1 << 19), /* controller supports PMP */ |
199 | ATA_FLAG_IPM = (1 << 20), /* driver can handle IPM */ | 201 | ATA_FLAG_LPM = (1 << 20), /* driver can handle LPM */ |
200 | ATA_FLAG_EM = (1 << 21), /* driver supports enclosure | 202 | ATA_FLAG_EM = (1 << 21), /* driver supports enclosure |
201 | * management */ | 203 | * management */ |
202 | ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity | 204 | ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity |
@@ -324,23 +326,23 @@ enum { | |||
324 | ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ | 326 | ATA_EH_HARDRESET = (1 << 2), /* meaningful only in ->prereset */ |
325 | ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, | 327 | ATA_EH_RESET = ATA_EH_SOFTRESET | ATA_EH_HARDRESET, |
326 | ATA_EH_ENABLE_LINK = (1 << 3), | 328 | ATA_EH_ENABLE_LINK = (1 << 3), |
327 | ATA_EH_LPM = (1 << 4), /* link power management action */ | ||
328 | ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ | 329 | ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */ |
329 | 330 | ||
330 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, | 331 | ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK, |
331 | ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | | 332 | ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET | |
332 | ATA_EH_ENABLE_LINK | ATA_EH_LPM, | 333 | ATA_EH_ENABLE_LINK, |
333 | 334 | ||
334 | /* ata_eh_info->flags */ | 335 | /* ata_eh_info->flags */ |
335 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ | 336 | ATA_EHI_HOTPLUGGED = (1 << 0), /* could have been hotplugged */ |
336 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ | 337 | ATA_EHI_NO_AUTOPSY = (1 << 2), /* no autopsy */ |
337 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ | 338 | ATA_EHI_QUIET = (1 << 3), /* be quiet */ |
339 | ATA_EHI_NO_RECOVERY = (1 << 4), /* no recovery */ | ||
338 | 340 | ||
339 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ | 341 | ATA_EHI_DID_SOFTRESET = (1 << 16), /* already soft-reset this port */ |
340 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */ | 342 | ATA_EHI_DID_HARDRESET = (1 << 17), /* already hard-reset this port */ |
341 | ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ | 343 | ATA_EHI_PRINTINFO = (1 << 18), /* print configuration info */ |
342 | ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ | 344 | ATA_EHI_SETMODE = (1 << 19), /* configure transfer mode */ |
343 | ATA_EHI_POST_SETMODE = (1 << 20), /* revaildating after setmode */ | 345 | ATA_EHI_POST_SETMODE = (1 << 20), /* revalidating after setmode */ |
344 | 346 | ||
345 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, | 347 | ATA_EHI_DID_RESET = ATA_EHI_DID_SOFTRESET | ATA_EHI_DID_HARDRESET, |
346 | 348 | ||
@@ -376,7 +378,6 @@ enum { | |||
376 | ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ | 378 | ATA_HORKAGE_BROKEN_HPA = (1 << 4), /* Broken HPA */ |
377 | ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ | 379 | ATA_HORKAGE_DISABLE = (1 << 5), /* Disable it */ |
378 | ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ | 380 | ATA_HORKAGE_HPA_SIZE = (1 << 6), /* native size off by one */ |
379 | ATA_HORKAGE_IPM = (1 << 7), /* Link PM problems */ | ||
380 | ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ | 381 | ATA_HORKAGE_IVB = (1 << 8), /* cbl det validity bit bugs */ |
381 | ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ | 382 | ATA_HORKAGE_STUCK_ERR = (1 << 9), /* stuck ERR on next PACKET */ |
382 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ | 383 | ATA_HORKAGE_BRIDGE_OK = (1 << 10), /* no bridge limits */ |
@@ -463,6 +464,22 @@ enum ata_completion_errors { | |||
463 | AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ | 464 | AC_ERR_NCQ = (1 << 10), /* marker for offending NCQ qc */ |
464 | }; | 465 | }; |
465 | 466 | ||
467 | /* | ||
468 | * Link power management policy: If you alter this, you also need to | ||
469 | * alter libata-scsi.c (for the ascii descriptions) | ||
470 | */ | ||
471 | enum ata_lpm_policy { | ||
472 | ATA_LPM_UNKNOWN, | ||
473 | ATA_LPM_MAX_POWER, | ||
474 | ATA_LPM_MED_POWER, | ||
475 | ATA_LPM_MIN_POWER, | ||
476 | }; | ||
477 | |||
478 | enum ata_lpm_hints { | ||
479 | ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */ | ||
480 | ATA_LPM_HIPM = (1 << 1), /* may use HIPM */ | ||
481 | }; | ||
482 | |||
466 | /* forward declarations */ | 483 | /* forward declarations */ |
467 | struct scsi_device; | 484 | struct scsi_device; |
468 | struct ata_port_operations; | 485 | struct ata_port_operations; |
@@ -477,16 +494,6 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes, | |||
477 | unsigned long deadline); | 494 | unsigned long deadline); |
478 | typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); | 495 | typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes); |
479 | 496 | ||
480 | /* | ||
481 | * host pm policy: If you alter this, you also need to alter libata-scsi.c | ||
482 | * (for the ascii descriptions) | ||
483 | */ | ||
484 | enum link_pm { | ||
485 | NOT_AVAILABLE, | ||
486 | MIN_POWER, | ||
487 | MAX_PERFORMANCE, | ||
488 | MEDIUM_POWER, | ||
489 | }; | ||
490 | extern struct device_attribute dev_attr_link_power_management_policy; | 497 | extern struct device_attribute dev_attr_link_power_management_policy; |
491 | extern struct device_attribute dev_attr_unload_heads; | 498 | extern struct device_attribute dev_attr_unload_heads; |
492 | extern struct device_attribute dev_attr_em_message_type; | 499 | extern struct device_attribute dev_attr_em_message_type; |
@@ -529,6 +536,10 @@ struct ata_host { | |||
529 | void *private_data; | 536 | void *private_data; |
530 | struct ata_port_operations *ops; | 537 | struct ata_port_operations *ops; |
531 | unsigned long flags; | 538 | unsigned long flags; |
539 | |||
540 | struct mutex eh_mutex; | ||
541 | struct task_struct *eh_owner; | ||
542 | |||
532 | #ifdef CONFIG_ATA_ACPI | 543 | #ifdef CONFIG_ATA_ACPI |
533 | acpi_handle acpi_handle; | 544 | acpi_handle acpi_handle; |
534 | #endif | 545 | #endif |
@@ -559,13 +570,13 @@ struct ata_queued_cmd { | |||
559 | unsigned int extrabytes; | 570 | unsigned int extrabytes; |
560 | unsigned int curbytes; | 571 | unsigned int curbytes; |
561 | 572 | ||
562 | struct scatterlist *cursg; | ||
563 | unsigned int cursg_ofs; | ||
564 | |||
565 | struct scatterlist sgent; | 573 | struct scatterlist sgent; |
566 | 574 | ||
567 | struct scatterlist *sg; | 575 | struct scatterlist *sg; |
568 | 576 | ||
577 | struct scatterlist *cursg; | ||
578 | unsigned int cursg_ofs; | ||
579 | |||
569 | unsigned int err_mask; | 580 | unsigned int err_mask; |
570 | struct ata_taskfile result_tf; | 581 | struct ata_taskfile result_tf; |
571 | ata_qc_cb_t complete_fn; | 582 | ata_qc_cb_t complete_fn; |
@@ -603,6 +614,7 @@ struct ata_device { | |||
603 | union acpi_object *gtf_cache; | 614 | union acpi_object *gtf_cache; |
604 | unsigned int gtf_filter; | 615 | unsigned int gtf_filter; |
605 | #endif | 616 | #endif |
617 | struct device tdev; | ||
606 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ | 618 | /* n_sector is CLEAR_BEGIN, read comment above CLEAR_BEGIN */ |
607 | u64 n_sectors; /* size of device, if ATA */ | 619 | u64 n_sectors; /* size of device, if ATA */ |
608 | u64 n_native_sectors; /* native size, if ATA */ | 620 | u64 n_native_sectors; /* native size, if ATA */ |
@@ -689,6 +701,7 @@ struct ata_link { | |||
689 | struct ata_port *ap; | 701 | struct ata_port *ap; |
690 | int pmp; /* port multiplier port # */ | 702 | int pmp; /* port multiplier port # */ |
691 | 703 | ||
704 | struct device tdev; | ||
692 | unsigned int active_tag; /* active tag on this link */ | 705 | unsigned int active_tag; /* active tag on this link */ |
693 | u32 sactive; /* active NCQ commands */ | 706 | u32 sactive; /* active NCQ commands */ |
694 | 707 | ||
@@ -698,6 +711,7 @@ struct ata_link { | |||
698 | unsigned int hw_sata_spd_limit; | 711 | unsigned int hw_sata_spd_limit; |
699 | unsigned int sata_spd_limit; | 712 | unsigned int sata_spd_limit; |
700 | unsigned int sata_spd; /* current SATA PHY speed */ | 713 | unsigned int sata_spd; /* current SATA PHY speed */ |
714 | enum ata_lpm_policy lpm_policy; | ||
701 | 715 | ||
702 | /* record runtime error info, protected by host_set lock */ | 716 | /* record runtime error info, protected by host_set lock */ |
703 | struct ata_eh_info eh_info; | 717 | struct ata_eh_info eh_info; |
@@ -706,6 +720,8 @@ struct ata_link { | |||
706 | 720 | ||
707 | struct ata_device device[ATA_MAX_DEVICES]; | 721 | struct ata_device device[ATA_MAX_DEVICES]; |
708 | }; | 722 | }; |
723 | #define ATA_LINK_CLEAR_BEGIN offsetof(struct ata_link, active_tag) | ||
724 | #define ATA_LINK_CLEAR_END offsetof(struct ata_link, device[0]) | ||
709 | 725 | ||
710 | struct ata_port { | 726 | struct ata_port { |
711 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ | 727 | struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ |
@@ -723,6 +739,7 @@ struct ata_port { | |||
723 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ | 739 | struct ata_ioports ioaddr; /* ATA cmd/ctl/dma register blocks */ |
724 | u8 ctl; /* cache of ATA control register */ | 740 | u8 ctl; /* cache of ATA control register */ |
725 | u8 last_ctl; /* Cache last written value */ | 741 | u8 last_ctl; /* Cache last written value */ |
742 | struct ata_link* sff_pio_task_link; /* link currently used */ | ||
726 | struct delayed_work sff_pio_task; | 743 | struct delayed_work sff_pio_task; |
727 | #ifdef CONFIG_ATA_BMDMA | 744 | #ifdef CONFIG_ATA_BMDMA |
728 | struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ | 745 | struct ata_bmdma_prd *bmdma_prd; /* BMDMA SG list */ |
@@ -750,6 +767,7 @@ struct ata_port { | |||
750 | struct ata_port_stats stats; | 767 | struct ata_port_stats stats; |
751 | struct ata_host *host; | 768 | struct ata_host *host; |
752 | struct device *dev; | 769 | struct device *dev; |
770 | struct device tdev; | ||
753 | 771 | ||
754 | struct mutex scsi_scan_mutex; | 772 | struct mutex scsi_scan_mutex; |
755 | struct delayed_work hotplug_task; | 773 | struct delayed_work hotplug_task; |
@@ -765,7 +783,7 @@ struct ata_port { | |||
765 | 783 | ||
766 | pm_message_t pm_mesg; | 784 | pm_message_t pm_mesg; |
767 | int *pm_result; | 785 | int *pm_result; |
768 | enum link_pm pm_policy; | 786 | enum ata_lpm_policy target_lpm_policy; |
769 | 787 | ||
770 | struct timer_list fastdrain_timer; | 788 | struct timer_list fastdrain_timer; |
771 | unsigned long fastdrain_cnt; | 789 | unsigned long fastdrain_cnt; |
@@ -831,8 +849,8 @@ struct ata_port_operations { | |||
831 | int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); | 849 | int (*scr_write)(struct ata_link *link, unsigned int sc_reg, u32 val); |
832 | void (*pmp_attach)(struct ata_port *ap); | 850 | void (*pmp_attach)(struct ata_port *ap); |
833 | void (*pmp_detach)(struct ata_port *ap); | 851 | void (*pmp_detach)(struct ata_port *ap); |
834 | int (*enable_pm)(struct ata_port *ap, enum link_pm policy); | 852 | int (*set_lpm)(struct ata_link *link, enum ata_lpm_policy policy, |
835 | void (*disable_pm)(struct ata_port *ap); | 853 | unsigned hints); |
836 | 854 | ||
837 | /* | 855 | /* |
838 | * Start, stop, suspend and resume | 856 | * Start, stop, suspend and resume |
@@ -944,6 +962,8 @@ extern int sata_link_debounce(struct ata_link *link, | |||
944 | const unsigned long *params, unsigned long deadline); | 962 | const unsigned long *params, unsigned long deadline); |
945 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, | 963 | extern int sata_link_resume(struct ata_link *link, const unsigned long *params, |
946 | unsigned long deadline); | 964 | unsigned long deadline); |
965 | extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy, | ||
966 | bool spm_wakeup); | ||
947 | extern int sata_link_hardreset(struct ata_link *link, | 967 | extern int sata_link_hardreset(struct ata_link *link, |
948 | const unsigned long *timing, unsigned long deadline, | 968 | const unsigned long *timing, unsigned long deadline, |
949 | bool *online, int (*check_ready)(struct ata_link *)); | 969 | bool *online, int (*check_ready)(struct ata_link *)); |
@@ -989,8 +1009,9 @@ extern int ata_host_suspend(struct ata_host *host, pm_message_t mesg); | |||
989 | extern void ata_host_resume(struct ata_host *host); | 1009 | extern void ata_host_resume(struct ata_host *host); |
990 | #endif | 1010 | #endif |
991 | extern int ata_ratelimit(void); | 1011 | extern int ata_ratelimit(void); |
992 | extern u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | 1012 | extern void ata_msleep(struct ata_port *ap, unsigned int msecs); |
993 | unsigned long interval, unsigned long timeout); | 1013 | extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, |
1014 | u32 val, unsigned long interval, unsigned long timeout); | ||
994 | extern int atapi_cmd_type(u8 opcode); | 1015 | extern int atapi_cmd_type(u8 opcode); |
995 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, | 1016 | extern void ata_tf_to_fis(const struct ata_taskfile *tf, |
996 | u8 pmp, int is_cmd, u8 *fis); | 1017 | u8 pmp, int is_cmd, u8 *fis); |
@@ -1594,7 +1615,7 @@ extern void ata_sff_irq_on(struct ata_port *ap); | |||
1594 | extern void ata_sff_irq_clear(struct ata_port *ap); | 1615 | extern void ata_sff_irq_clear(struct ata_port *ap); |
1595 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, | 1616 | extern int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc, |
1596 | u8 status, int in_wq); | 1617 | u8 status, int in_wq); |
1597 | extern void ata_sff_queue_pio_task(struct ata_port *ap, unsigned long delay); | 1618 | extern void ata_sff_queue_pio_task(struct ata_link *link, unsigned long delay); |
1598 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); | 1619 | extern unsigned int ata_sff_qc_issue(struct ata_queued_cmd *qc); |
1599 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); | 1620 | extern bool ata_sff_qc_fill_rtf(struct ata_queued_cmd *qc); |
1600 | extern unsigned int ata_sff_port_intr(struct ata_port *ap, | 1621 | extern unsigned int ata_sff_port_intr(struct ata_port *ap, |
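A rough, non-authoritative sketch of how a SATA driver might wire up the new ->set_lpm() hook shown above; my_sata_set_lpm and my_sata_ops are made up, and real drivers (ahci, for instance) do considerably more HIPM/DIPM bookkeeping:

static int my_sata_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
			   unsigned hints)
{
	/* staying at full power: nothing to program in this sketch */
	if (policy == ATA_LPM_MAX_POWER)
		return 0;

	/*
	 * Hand the power-saving policies to the generic SCR helper; whether
	 * to request an SPM wakeup first is a driver decision, hard-coded
	 * here for brevity.
	 */
	return sata_link_scr_lpm(link, policy, false);
}

static struct ata_port_operations my_sata_ops = {
	.inherits	= &sata_port_ops,
	.set_lpm	= my_sata_set_lpm,	/* replaces enable_pm/disable_pm */
};

The hints argument carries ATA_LPM_EMPTY/ATA_LPM_HIPM, so the driver can distinguish an empty, probing port from a populated one and know whether host-initiated power management may be used.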
diff --git a/include/linux/list.h b/include/linux/list.h index d167b5d7c0ac..88a000617d77 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
@@ -5,7 +5,6 @@ | |||
5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
6 | #include <linux/poison.h> | 6 | #include <linux/poison.h> |
7 | #include <linux/prefetch.h> | 7 | #include <linux/prefetch.h> |
8 | #include <asm/system.h> | ||
9 | 8 | ||
10 | /* | 9 | /* |
11 | * Simple doubly linked list implementation. | 10 | * Simple doubly linked list implementation. |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 06aed8305bf3..71c09b26c759 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -32,6 +32,17 @@ extern int lock_stat; | |||
32 | #define MAX_LOCKDEP_SUBCLASSES 8UL | 32 | #define MAX_LOCKDEP_SUBCLASSES 8UL |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * NR_LOCKDEP_CACHING_CLASSES ... Number of classes | ||
36 | * cached in the instance of lockdep_map | ||
37 | * | ||
38 | * Currently the main class (subclass == 0) and the single-depth subclass | ||
39 | * are cached in lockdep_map. This optimization mainly targets | ||
40 | * rq->lock: double_rq_lock() acquires this highly contended lock at | ||
41 | * both the main class and single depth. | ||
42 | */ | ||
43 | #define NR_LOCKDEP_CACHING_CLASSES 2 | ||
44 | |||
45 | /* | ||
35 | * Lock-classes are keyed via unique addresses, by embedding the | 46 | * Lock-classes are keyed via unique addresses, by embedding the |
36 | * lockclass-key into the kernel (or module) .data section. (For | 47 | * lockclass-key into the kernel (or module) .data section. (For |
37 | * static locks we use the lock address itself as the key.) | 48 | * static locks we use the lock address itself as the key.) |
@@ -138,7 +149,7 @@ void clear_lock_stats(struct lock_class *class); | |||
138 | */ | 149 | */ |
139 | struct lockdep_map { | 150 | struct lockdep_map { |
140 | struct lock_class_key *key; | 151 | struct lock_class_key *key; |
141 | struct lock_class *class_cache; | 152 | struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES]; |
142 | const char *name; | 153 | const char *name; |
143 | #ifdef CONFIG_LOCK_STAT | 154 | #ifdef CONFIG_LOCK_STAT |
144 | int cpu; | 155 | int cpu; |
@@ -424,14 +435,6 @@ do { \ | |||
424 | 435 | ||
425 | #endif /* CONFIG_LOCKDEP */ | 436 | #endif /* CONFIG_LOCKDEP */ |
426 | 437 | ||
427 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
428 | extern void early_init_irq_lock_class(void); | ||
429 | #else | ||
430 | static inline void early_init_irq_lock_class(void) | ||
431 | { | ||
432 | } | ||
433 | #endif | ||
434 | |||
435 | #ifdef CONFIG_TRACE_IRQFLAGS | 438 | #ifdef CONFIG_TRACE_IRQFLAGS |
436 | extern void early_boot_irqs_off(void); | 439 | extern void early_boot_irqs_off(void); |
437 | extern void early_boot_irqs_on(void); | 440 | extern void early_boot_irqs_on(void); |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index a59faf2b5edd..62a10c2a11f2 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _LINUX_MEMBLOCK_H | 2 | #define _LINUX_MEMBLOCK_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #ifdef CONFIG_HAVE_MEMBLOCK | ||
5 | /* | 6 | /* |
6 | * Logical memory blocks. | 7 | * Logical memory blocks. |
7 | * | 8 | * |
@@ -16,73 +17,150 @@ | |||
16 | #include <linux/init.h> | 17 | #include <linux/init.h> |
17 | #include <linux/mm.h> | 18 | #include <linux/mm.h> |
18 | 19 | ||
19 | #define MAX_MEMBLOCK_REGIONS 128 | 20 | #include <asm/memblock.h> |
20 | 21 | ||
21 | struct memblock_property { | 22 | #define INIT_MEMBLOCK_REGIONS 128 |
22 | u64 base; | 23 | #define MEMBLOCK_ERROR 0 |
23 | u64 size; | ||
24 | }; | ||
25 | 24 | ||
26 | struct memblock_region { | 25 | struct memblock_region { |
27 | unsigned long cnt; | 26 | phys_addr_t base; |
28 | u64 size; | 27 | phys_addr_t size; |
29 | struct memblock_property region[MAX_MEMBLOCK_REGIONS+1]; | 28 | }; |
29 | |||
30 | struct memblock_type { | ||
31 | unsigned long cnt; /* number of regions */ | ||
32 | unsigned long max; /* size of the allocated array */ | ||
33 | struct memblock_region *regions; | ||
30 | }; | 34 | }; |
31 | 35 | ||
32 | struct memblock { | 36 | struct memblock { |
33 | unsigned long debug; | 37 | phys_addr_t current_limit; |
34 | u64 rmo_size; | 38 | phys_addr_t memory_size; /* Updated by memblock_analyze() */ |
35 | struct memblock_region memory; | 39 | struct memblock_type memory; |
36 | struct memblock_region reserved; | 40 | struct memblock_type reserved; |
37 | }; | 41 | }; |
38 | 42 | ||
39 | extern struct memblock memblock; | 43 | extern struct memblock memblock; |
44 | extern int memblock_debug; | ||
45 | extern int memblock_can_resize; | ||
40 | 46 | ||
41 | extern void __init memblock_init(void); | 47 | #define memblock_dbg(fmt, ...) \ |
42 | extern void __init memblock_analyze(void); | 48 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
43 | extern long memblock_add(u64 base, u64 size); | 49 | |
44 | extern long memblock_remove(u64 base, u64 size); | 50 | u64 memblock_find_in_range(u64 start, u64 end, u64 size, u64 align); |
45 | extern long __init memblock_free(u64 base, u64 size); | 51 | int memblock_free_reserved_regions(void); |
46 | extern long __init memblock_reserve(u64 base, u64 size); | 52 | int memblock_reserve_reserved_regions(void); |
47 | extern u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, | 53 | |
48 | u64 (*nid_range)(u64, u64, int *)); | 54 | extern void memblock_init(void); |
49 | extern u64 __init memblock_alloc(u64 size, u64 align); | 55 | extern void memblock_analyze(void); |
50 | extern u64 __init memblock_alloc_base(u64 size, | 56 | extern long memblock_add(phys_addr_t base, phys_addr_t size); |
51 | u64, u64 max_addr); | 57 | extern long memblock_remove(phys_addr_t base, phys_addr_t size); |
52 | extern u64 __init __memblock_alloc_base(u64 size, | 58 | extern long memblock_free(phys_addr_t base, phys_addr_t size); |
53 | u64 align, u64 max_addr); | 59 | extern long memblock_reserve(phys_addr_t base, phys_addr_t size); |
54 | extern u64 __init memblock_phys_mem_size(void); | 60 | |
55 | extern u64 memblock_end_of_DRAM(void); | 61 | /* The NUMA-aware allocator is only available if |
56 | extern void __init memblock_enforce_memory_limit(u64 memory_limit); | 62 | * CONFIG_ARCH_POPULATES_NODE_MAP is set |
57 | extern int __init memblock_is_reserved(u64 addr); | 63 | */ |
58 | extern int memblock_is_region_reserved(u64 base, u64 size); | 64 | extern phys_addr_t memblock_alloc_nid(phys_addr_t size, phys_addr_t align, |
59 | extern int memblock_find(struct memblock_property *res); | 65 | int nid); |
66 | extern phys_addr_t memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, | ||
67 | int nid); | ||
68 | |||
69 | extern phys_addr_t memblock_alloc(phys_addr_t size, phys_addr_t align); | ||
70 | |||
71 | /* Flags for memblock_alloc_base() and __memblock_alloc_base() */ | ||
72 | #define MEMBLOCK_ALLOC_ANYWHERE (~(phys_addr_t)0) | ||
73 | #define MEMBLOCK_ALLOC_ACCESSIBLE 0 | ||
74 | |||
75 | extern phys_addr_t memblock_alloc_base(phys_addr_t size, | ||
76 | phys_addr_t align, | ||
77 | phys_addr_t max_addr); | ||
78 | extern phys_addr_t __memblock_alloc_base(phys_addr_t size, | ||
79 | phys_addr_t align, | ||
80 | phys_addr_t max_addr); | ||
81 | extern phys_addr_t memblock_phys_mem_size(void); | ||
82 | extern phys_addr_t memblock_end_of_DRAM(void); | ||
83 | extern void memblock_enforce_memory_limit(phys_addr_t memory_limit); | ||
84 | extern int memblock_is_memory(phys_addr_t addr); | ||
85 | extern int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); | ||
86 | extern int memblock_is_reserved(phys_addr_t addr); | ||
87 | extern int memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); | ||
60 | 88 | ||
61 | extern void memblock_dump_all(void); | 89 | extern void memblock_dump_all(void); |
62 | 90 | ||
63 | static inline u64 | 91 | /* Provided by the architecture */ |
64 | memblock_size_bytes(struct memblock_region *type, unsigned long region_nr) | 92 | extern phys_addr_t memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid); |
93 | extern int memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, | ||
94 | phys_addr_t addr2, phys_addr_t size2); | ||
95 | |||
96 | /** | ||
97 | * memblock_set_current_limit - Set the current allocation limit to allow | ||
98 | * limiting allocations to what is currently | ||
99 | * accessible during boot | ||
100 | * @limit: New limit value (physical address) | ||
101 | */ | ||
102 | extern void memblock_set_current_limit(phys_addr_t limit); | ||
103 | |||
104 | |||
105 | /* | ||
106 | * pfn conversion functions | ||
107 | * | ||
108 | * While the memory MEMBLOCKs should always be page aligned, the reserved | ||
109 | * MEMBLOCKs may not be. These accessors attempt to provide a very clear | ||
110 | * idea of what they return for such non-aligned MEMBLOCKs. | ||
111 | */ | ||
112 | |||
113 | /** | ||
114 | * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region | ||
115 | * @reg: memblock_region structure | ||
116 | */ | ||
117 | static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg) | ||
65 | { | 118 | { |
66 | return type->region[region_nr].size; | 119 | return PFN_UP(reg->base); |
67 | } | 120 | } |
68 | static inline u64 | 121 | |
69 | memblock_size_pages(struct memblock_region *type, unsigned long region_nr) | 122 | /** |
123 | * memblock_region_memory_end_pfn - Return the end_pfn of this region | ||
124 | * @reg: memblock_region structure | ||
125 | */ | ||
126 | static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg) | ||
70 | { | 127 | { |
71 | return memblock_size_bytes(type, region_nr) >> PAGE_SHIFT; | 128 | return PFN_DOWN(reg->base + reg->size); |
72 | } | 129 | } |
73 | static inline u64 | 130 | |
74 | memblock_start_pfn(struct memblock_region *type, unsigned long region_nr) | 131 | /** |
132 | * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region | ||
133 | * @reg: memblock_region structure | ||
134 | */ | ||
135 | static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg) | ||
75 | { | 136 | { |
76 | return type->region[region_nr].base >> PAGE_SHIFT; | 137 | return PFN_DOWN(reg->base); |
77 | } | 138 | } |
78 | static inline u64 | 139 | |
79 | memblock_end_pfn(struct memblock_region *type, unsigned long region_nr) | 140 | /** |
141 | * memblock_region_reserved_end_pfn - Return the end_pfn of this region | ||
142 | * @reg: memblock_region structure | ||
143 | */ | ||
144 | static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg) | ||
80 | { | 145 | { |
81 | return memblock_start_pfn(type, region_nr) + | 146 | return PFN_UP(reg->base + reg->size); |
82 | memblock_size_pages(type, region_nr); | ||
83 | } | 147 | } |
84 | 148 | ||
85 | #include <asm/memblock.h> | 149 | #define for_each_memblock(memblock_type, region) \ |
150 | for (region = memblock.memblock_type.regions; \ | ||
151 | region < (memblock.memblock_type.regions + memblock.memblock_type.cnt); \ | ||
152 | region++) | ||
153 | |||
154 | |||
155 | #ifdef ARCH_DISCARD_MEMBLOCK | ||
156 | #define __init_memblock __init | ||
157 | #define __initdata_memblock __initdata | ||
158 | #else | ||
159 | #define __init_memblock | ||
160 | #define __initdata_memblock | ||
161 | #endif | ||
162 | |||
163 | #endif /* CONFIG_HAVE_MEMBLOCK */ | ||
86 | 164 | ||
87 | #endif /* __KERNEL__ */ | 165 | #endif /* __KERNEL__ */ |
88 | 166 | ||
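A hedged sketch of how early arch code might drive the reworked phys_addr_t-based API above; the function name and every address or size below are made up for illustration:

#include <linux/kernel.h>
#include <linux/memblock.h>

void __init my_arch_bootmem_init(void)
{
	struct memblock_region *reg;
	phys_addr_t page;

	memblock_init();

	/* register 512 MB of RAM at 2 GB and reserve a kernel image region */
	memblock_add(0x80000000, 0x20000000);
	memblock_reserve(0x80008000, 0x00800000);

	/* only hand out memory that is already accessible this early */
	memblock_set_current_limit(0x90000000);

	memblock_analyze();		/* recomputes memblock.memory_size */

	/* boot-time allocation, honouring the current limit */
	page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	pr_info("early page at %#llx\n", (unsigned long long)page);

	/* walk every registered memory region */
	for_each_memblock(memory, reg)
		pr_info("memory: %#llx + %#llx\n",
			(unsigned long long)reg->base,
			(unsigned long long)reg->size);
}

memblock_set_current_limit() is what keeps these early allocations inside the range the architecture has already mapped; MEMBLOCK_ALLOC_ACCESSIBLE and MEMBLOCK_ALLOC_ANYWHERE serve the same purpose for the *_alloc_base() variants.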
diff --git a/include/linux/memory.h b/include/linux/memory.h index 85582e1bcee9..06c1fa0a5c7b 100644 --- a/include/linux/memory.h +++ b/include/linux/memory.h | |||
@@ -23,6 +23,8 @@ | |||
23 | struct memory_block { | 23 | struct memory_block { |
24 | unsigned long phys_index; | 24 | unsigned long phys_index; |
25 | unsigned long state; | 25 | unsigned long state; |
26 | int section_count; | ||
27 | |||
26 | /* | 28 | /* |
27 | * This serializes all state change requests. It isn't | 29 | * This serializes all state change requests. It isn't |
28 | * held during creation because the control files are | 30 | * held during creation because the control files are |
@@ -113,6 +115,8 @@ extern int memory_dev_init(void); | |||
113 | extern int remove_memory_block(unsigned long, struct mem_section *, int); | 115 | extern int remove_memory_block(unsigned long, struct mem_section *, int); |
114 | extern int memory_notify(unsigned long val, void *v); | 116 | extern int memory_notify(unsigned long val, void *v); |
115 | extern int memory_isolate_notify(unsigned long val, void *v); | 117 | extern int memory_isolate_notify(unsigned long val, void *v); |
118 | extern struct memory_block *find_memory_block_hinted(struct mem_section *, | ||
119 | struct memory_block *); | ||
116 | extern struct memory_block *find_memory_block(struct mem_section *); | 120 | extern struct memory_block *find_memory_block(struct mem_section *); |
117 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) | 121 | #define CONFIG_MEM_BLOCK_SIZE (PAGES_PER_SECTION<<PAGE_SHIFT) |
118 | enum mem_add_context { BOOT, HOTPLUG }; | 122 | enum mem_add_context { BOOT, HOTPLUG }; |
diff --git a/include/linux/mfd/tc35892.h b/include/linux/mfd/tc35892.h index e47f770d3068..eff3094ca84e 100644 --- a/include/linux/mfd/tc35892.h +++ b/include/linux/mfd/tc35892.h | |||
@@ -111,9 +111,13 @@ extern int tc35892_set_bits(struct tc35892 *tc35892, u8 reg, u8 mask, u8 val); | |||
111 | * struct tc35892_gpio_platform_data - TC35892 GPIO platform data | 111 | * struct tc35892_gpio_platform_data - TC35892 GPIO platform data |
112 | * @gpio_base: first gpio number assigned to TC35892. A maximum of | 112 | * @gpio_base: first gpio number assigned to TC35892. A maximum of |
113 | * %TC35892_NR_GPIOS GPIOs will be allocated. | 113 | * %TC35892_NR_GPIOS GPIOs will be allocated. |
114 | * @setup: callback for board-specific initialization | ||
115 | * @remove: callback for board-specific teardown | ||
114 | */ | 116 | */ |
115 | struct tc35892_gpio_platform_data { | 117 | struct tc35892_gpio_platform_data { |
116 | int gpio_base; | 118 | int gpio_base; |
119 | void (*setup)(struct tc35892 *tc35892, unsigned gpio_base); | ||
120 | void (*remove)(struct tc35892 *tc35892, unsigned gpio_base); | ||
117 | }; | 121 | }; |
118 | 122 | ||
119 | /** | 123 | /** |
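A small, assumed board-file sketch of the new setup/remove hooks (the my_board_* names are hypothetical):

#include <linux/mfd/tc35892.h>

static void my_board_tc35892_setup(struct tc35892 *tc35892, unsigned gpio_base)
{
	/* board-specific GPIO configuration, run once the expander's
	 * GPIOs have been registered starting at gpio_base */
}

static struct tc35892_gpio_platform_data my_board_tc35892_gpio = {
	.gpio_base	= 192,			/* first GPIO number to assign */
	.setup		= my_board_tc35892_setup,
	/* .remove left NULL: no board-specific teardown needed here */
};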
diff --git a/include/linux/mlx4/cmd.h b/include/linux/mlx4/cmd.h index 0f82293a82ed..78a1b9671752 100644 --- a/include/linux/mlx4/cmd.h +++ b/include/linux/mlx4/cmd.h | |||
@@ -56,6 +56,7 @@ enum { | |||
56 | MLX4_CMD_QUERY_HCA = 0xb, | 56 | MLX4_CMD_QUERY_HCA = 0xb, |
57 | MLX4_CMD_QUERY_PORT = 0x43, | 57 | MLX4_CMD_QUERY_PORT = 0x43, |
58 | MLX4_CMD_SENSE_PORT = 0x4d, | 58 | MLX4_CMD_SENSE_PORT = 0x4d, |
59 | MLX4_CMD_HW_HEALTH_CHECK = 0x50, | ||
59 | MLX4_CMD_SET_PORT = 0xc, | 60 | MLX4_CMD_SET_PORT = 0xc, |
60 | MLX4_CMD_ACCESS_DDR = 0x2e, | 61 | MLX4_CMD_ACCESS_DDR = 0x2e, |
61 | MLX4_CMD_MAP_ICM = 0xffa, | 62 | MLX4_CMD_MAP_ICM = 0xffa, |
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h index 7a7f9c1e679a..7338654c02b4 100644 --- a/include/linux/mlx4/device.h +++ b/include/linux/mlx4/device.h | |||
@@ -186,6 +186,10 @@ struct mlx4_caps { | |||
186 | int eth_mtu_cap[MLX4_MAX_PORTS + 1]; | 186 | int eth_mtu_cap[MLX4_MAX_PORTS + 1]; |
187 | int gid_table_len[MLX4_MAX_PORTS + 1]; | 187 | int gid_table_len[MLX4_MAX_PORTS + 1]; |
188 | int pkey_table_len[MLX4_MAX_PORTS + 1]; | 188 | int pkey_table_len[MLX4_MAX_PORTS + 1]; |
189 | int trans_type[MLX4_MAX_PORTS + 1]; | ||
190 | int vendor_oui[MLX4_MAX_PORTS + 1]; | ||
191 | int wavelength[MLX4_MAX_PORTS + 1]; | ||
192 | u64 trans_code[MLX4_MAX_PORTS + 1]; | ||
189 | int local_ca_ack_delay; | 193 | int local_ca_ack_delay; |
190 | int num_uars; | 194 | int num_uars; |
191 | int bf_reg_size; | 195 | int bf_reg_size; |
@@ -229,6 +233,8 @@ struct mlx4_caps { | |||
229 | u32 bmme_flags; | 233 | u32 bmme_flags; |
230 | u32 reserved_lkey; | 234 | u32 reserved_lkey; |
231 | u16 stat_rate_support; | 235 | u16 stat_rate_support; |
236 | int udp_rss; | ||
237 | int loopback_support; | ||
232 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; | 238 | u8 port_width_cap[MLX4_MAX_PORTS + 1]; |
233 | int max_gso_sz; | 239 | int max_gso_sz; |
234 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; | 240 | int reserved_qps_cnt[MLX4_NUM_QP_REGION]; |
@@ -480,5 +486,6 @@ void mlx4_fmr_unmap(struct mlx4_dev *dev, struct mlx4_fmr *fmr, | |||
480 | u32 *lkey, u32 *rkey); | 486 | u32 *lkey, u32 *rkey); |
481 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); | 487 | int mlx4_fmr_free(struct mlx4_dev *dev, struct mlx4_fmr *fmr); |
482 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); | 488 | int mlx4_SYNC_TPT(struct mlx4_dev *dev); |
489 | int mlx4_test_interrupts(struct mlx4_dev *dev); | ||
483 | 490 | ||
484 | #endif /* MLX4_DEVICE_H */ | 491 | #endif /* MLX4_DEVICE_H */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 709f6728fc90..7687228dd3b7 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -78,7 +78,11 @@ extern unsigned int kobjsize(const void *objp); | |||
78 | #define VM_MAYSHARE 0x00000080 | 78 | #define VM_MAYSHARE 0x00000080 |
79 | 79 | ||
80 | #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ | 80 | #define VM_GROWSDOWN 0x00000100 /* general info on the segment */ |
81 | #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) | ||
81 | #define VM_GROWSUP 0x00000200 | 82 | #define VM_GROWSUP 0x00000200 |
83 | #else | ||
84 | #define VM_GROWSUP 0x00000000 | ||
85 | #endif | ||
82 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ | 86 | #define VM_PFNMAP 0x00000400 /* Page-ranges managed without "struct page", just pure PFN */ |
83 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ | 87 | #define VM_DENYWRITE 0x00000800 /* ETXTBSY on write attempts.. */ |
84 | 88 | ||
@@ -860,6 +864,12 @@ int set_page_dirty(struct page *page); | |||
860 | int set_page_dirty_lock(struct page *page); | 864 | int set_page_dirty_lock(struct page *page); |
861 | int clear_page_dirty_for_io(struct page *page); | 865 | int clear_page_dirty_for_io(struct page *page); |
862 | 866 | ||
867 | /* Is the vma a continuation of the stack vma above it? */ | ||
868 | static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) | ||
869 | { | ||
870 | return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); | ||
871 | } | ||
872 | |||
863 | extern unsigned long move_page_tables(struct vm_area_struct *vma, | 873 | extern unsigned long move_page_tables(struct vm_area_struct *vma, |
864 | unsigned long old_addr, struct vm_area_struct *new_vma, | 874 | unsigned long old_addr, struct vm_area_struct *new_vma, |
865 | unsigned long new_addr, unsigned long len); | 875 | unsigned long new_addr, unsigned long len); |
@@ -1165,6 +1175,8 @@ extern void free_bootmem_with_active_regions(int nid, | |||
1165 | unsigned long max_low_pfn); | 1175 | unsigned long max_low_pfn); |
1166 | int add_from_early_node_map(struct range *range, int az, | 1176 | int add_from_early_node_map(struct range *range, int az, |
1167 | int nr_range, int nid); | 1177 | int nr_range, int nid); |
1178 | u64 __init find_memory_core_early(int nid, u64 size, u64 align, | ||
1179 | u64 goal, u64 limit); | ||
1168 | void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, | 1180 | void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, |
1169 | u64 goal, u64 limit); | 1181 | u64 goal, u64 limit); |
1170 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); | 1182 | typedef int (*work_fn_t)(unsigned long, unsigned long, void *); |
@@ -1330,8 +1342,10 @@ unsigned long ra_submit(struct file_ra_state *ra, | |||
1330 | 1342 | ||
1331 | /* Do stack extension */ | 1343 | /* Do stack extension */ |
1332 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); | 1344 | extern int expand_stack(struct vm_area_struct *vma, unsigned long address); |
1333 | #ifdef CONFIG_IA64 | 1345 | #if VM_GROWSUP |
1334 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); | 1346 | extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); |
1347 | #else | ||
1348 | #define expand_upwards(vma, address) do { } while (0) | ||
1335 | #endif | 1349 | #endif |
1336 | extern int expand_stack_downwards(struct vm_area_struct *vma, | 1350 | extern int expand_stack_downwards(struct vm_area_struct *vma, |
1337 | unsigned long address); | 1351 | unsigned long address); |
@@ -1357,7 +1371,15 @@ static inline unsigned long vma_pages(struct vm_area_struct *vma) | |||
1357 | return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 1371 | return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
1358 | } | 1372 | } |
1359 | 1373 | ||
1374 | #ifdef CONFIG_MMU | ||
1360 | pgprot_t vm_get_page_prot(unsigned long vm_flags); | 1375 | pgprot_t vm_get_page_prot(unsigned long vm_flags); |
1376 | #else | ||
1377 | static inline pgprot_t vm_get_page_prot(unsigned long vm_flags) | ||
1378 | { | ||
1379 | return __pgprot(0); | ||
1380 | } | ||
1381 | #endif | ||
1382 | |||
1361 | struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); | 1383 | struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); |
1362 | int remap_pfn_range(struct vm_area_struct *, unsigned long addr, | 1384 | int remap_pfn_range(struct vm_area_struct *, unsigned long addr, |
1363 | unsigned long pfn, unsigned long size, pgprot_t); | 1385 | unsigned long pfn, unsigned long size, pgprot_t); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index b8bb9a6a1f37..cb57d657ce4d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -134,7 +134,7 @@ struct vm_area_struct { | |||
134 | within vm_mm. */ | 134 | within vm_mm. */ |
135 | 135 | ||
136 | /* linked list of VM areas per task, sorted by address */ | 136 | /* linked list of VM areas per task, sorted by address */ |
137 | struct vm_area_struct *vm_next; | 137 | struct vm_area_struct *vm_next, *vm_prev; |
138 | 138 | ||
139 | pgprot_t vm_page_prot; /* Access permissions of this VMA. */ | 139 | pgprot_t vm_page_prot; /* Access permissions of this VMA. */ |
140 | unsigned long vm_flags; /* Flags, see mm.h. */ | 140 | unsigned long vm_flags; /* Flags, see mm.h. */ |
@@ -299,7 +299,7 @@ struct mm_struct { | |||
299 | * new_owner->mm == mm | 299 | * new_owner->mm == mm |
300 | * new_owner->alloc_lock is held | 300 | * new_owner->alloc_lock is held |
301 | */ | 301 | */ |
302 | struct task_struct *owner; | 302 | struct task_struct __rcu *owner; |
303 | #endif | 303 | #endif |
304 | 304 | ||
305 | #ifdef CONFIG_PROC_FS | 305 | #ifdef CONFIG_PROC_FS |
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h index 329a8faa6e37..245cdacee544 100644 --- a/include/linux/mmc/sdio.h +++ b/include/linux/mmc/sdio.h | |||
@@ -38,6 +38,8 @@ | |||
38 | * [8:0] Byte/block count | 38 | * [8:0] Byte/block count |
39 | */ | 39 | */ |
40 | 40 | ||
41 | #define R4_MEMORY_PRESENT (1 << 27) | ||
42 | |||
41 | /* | 43 | /* |
42 | SDIO status in R5 | 44 | SDIO status in R5 |
43 | Type | 45 | Type |
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h index 33b2ea09a4ad..a36ab3bc7b03 100644 --- a/include/linux/mmc/sdio_ids.h +++ b/include/linux/mmc/sdio_ids.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #define SDIO_CLASS_PHS 0x06 /* PHS standard interface */ | 18 | #define SDIO_CLASS_PHS 0x06 /* PHS standard interface */ |
19 | #define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ | 19 | #define SDIO_CLASS_WLAN 0x07 /* WLAN interface */ |
20 | #define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ | 20 | #define SDIO_CLASS_ATA 0x08 /* Embedded SDIO-ATA std interface */ |
21 | #define SDIO_CLASS_BT_AMP 0x09 /* Type-A Bluetooth AMP interface */ | ||
21 | 22 | ||
22 | /* | 23 | /* |
23 | * Vendors and devices. Sort key: vendor first, device next. | 24 | * Vendors and devices. Sort key: vendor first, device next. |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 6e6e62648a4d..3984c4eb41fd 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -284,6 +284,13 @@ struct zone { | |||
284 | unsigned long watermark[NR_WMARK]; | 284 | unsigned long watermark[NR_WMARK]; |
285 | 285 | ||
286 | /* | 286 | /* |
287 | * When free pages are below this point, additional steps are taken | ||
288 | * when reading the number of free pages, to prevent per-cpu counter | ||
289 | * drift from allowing the watermarks to be breached. | ||
290 | */ | ||
291 | unsigned long percpu_drift_mark; | ||
292 | |||
293 | /* | ||
287 | * We don't know if the memory that we're going to allocate will be freeable | 294 | * We don't know if the memory that we're going to allocate will be freeable |
288 | * or/and it will be released eventually, so to avoid totally wasting several | 295 | * or/and it will be released eventually, so to avoid totally wasting several |
289 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk | 296 | * GB of ram we must reserve some of the lower zone memory (otherwise we risk |
@@ -441,6 +448,12 @@ static inline int zone_is_oom_locked(const struct zone *zone) | |||
441 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); | 448 | return test_bit(ZONE_OOM_LOCKED, &zone->flags); |
442 | } | 449 | } |
443 | 450 | ||
451 | #ifdef CONFIG_SMP | ||
452 | unsigned long zone_nr_free_pages(struct zone *zone); | ||
453 | #else | ||
454 | #define zone_nr_free_pages(zone) zone_page_state(zone, NR_FREE_PAGES) | ||
455 | #endif /* CONFIG_SMP */ | ||
456 | |||
444 | /* | 457 | /* |
445 | * The "priority" of VM scanning is how much of the queues we will scan in one | 458 | * The "priority" of VM scanning is how much of the queues we will scan in one |
446 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the | 459 | * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the |
diff --git a/include/linux/module.h b/include/linux/module.h index 8a6b9fdc7ffa..b29e7458b966 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -350,7 +350,10 @@ struct module | |||
350 | struct tracepoint *tracepoints; | 350 | struct tracepoint *tracepoints; |
351 | unsigned int num_tracepoints; | 351 | unsigned int num_tracepoints; |
352 | #endif | 352 | #endif |
353 | 353 | #ifdef HAVE_JUMP_LABEL | |
354 | struct jump_entry *jump_entries; | ||
355 | unsigned int num_jump_entries; | ||
356 | #endif | ||
354 | #ifdef CONFIG_TRACING | 357 | #ifdef CONFIG_TRACING |
355 | const char **trace_bprintk_fmt_start; | 358 | const char **trace_bprintk_fmt_start; |
356 | unsigned int num_trace_bprintk_fmt; | 359 | unsigned int num_trace_bprintk_fmt; |
@@ -686,17 +689,16 @@ extern int module_sysfs_initialized; | |||
686 | 689 | ||
687 | 690 | ||
688 | #ifdef CONFIG_GENERIC_BUG | 691 | #ifdef CONFIG_GENERIC_BUG |
689 | int module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, | 692 | void module_bug_finalize(const Elf_Ehdr *, const Elf_Shdr *, |
690 | struct module *); | 693 | struct module *); |
691 | void module_bug_cleanup(struct module *); | 694 | void module_bug_cleanup(struct module *); |
692 | 695 | ||
693 | #else /* !CONFIG_GENERIC_BUG */ | 696 | #else /* !CONFIG_GENERIC_BUG */ |
694 | 697 | ||
695 | static inline int module_bug_finalize(const Elf_Ehdr *hdr, | 698 | static inline void module_bug_finalize(const Elf_Ehdr *hdr, |
696 | const Elf_Shdr *sechdrs, | 699 | const Elf_Shdr *sechdrs, |
697 | struct module *mod) | 700 | struct module *mod) |
698 | { | 701 | { |
699 | return 0; | ||
700 | } | 702 | } |
701 | static inline void module_bug_cleanup(struct module *mod) {} | 703 | static inline void module_bug_cleanup(struct module *mod) {} |
702 | #endif /* CONFIG_GENERIC_BUG */ | 704 | #endif /* CONFIG_GENERIC_BUG */ |
diff --git a/include/linux/mroute.h b/include/linux/mroute.h index fa04b246c9ae..0fa7a3a874c8 100644 --- a/include/linux/mroute.h +++ b/include/linux/mroute.h | |||
@@ -213,6 +213,7 @@ struct mfc_cache { | |||
213 | unsigned char ttls[MAXVIFS]; /* TTL thresholds */ | 213 | unsigned char ttls[MAXVIFS]; /* TTL thresholds */ |
214 | } res; | 214 | } res; |
215 | } mfc_un; | 215 | } mfc_un; |
216 | struct rcu_head rcu; | ||
216 | }; | 217 | }; |
217 | 218 | ||
218 | #define MFC_STATIC 1 | 219 | #define MFC_STATIC 1 |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 91b05c171854..05acced439a3 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
@@ -10,12 +10,13 @@ struct msi_msg { | |||
10 | }; | 10 | }; |
11 | 11 | ||
12 | /* Helper functions */ | 12 | /* Helper functions */ |
13 | struct irq_desc; | 13 | struct irq_data; |
14 | extern void mask_msi_irq(unsigned int irq); | 14 | struct msi_desc; |
15 | extern void unmask_msi_irq(unsigned int irq); | 15 | extern void mask_msi_irq(struct irq_data *data); |
16 | extern void read_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 16 | extern void unmask_msi_irq(struct irq_data *data); |
17 | extern void get_cached_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 17 | extern void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
18 | extern void write_msi_msg_desc(struct irq_desc *desc, struct msi_msg *msg); | 18 | extern void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); |
19 | extern void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); | ||
19 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); | 20 | extern void read_msi_msg(unsigned int irq, struct msi_msg *msg); |
20 | extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); | 21 | extern void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); |
21 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); | 22 | extern void write_msi_msg(unsigned int irq, struct msi_msg *msg); |
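The point of the signature change is that these helpers can now be plugged straight into an irq_data-based struct irq_chip. A minimal sketch, assuming the genirq chip layout of this same kernel generation (the chip itself is made up):

static struct irq_chip my_msi_chip = {
	.name		= "MY-MSI",
	.irq_mask	= mask_msi_irq,		/* now takes struct irq_data * */
	.irq_unmask	= unmask_msi_irq,
};

The msi_desc-based __read_msi_msg()/__write_msi_msg() variants sit alongside the existing irq-number wrappers for code that already holds the descriptor.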
diff --git a/include/linux/mtio.h b/include/linux/mtio.h index ef01d6aa5934..8f825756c459 100644 --- a/include/linux/mtio.h +++ b/include/linux/mtio.h | |||
@@ -63,6 +63,7 @@ struct mtop { | |||
63 | #define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */ | 63 | #define MTCOMPRESSION 32/* control compression with SCSI mode page 15 */ |
64 | #define MTSETPART 33 /* Change the active tape partition */ | 64 | #define MTSETPART 33 /* Change the active tape partition */ |
65 | #define MTMKPART 34 /* Format the tape with one or two partitions */ | 65 | #define MTMKPART 34 /* Format the tape with one or two partitions */ |
66 | #define MTWEOFI 35 /* write an end-of-file record (mark) in immediate mode */ | ||
66 | 67 | ||
67 | /* structure for MTIOCGET - mag tape get status command */ | 68 | /* structure for MTIOCGET - mag tape get status command */ |
68 | 69 | ||
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 878cab4f5fcc..f363bc8fdc74 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -78,6 +78,14 @@ struct mutex_waiter { | |||
78 | # include <linux/mutex-debug.h> | 78 | # include <linux/mutex-debug.h> |
79 | #else | 79 | #else |
80 | # define __DEBUG_MUTEX_INITIALIZER(lockname) | 80 | # define __DEBUG_MUTEX_INITIALIZER(lockname) |
81 | /** | ||
82 | * mutex_init - initialize the mutex | ||
83 | * @mutex: the mutex to be initialized | ||
84 | * | ||
85 | * Initialize the mutex to unlocked state. | ||
86 | * | ||
87 | * It is not allowed to initialize an already locked mutex. | ||
88 | */ | ||
81 | # define mutex_init(mutex) \ | 89 | # define mutex_init(mutex) \ |
82 | do { \ | 90 | do { \ |
83 | static struct lock_class_key __key; \ | 91 | static struct lock_class_key __key; \ |
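A short usage sketch of the documented initializer (struct my_dev is a made-up example type, not an existing kernel structure):

#include <linux/mutex.h>

struct my_dev {
	struct mutex lock;
	int users;
};

static int my_dev_setup(struct my_dev *d)
{
	mutex_init(&d->lock);		/* must run before the first lock/unlock */

	mutex_lock(&d->lock);
	d->users = 0;
	mutex_unlock(&d->lock);
	return 0;
}

/* statically allocated mutexes can use DEFINE_MUTEX() instead of mutex_init() */
static DEFINE_MUTEX(my_global_lock);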
diff --git a/include/linux/n_r3964.h b/include/linux/n_r3964.h index de24af79ebd3..54b8e0d8d916 100644 --- a/include/linux/n_r3964.h +++ b/include/linux/n_r3964.h | |||
@@ -4,7 +4,6 @@ | |||
4 | * Copyright by | 4 | * Copyright by |
5 | * Philips Automation Projects | 5 | * Philips Automation Projects |
6 | * Kassel (Germany) | 6 | * Kassel (Germany) |
7 | * http://www.pap-philips.de | ||
8 | * ----------------------------------------------------------- | 7 | * ----------------------------------------------------------- |
9 | * This software may be used and distributed according to the terms of | 8 | * This software may be used and distributed according to the terms of |
10 | * the GNU General Public License, incorporated herein by reference. | 9 | * the GNU General Public License, incorporated herein by reference. |
diff --git a/include/linux/nbd.h b/include/linux/nbd.h index bb58854a8061..d146ca10c0f5 100644 --- a/include/linux/nbd.h +++ b/include/linux/nbd.h | |||
@@ -88,7 +88,7 @@ struct nbd_request { | |||
88 | char handle[8]; | 88 | char handle[8]; |
89 | __be64 from; | 89 | __be64 from; |
90 | __be32 len; | 90 | __be32 len; |
91 | } __packed; | 91 | } __attribute__((packed)); |
92 | 92 | ||
93 | /* | 93 | /* |
94 | * This is the reply packet that nbd-server sends back to the client after | 94 | * This is the reply packet that nbd-server sends back to the client after |
diff --git a/include/linux/ncp.h b/include/linux/ncp.h index 3ace8370e61e..99f0adeeb3f3 100644 --- a/include/linux/ncp.h +++ b/include/linux/ncp.h | |||
@@ -27,7 +27,7 @@ struct ncp_request_header { | |||
27 | __u8 conn_high; | 27 | __u8 conn_high; |
28 | __u8 function; | 28 | __u8 function; |
29 | __u8 data[0]; | 29 | __u8 data[0]; |
30 | } __packed; | 30 | } __attribute__((packed)); |
31 | 31 | ||
32 | #define NCP_REPLY (0x3333) | 32 | #define NCP_REPLY (0x3333) |
33 | #define NCP_WATCHDOG (0x3E3E) | 33 | #define NCP_WATCHDOG (0x3E3E) |
@@ -42,7 +42,7 @@ struct ncp_reply_header { | |||
42 | __u8 completion_code; | 42 | __u8 completion_code; |
43 | __u8 connection_state; | 43 | __u8 connection_state; |
44 | __u8 data[0]; | 44 | __u8 data[0]; |
45 | } __packed; | 45 | } __attribute__((packed)); |
46 | 46 | ||
47 | #define NCP_VOLNAME_LEN (16) | 47 | #define NCP_VOLNAME_LEN (16) |
48 | #define NCP_NUMBER_OF_VOLUMES (256) | 48 | #define NCP_NUMBER_OF_VOLUMES (256) |
@@ -158,7 +158,7 @@ struct nw_info_struct { | |||
158 | #ifdef __KERNEL__ | 158 | #ifdef __KERNEL__ |
159 | struct nw_nfs_info nfs; | 159 | struct nw_nfs_info nfs; |
160 | #endif | 160 | #endif |
161 | } __packed; | 161 | } __attribute__((packed)); |
162 | 162 | ||
163 | /* modify mask - use with MODIFY_DOS_INFO structure */ | 163 | /* modify mask - use with MODIFY_DOS_INFO structure */ |
164 | #define DM_ATTRIBUTES (cpu_to_le32(0x02)) | 164 | #define DM_ATTRIBUTES (cpu_to_le32(0x02)) |
@@ -190,12 +190,12 @@ struct nw_modify_dos_info { | |||
190 | __u16 inheritanceGrantMask; | 190 | __u16 inheritanceGrantMask; |
191 | __u16 inheritanceRevokeMask; | 191 | __u16 inheritanceRevokeMask; |
192 | __u32 maximumSpace; | 192 | __u32 maximumSpace; |
193 | } __packed; | 193 | } __attribute__((packed)); |
194 | 194 | ||
195 | struct nw_search_sequence { | 195 | struct nw_search_sequence { |
196 | __u8 volNumber; | 196 | __u8 volNumber; |
197 | __u32 dirBase; | 197 | __u32 dirBase; |
198 | __u32 sequence; | 198 | __u32 sequence; |
199 | } __packed; | 199 | } __attribute__((packed)); |
200 | 200 | ||
201 | #endif /* _LINUX_NCP_H */ | 201 | #endif /* _LINUX_NCP_H */ |
diff --git a/include/linux/ncp_fs.h b/include/linux/ncp_fs.h index 4522aed00906..ef663061d5ac 100644 --- a/include/linux/ncp_fs.h +++ b/include/linux/ncp_fs.h | |||
@@ -241,34 +241,6 @@ int ncp_mmap(struct file *, struct vm_area_struct *); | |||
241 | /* linux/fs/ncpfs/ncplib_kernel.c */ | 241 | /* linux/fs/ncpfs/ncplib_kernel.c */ |
242 | int ncp_make_closed(struct inode *); | 242 | int ncp_make_closed(struct inode *); |
243 | 243 | ||
244 | #define ncp_namespace(i) (NCP_SERVER(i)->name_space[NCP_FINFO(i)->volNumber]) | ||
245 | |||
246 | static inline int ncp_preserve_entry_case(struct inode *i, __u32 nscreator) | ||
247 | { | ||
248 | #ifdef CONFIG_NCPFS_SMALLDOS | ||
249 | int ns = ncp_namespace(i); | ||
250 | |||
251 | if ((ns == NW_NS_DOS) | ||
252 | #ifdef CONFIG_NCPFS_OS2_NS | ||
253 | || ((ns == NW_NS_OS2) && (nscreator == NW_NS_DOS)) | ||
254 | #endif /* CONFIG_NCPFS_OS2_NS */ | ||
255 | ) | ||
256 | return 0; | ||
257 | #endif /* CONFIG_NCPFS_SMALLDOS */ | ||
258 | return 1; | ||
259 | } | ||
260 | |||
261 | #define ncp_preserve_case(i) (ncp_namespace(i) != NW_NS_DOS) | ||
262 | |||
263 | static inline int ncp_case_sensitive(struct inode *i) | ||
264 | { | ||
265 | #ifdef CONFIG_NCPFS_NFS_NS | ||
266 | return ncp_namespace(i) == NW_NS_NFS; | ||
267 | #else | ||
268 | return 0; | ||
269 | #endif /* CONFIG_NCPFS_NFS_NS */ | ||
270 | } | ||
271 | |||
272 | #endif /* __KERNEL__ */ | 244 | #endif /* __KERNEL__ */ |
273 | 245 | ||
274 | #endif /* _LINUX_NCP_FS_H */ | 246 | #endif /* _LINUX_NCP_FS_H */ |
diff --git a/include/linux/ncp_fs_sb.h b/include/linux/ncp_fs_sb.h index 8da05bc098ca..d64b0e894336 100644 --- a/include/linux/ncp_fs_sb.h +++ b/include/linux/ncp_fs_sb.h | |||
@@ -62,6 +62,7 @@ struct ncp_server { | |||
62 | int ncp_reply_size; | 62 | int ncp_reply_size; |
63 | 63 | ||
64 | int root_setuped; | 64 | int root_setuped; |
65 | struct mutex root_setup_lock; | ||
65 | 66 | ||
66 | /* info for packet signing */ | 67 | /* info for packet signing */ |
67 | int sign_wanted; /* 1=Server needs signed packets */ | 68 | int sign_wanted; /* 1=Server needs signed packets */ |
@@ -81,13 +82,14 @@ struct ncp_server { | |||
81 | size_t len; | 82 | size_t len; |
82 | void* data; | 83 | void* data; |
83 | } priv; | 84 | } priv; |
85 | struct rw_semaphore auth_rwsem; | ||
84 | 86 | ||
85 | /* nls info: codepage for volume and charset for I/O */ | 87 | /* nls info: codepage for volume and charset for I/O */ |
86 | struct nls_table *nls_vol; | 88 | struct nls_table *nls_vol; |
87 | struct nls_table *nls_io; | 89 | struct nls_table *nls_io; |
88 | 90 | ||
89 | /* maximum age in jiffies */ | 91 | /* maximum age in jiffies */ |
90 | int dentry_ttl; | 92 | atomic_t dentry_ttl; |
91 | 93 | ||
92 | /* miscellaneous */ | 94 | /* miscellaneous */ |
93 | unsigned int flags; | 95 | unsigned int flags; |
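[Editor's note, not part of the patch] The ncp_fs_sb.h change adds a mutex for root setup and an rw_semaphore around the auth data, and turns dentry_ttl into an atomic_t so the TTL can be read on every dentry revalidation without a lock while a remount or ioctl updates it. A userspace analogue of that lockless read / occasional write pattern, using C11 atomics (names illustrative, not ncpfs code):

    /* Sketch: hot-path readers load the TTL atomically; writers are rare. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int dentry_ttl = 30;      /* seconds; illustrative default */

    static int ttl_get(void)
    {
            /* cheap relaxed load on the revalidation fast path */
            return atomic_load_explicit(&dentry_ttl, memory_order_relaxed);
    }

    static void ttl_set(int secs)
    {
            /* infrequent update, e.g. from a remount or an ioctl */
            atomic_store_explicit(&dentry_ttl, secs, memory_order_relaxed);
    }

    int main(void)
    {
            ttl_set(60);
            printf("ttl=%d\n", ttl_get());
            return 0;
    }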
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 46c36ffe20ee..fcd3dda86322 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -228,9 +228,9 @@ struct netdev_hw_addr { | |||
228 | #define NETDEV_HW_ADDR_T_SLAVE 3 | 228 | #define NETDEV_HW_ADDR_T_SLAVE 3 |
229 | #define NETDEV_HW_ADDR_T_UNICAST 4 | 229 | #define NETDEV_HW_ADDR_T_UNICAST 4 |
230 | #define NETDEV_HW_ADDR_T_MULTICAST 5 | 230 | #define NETDEV_HW_ADDR_T_MULTICAST 5 |
231 | int refcount; | ||
232 | bool synced; | 231 | bool synced; |
233 | bool global_use; | 232 | bool global_use; |
233 | int refcount; | ||
234 | struct rcu_head rcu_head; | 234 | struct rcu_head rcu_head; |
235 | }; | 235 | }; |
236 | 236 | ||
@@ -281,6 +281,12 @@ struct hh_cache { | |||
281 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; | 281 | unsigned long hh_data[HH_DATA_ALIGN(LL_MAX_HEADER) / sizeof(long)]; |
282 | }; | 282 | }; |
283 | 283 | ||
284 | static inline void hh_cache_put(struct hh_cache *hh) | ||
285 | { | ||
286 | if (atomic_dec_and_test(&hh->hh_refcnt)) | ||
287 | kfree(hh); | ||
288 | } | ||
289 | |||
284 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. | 290 | /* Reserve HH_DATA_MOD byte aligned hard_header_len, but at least that much. |
285 | * Alternative is: | 291 | * Alternative is: |
286 | * dev->hard_header_len ? (dev->hard_header_len + | 292 | * dev->hard_header_len ? (dev->hard_header_len + |
@@ -884,6 +890,9 @@ struct net_device { | |||
884 | int iflink; | 890 | int iflink; |
885 | 891 | ||
886 | struct net_device_stats stats; | 892 | struct net_device_stats stats; |
893 | atomic_long_t rx_dropped; /* dropped packets by core network | ||
894 | * Do not use this in drivers. | ||
895 | */ | ||
887 | 896 | ||
888 | #ifdef CONFIG_WIRELESS_EXT | 897 | #ifdef CONFIG_WIRELESS_EXT |
889 | /* List of functions to handle Wireless Extensions (instead of ioctl). | 898 | /* List of functions to handle Wireless Extensions (instead of ioctl). |
@@ -901,7 +910,7 @@ struct net_device { | |||
901 | 910 | ||
902 | unsigned int flags; /* interface flags (a la BSD) */ | 911 | unsigned int flags; /* interface flags (a la BSD) */ |
903 | unsigned short gflags; | 912 | unsigned short gflags; |
904 | unsigned short priv_flags; /* Like 'flags' but invisible to userspace. */ | 913 | unsigned int priv_flags; /* Like 'flags' but invisible to userspace. */ |
905 | unsigned short padded; /* How much padding added by alloc_netdev() */ | 914 | unsigned short padded; /* How much padding added by alloc_netdev() */ |
906 | 915 | ||
907 | unsigned char operstate; /* RFC2863 operstate */ | 916 | unsigned char operstate; /* RFC2863 operstate */ |
@@ -918,10 +927,6 @@ struct net_device { | |||
918 | unsigned short needed_headroom; | 927 | unsigned short needed_headroom; |
919 | unsigned short needed_tailroom; | 928 | unsigned short needed_tailroom; |
920 | 929 | ||
921 | struct net_device *master; /* Pointer to master device of a group, | ||
922 | * which this device is member of. | ||
923 | */ | ||
924 | |||
925 | /* Interface address info. */ | 930 | /* Interface address info. */ |
926 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ | 931 | unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */ |
927 | unsigned char addr_assign_type; /* hw address assignment type */ | 932 | unsigned char addr_assign_type; /* hw address assignment type */ |
@@ -937,12 +942,15 @@ struct net_device { | |||
937 | 942 | ||
938 | 943 | ||
939 | /* Protocol specific pointers */ | 944 | /* Protocol specific pointers */ |
940 | 945 | ||
946 | #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) | ||
947 | struct vlan_group *vlgrp; /* VLAN group */ | ||
948 | #endif | ||
941 | #ifdef CONFIG_NET_DSA | 949 | #ifdef CONFIG_NET_DSA |
942 | void *dsa_ptr; /* dsa specific data */ | 950 | void *dsa_ptr; /* dsa specific data */ |
943 | #endif | 951 | #endif |
944 | void *atalk_ptr; /* AppleTalk link */ | 952 | void *atalk_ptr; /* AppleTalk link */ |
945 | void *ip_ptr; /* IPv4 specific data */ | 953 | struct in_device __rcu *ip_ptr; /* IPv4 specific data */ |
946 | void *dn_ptr; /* DECnet specific data */ | 954 | void *dn_ptr; /* DECnet specific data */ |
947 | void *ip6_ptr; /* IPv6 specific data */ | 955 | void *ip6_ptr; /* IPv6 specific data */ |
948 | void *ec_ptr; /* Econet specific data */ | 956 | void *ec_ptr; /* Econet specific data */ |
@@ -951,9 +959,20 @@ struct net_device { | |||
951 | assign before registering */ | 959 | assign before registering */ |
952 | 960 | ||
953 | /* | 961 | /* |
954 | * Cache line mostly used on receive path (including eth_type_trans()) | 962 | * Cache lines mostly used on receive path (including eth_type_trans()) |
955 | */ | 963 | */ |
956 | unsigned long last_rx; /* Time of last Rx */ | 964 | unsigned long last_rx; /* Time of last Rx |
965 | * This should not be set in | ||
966 | * drivers, unless really needed, | ||
967 | * because network stack (bonding) | ||
968 | * use it if/when necessary, to | ||
969 | * avoid dirtying this cache line. | ||
970 | */ | ||
971 | |||
972 | struct net_device *master; /* Pointer to master device of a group, | ||
973 | * which this device is member of. | ||
974 | */ | ||
975 | |||
957 | /* Interface address info used in eth_type_trans() */ | 976 | /* Interface address info used in eth_type_trans() */ |
958 | unsigned char *dev_addr; /* hw address, (before bcast | 977 | unsigned char *dev_addr; /* hw address, (before bcast |
959 | because most packets are | 978 | because most packets are |
@@ -969,14 +988,21 @@ struct net_device { | |||
969 | 988 | ||
970 | struct netdev_rx_queue *_rx; | 989 | struct netdev_rx_queue *_rx; |
971 | 990 | ||
972 | /* Number of RX queues allocated at alloc_netdev_mq() time */ | 991 | /* Number of RX queues allocated at register_netdev() time */ |
973 | unsigned int num_rx_queues; | 992 | unsigned int num_rx_queues; |
993 | |||
994 | /* Number of RX queues currently active in device */ | ||
995 | unsigned int real_num_rx_queues; | ||
974 | #endif | 996 | #endif |
975 | 997 | ||
976 | struct netdev_queue rx_queue; | ||
977 | rx_handler_func_t *rx_handler; | 998 | rx_handler_func_t *rx_handler; |
978 | void *rx_handler_data; | 999 | void *rx_handler_data; |
979 | 1000 | ||
1001 | struct netdev_queue __rcu *ingress_queue; | ||
1002 | |||
1003 | /* | ||
1004 | * Cache lines mostly used on transmit path | ||
1005 | */ | ||
980 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; | 1006 | struct netdev_queue *_tx ____cacheline_aligned_in_smp; |
981 | 1007 | ||
982 | /* Number of TX queues allocated at alloc_netdev_mq() time */ | 1008 | /* Number of TX queues allocated at alloc_netdev_mq() time */ |
@@ -990,9 +1016,7 @@ struct net_device { | |||
990 | 1016 | ||
991 | unsigned long tx_queue_len; /* Max frames per queue allowed */ | 1017 | unsigned long tx_queue_len; /* Max frames per queue allowed */ |
992 | spinlock_t tx_global_lock; | 1018 | spinlock_t tx_global_lock; |
993 | /* | 1019 | |
994 | * One part is mostly used on xmit path (device) | ||
995 | */ | ||
996 | /* These may be needed for future network-power-down code. */ | 1020 | /* These may be needed for future network-power-down code. */ |
997 | 1021 | ||
998 | /* | 1022 | /* |
@@ -1005,7 +1029,7 @@ struct net_device { | |||
1005 | struct timer_list watchdog_timer; | 1029 | struct timer_list watchdog_timer; |
1006 | 1030 | ||
1007 | /* Number of references to this device */ | 1031 | /* Number of references to this device */ |
1008 | atomic_t refcnt ____cacheline_aligned_in_smp; | 1032 | int __percpu *pcpu_refcnt; |
1009 | 1033 | ||
1010 | /* delayed register/unregister */ | 1034 | /* delayed register/unregister */ |
1011 | struct list_head todo_list; | 1035 | struct list_head todo_list; |
@@ -1041,8 +1065,12 @@ struct net_device { | |||
1041 | #endif | 1065 | #endif |
1042 | 1066 | ||
1043 | /* mid-layer private */ | 1067 | /* mid-layer private */ |
1044 | void *ml_priv; | 1068 | union { |
1045 | 1069 | void *ml_priv; | |
1070 | struct pcpu_lstats __percpu *lstats; /* loopback stats */ | ||
1071 | struct pcpu_tstats __percpu *tstats; /* tunnel stats */ | ||
1072 | struct pcpu_dstats __percpu *dstats; /* dummy stats */ | ||
1073 | }; | ||
1046 | /* GARP */ | 1074 | /* GARP */ |
1047 | struct garp_port *garp_port; | 1075 | struct garp_port *garp_port; |
1048 | 1076 | ||
@@ -1305,6 +1333,7 @@ static inline void unregister_netdevice(struct net_device *dev) | |||
1305 | unregister_netdevice_queue(dev, NULL); | 1333 | unregister_netdevice_queue(dev, NULL); |
1306 | } | 1334 | } |
1307 | 1335 | ||
1336 | extern int netdev_refcnt_read(const struct net_device *dev); | ||
1308 | extern void free_netdev(struct net_device *dev); | 1337 | extern void free_netdev(struct net_device *dev); |
1309 | extern void synchronize_net(void); | 1338 | extern void synchronize_net(void); |
1310 | extern int register_netdevice_notifier(struct notifier_block *nb); | 1339 | extern int register_netdevice_notifier(struct notifier_block *nb); |
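[Editor's note, not part of the patch] The new netdev_refcnt_read() declaration goes with the later hunks in this file that replace the shared atomic_t refcnt with int __percpu *pcpu_refcnt: dev_hold()/dev_put() only touch the local CPU's counter, and a true count is obtained by summing every slot. A standalone analogue of the idea, with per-thread-style counters standing in for per-CPU ones (illustrative code, not the kernel implementation):

    /* Sketch: cheap local increments/decrements, reads sum every slot. */
    #include <stdio.h>

    #define NR_CPUS 4

    static long refcnt[NR_CPUS];            /* one counter per "CPU" */

    static void dev_hold_on(int cpu)  { refcnt[cpu]++; }  /* no shared cacheline */
    static void dev_put_on(int cpu)   { refcnt[cpu]--; }

    static long refcnt_read(void)            /* slow path: sum all counters */
    {
            long sum = 0;
            for (int i = 0; i < NR_CPUS; i++)
                    sum += refcnt[i];
            return sum;
    }

    int main(void)
    {
            dev_hold_on(0);
            dev_hold_on(2);
            dev_put_on(1);                   /* a put may land on another CPU */
            printf("refcnt=%ld\n", refcnt_read());   /* prints 1 */
            return 0;
    }

Individual counters can go negative; only the sum is meaningful, which is why the read side is the slow path and the hold/put fast paths avoid any shared cache line.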
@@ -1667,11 +1696,34 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index) | |||
1667 | */ | 1696 | */ |
1668 | static inline int netif_is_multiqueue(const struct net_device *dev) | 1697 | static inline int netif_is_multiqueue(const struct net_device *dev) |
1669 | { | 1698 | { |
1670 | return (dev->num_tx_queues > 1); | 1699 | return dev->num_tx_queues > 1; |
1671 | } | 1700 | } |
1672 | 1701 | ||
1673 | extern void netif_set_real_num_tx_queues(struct net_device *dev, | 1702 | extern int netif_set_real_num_tx_queues(struct net_device *dev, |
1674 | unsigned int txq); | 1703 | unsigned int txq); |
1704 | |||
1705 | #ifdef CONFIG_RPS | ||
1706 | extern int netif_set_real_num_rx_queues(struct net_device *dev, | ||
1707 | unsigned int rxq); | ||
1708 | #else | ||
1709 | static inline int netif_set_real_num_rx_queues(struct net_device *dev, | ||
1710 | unsigned int rxq) | ||
1711 | { | ||
1712 | return 0; | ||
1713 | } | ||
1714 | #endif | ||
1715 | |||
1716 | static inline int netif_copy_real_num_queues(struct net_device *to_dev, | ||
1717 | const struct net_device *from_dev) | ||
1718 | { | ||
1719 | netif_set_real_num_tx_queues(to_dev, from_dev->real_num_tx_queues); | ||
1720 | #ifdef CONFIG_RPS | ||
1721 | return netif_set_real_num_rx_queues(to_dev, | ||
1722 | from_dev->real_num_rx_queues); | ||
1723 | #else | ||
1724 | return 0; | ||
1725 | #endif | ||
1726 | } | ||
1675 | 1727 | ||
1676 | /* Use this variant when it is known for sure that it | 1728 | /* Use this variant when it is known for sure that it |
1677 | * is executing from hardware interrupt context or with hardware interrupts | 1729 | * is executing from hardware interrupt context or with hardware interrupts |
@@ -1695,8 +1747,7 @@ extern gro_result_t dev_gro_receive(struct napi_struct *napi, | |||
1695 | extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); | 1747 | extern gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb); |
1696 | extern gro_result_t napi_gro_receive(struct napi_struct *napi, | 1748 | extern gro_result_t napi_gro_receive(struct napi_struct *napi, |
1697 | struct sk_buff *skb); | 1749 | struct sk_buff *skb); |
1698 | extern void napi_reuse_skb(struct napi_struct *napi, | 1750 | extern void napi_gro_flush(struct napi_struct *napi); |
1699 | struct sk_buff *skb); | ||
1700 | extern struct sk_buff * napi_get_frags(struct napi_struct *napi); | 1751 | extern struct sk_buff * napi_get_frags(struct napi_struct *napi); |
1701 | extern gro_result_t napi_frags_finish(struct napi_struct *napi, | 1752 | extern gro_result_t napi_frags_finish(struct napi_struct *napi, |
1702 | struct sk_buff *skb, | 1753 | struct sk_buff *skb, |
@@ -1715,7 +1766,6 @@ extern int netdev_rx_handler_register(struct net_device *dev, | |||
1715 | void *rx_handler_data); | 1766 | void *rx_handler_data); |
1716 | extern void netdev_rx_handler_unregister(struct net_device *dev); | 1767 | extern void netdev_rx_handler_unregister(struct net_device *dev); |
1717 | 1768 | ||
1718 | extern void netif_nit_deliver(struct sk_buff *skb); | ||
1719 | extern int dev_valid_name(const char *name); | 1769 | extern int dev_valid_name(const char *name); |
1720 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); | 1770 | extern int dev_ioctl(struct net *net, unsigned int cmd, void __user *); |
1721 | extern int dev_ethtool(struct net *net, struct ifreq *); | 1771 | extern int dev_ethtool(struct net *net, struct ifreq *); |
@@ -1749,7 +1799,7 @@ extern void netdev_run_todo(void); | |||
1749 | */ | 1799 | */ |
1750 | static inline void dev_put(struct net_device *dev) | 1800 | static inline void dev_put(struct net_device *dev) |
1751 | { | 1801 | { |
1752 | atomic_dec(&dev->refcnt); | 1802 | irqsafe_cpu_dec(*dev->pcpu_refcnt); |
1753 | } | 1803 | } |
1754 | 1804 | ||
1755 | /** | 1805 | /** |
@@ -1760,7 +1810,7 @@ static inline void dev_put(struct net_device *dev) | |||
1760 | */ | 1810 | */ |
1761 | static inline void dev_hold(struct net_device *dev) | 1811 | static inline void dev_hold(struct net_device *dev) |
1762 | { | 1812 | { |
1763 | atomic_inc(&dev->refcnt); | 1813 | irqsafe_cpu_inc(*dev->pcpu_refcnt); |
1764 | } | 1814 | } |
1765 | 1815 | ||
1766 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on | 1816 | /* Carrier loss detection, dial on demand. The functions netif_carrier_on |
@@ -2171,6 +2221,8 @@ extern void dev_seq_stop(struct seq_file *seq, void *v); | |||
2171 | extern int netdev_class_create_file(struct class_attribute *class_attr); | 2221 | extern int netdev_class_create_file(struct class_attribute *class_attr); |
2172 | extern void netdev_class_remove_file(struct class_attribute *class_attr); | 2222 | extern void netdev_class_remove_file(struct class_attribute *class_attr); |
2173 | 2223 | ||
2224 | extern struct kobj_ns_type_operations net_ns_type_operations; | ||
2225 | |||
2174 | extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); | 2226 | extern char *netdev_drivername(const struct net_device *dev, char *buffer, int len); |
2175 | 2227 | ||
2176 | extern void linkwatch_run_queue(void); | 2228 | extern void linkwatch_run_queue(void); |
@@ -2191,14 +2243,22 @@ static inline int net_gso_ok(int features, int gso_type) | |||
2191 | static inline int skb_gso_ok(struct sk_buff *skb, int features) | 2243 | static inline int skb_gso_ok(struct sk_buff *skb, int features) |
2192 | { | 2244 | { |
2193 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && | 2245 | return net_gso_ok(features, skb_shinfo(skb)->gso_type) && |
2194 | (!skb_has_frags(skb) || (features & NETIF_F_FRAGLIST)); | 2246 | (!skb_has_frag_list(skb) || (features & NETIF_F_FRAGLIST)); |
2195 | } | 2247 | } |
2196 | 2248 | ||
2197 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) | 2249 | static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) |
2198 | { | 2250 | { |
2199 | return skb_is_gso(skb) && | 2251 | if (skb_is_gso(skb)) { |
2200 | (!skb_gso_ok(skb, dev->features) || | 2252 | int features = dev->features; |
2201 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | 2253 | |
2254 | if (skb->protocol == htons(ETH_P_8021Q) || skb->vlan_tci) | ||
2255 | features &= dev->vlan_features; | ||
2256 | |||
2257 | return (!skb_gso_ok(skb, features) || | ||
2258 | unlikely(skb->ip_summed != CHECKSUM_PARTIAL)); | ||
2259 | } | ||
2260 | |||
2261 | return 0; | ||
2202 | } | 2262 | } |
2203 | 2263 | ||
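[Editor's note, not part of the patch] The reworked netif_needs_gso() above masks the device features with vlan_features when the skb is VLAN-tagged, so a GSO packet falls back to software segmentation if the device cannot offload it through a VLAN. A hedged standalone sketch of just that decision (flag values and names are illustrative, not the kernel's, and the real code also checks ip_summed):

    #include <stdbool.h>
    #include <stdio.h>

    #define F_TSO   0x1                      /* illustrative feature bit */

    struct fake_dev { int features; int vlan_features; };

    static bool needs_sw_gso(const struct fake_dev *dev, bool vlan_tagged)
    {
            int features = dev->features;

            if (vlan_tagged)                 /* offload must survive the VLAN path */
                    features &= dev->vlan_features;

            return !(features & F_TSO);      /* no usable offload: segment in software */
    }

    int main(void)
    {
            struct fake_dev dev = { .features = F_TSO, .vlan_features = 0 };

            printf("plain: %d, tagged: %d\n",
                   needs_sw_gso(&dev, false), needs_sw_gso(&dev, true));
            return 0;
    }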
2204 | static inline void netif_set_gso_max_size(struct net_device *dev, | 2264 | static inline void netif_set_gso_max_size(struct net_device *dev, |
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h index 1afd18c855ec..50cdc2559a5a 100644 --- a/include/linux/netfilter/nf_conntrack_common.h +++ b/include/linux/netfilter/nf_conntrack_common.h | |||
@@ -98,8 +98,14 @@ enum ip_conntrack_events { | |||
98 | 98 | ||
99 | enum ip_conntrack_expect_events { | 99 | enum ip_conntrack_expect_events { |
100 | IPEXP_NEW, /* new expectation */ | 100 | IPEXP_NEW, /* new expectation */ |
101 | IPEXP_DESTROY, /* destroyed expectation */ | ||
101 | }; | 102 | }; |
102 | 103 | ||
104 | /* expectation flags */ | ||
105 | #define NF_CT_EXPECT_PERMANENT 0x1 | ||
106 | #define NF_CT_EXPECT_INACTIVE 0x2 | ||
107 | #define NF_CT_EXPECT_USERSPACE 0x4 | ||
108 | |||
103 | #ifdef __KERNEL__ | 109 | #ifdef __KERNEL__ |
104 | struct ip_conntrack_stat { | 110 | struct ip_conntrack_stat { |
105 | unsigned int searched; | 111 | unsigned int searched; |
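[Editor's note, not part of the patch] The conntrack hunk adds an IPEXP_DESTROY event and moves the expectation flags into the shared header. The flags are single bits meant to be OR-ed together and tested with a mask; a trivial standalone illustration (values copied from the hunk):

    #include <stdio.h>

    #define NF_CT_EXPECT_PERMANENT 0x1
    #define NF_CT_EXPECT_INACTIVE  0x2
    #define NF_CT_EXPECT_USERSPACE 0x4

    int main(void)
    {
            unsigned int flags = NF_CT_EXPECT_PERMANENT | NF_CT_EXPECT_USERSPACE;

            if (flags & NF_CT_EXPECT_USERSPACE)
                    printf("expectation was created from userspace\n");
            if (!(flags & NF_CT_EXPECT_INACTIVE))
                    printf("expectation is active\n");
            return 0;
    }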
diff --git a/include/linux/netfilter/nf_conntrack_sip.h b/include/linux/netfilter/nf_conntrack_sip.h index ff8cfbcf3b81..0ce91d56a5f2 100644 --- a/include/linux/netfilter/nf_conntrack_sip.h +++ b/include/linux/netfilter/nf_conntrack_sip.h | |||
@@ -89,6 +89,7 @@ enum sip_header_types { | |||
89 | SIP_HDR_VIA_TCP, | 89 | SIP_HDR_VIA_TCP, |
90 | SIP_HDR_EXPIRES, | 90 | SIP_HDR_EXPIRES, |
91 | SIP_HDR_CONTENT_LENGTH, | 91 | SIP_HDR_CONTENT_LENGTH, |
92 | SIP_HDR_CALL_ID, | ||
92 | }; | 93 | }; |
93 | 94 | ||
94 | enum sdp_header_types { | 95 | enum sdp_header_types { |
diff --git a/include/linux/netfilter/nfnetlink_conntrack.h b/include/linux/netfilter/nfnetlink_conntrack.h index 9ed534c991b9..19711e3ffd42 100644 --- a/include/linux/netfilter/nfnetlink_conntrack.h +++ b/include/linux/netfilter/nfnetlink_conntrack.h | |||
@@ -39,8 +39,9 @@ enum ctattr_type { | |||
39 | CTA_TUPLE_MASTER, | 39 | CTA_TUPLE_MASTER, |
40 | CTA_NAT_SEQ_ADJ_ORIG, | 40 | CTA_NAT_SEQ_ADJ_ORIG, |
41 | CTA_NAT_SEQ_ADJ_REPLY, | 41 | CTA_NAT_SEQ_ADJ_REPLY, |
42 | CTA_SECMARK, | 42 | CTA_SECMARK, /* obsolete */ |
43 | CTA_ZONE, | 43 | CTA_ZONE, |
44 | CTA_SECCTX, | ||
44 | __CTA_MAX | 45 | __CTA_MAX |
45 | }; | 46 | }; |
46 | #define CTA_MAX (__CTA_MAX - 1) | 47 | #define CTA_MAX (__CTA_MAX - 1) |
@@ -161,6 +162,7 @@ enum ctattr_expect { | |||
161 | CTA_EXPECT_ID, | 162 | CTA_EXPECT_ID, |
162 | CTA_EXPECT_HELP_NAME, | 163 | CTA_EXPECT_HELP_NAME, |
163 | CTA_EXPECT_ZONE, | 164 | CTA_EXPECT_ZONE, |
165 | CTA_EXPECT_FLAGS, | ||
164 | __CTA_EXPECT_MAX | 166 | __CTA_EXPECT_MAX |
165 | }; | 167 | }; |
166 | #define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1) | 168 | #define CTA_EXPECT_MAX (__CTA_EXPECT_MAX - 1) |
@@ -172,4 +174,11 @@ enum ctattr_help { | |||
172 | }; | 174 | }; |
173 | #define CTA_HELP_MAX (__CTA_HELP_MAX - 1) | 175 | #define CTA_HELP_MAX (__CTA_HELP_MAX - 1) |
174 | 176 | ||
177 | enum ctattr_secctx { | ||
178 | CTA_SECCTX_UNSPEC, | ||
179 | CTA_SECCTX_NAME, | ||
180 | __CTA_SECCTX_MAX | ||
181 | }; | ||
182 | #define CTA_SECCTX_MAX (__CTA_SECCTX_MAX - 1) | ||
183 | |||
175 | #endif /* _IPCONNTRACK_NETLINK_H */ | 184 | #endif /* _IPCONNTRACK_NETLINK_H */ |
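[Editor's note, not part of the patch] Both attribute enums above grow by appending the new value (CTA_SECCTX, CTA_EXPECT_FLAGS) immediately before the __*_MAX sentinel, which keeps every existing value stable while *_MAX = __*_MAX - 1 tracks the highest valid attribute automatically. A standalone illustration of the idiom (shortened, illustrative enum):

    #include <stdio.h>

    enum ctattr_demo {
            CTA_DEMO_UNSPEC,
            CTA_DEMO_OLD,           /* existing attributes keep their values */
            CTA_DEMO_NEW,           /* appended just before the sentinel */
            __CTA_DEMO_MAX
    };
    #define CTA_DEMO_MAX (__CTA_DEMO_MAX - 1)

    int main(void)
    {
            /* CTA_DEMO_MAX tracks the appended value with no other edits */
            printf("old=%d new=%d max=%d\n",
                   CTA_DEMO_OLD, CTA_DEMO_NEW, CTA_DEMO_MAX);
            return 0;
    }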
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 24e5d01d27d0..742bec051440 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -66,6 +66,11 @@ struct xt_standard_target { | |||
66 | int verdict; | 66 | int verdict; |
67 | }; | 67 | }; |
68 | 68 | ||
69 | struct xt_error_target { | ||
70 | struct xt_entry_target target; | ||
71 | char errorname[XT_FUNCTION_MAXNAMELEN]; | ||
72 | }; | ||
73 | |||
69 | /* The argument to IPT_SO_GET_REVISION_*. Returns highest revision | 74 | /* The argument to IPT_SO_GET_REVISION_*. Returns highest revision |
70 | * kernel supports, if >= revision. */ | 75 | * kernel supports, if >= revision. */ |
71 | struct xt_get_revision { | 76 | struct xt_get_revision { |
diff --git a/include/linux/netfilter/xt_IDLETIMER.h b/include/linux/netfilter/xt_IDLETIMER.h index 3e1aa1be942e..208ae9387331 100644 --- a/include/linux/netfilter/xt_IDLETIMER.h +++ b/include/linux/netfilter/xt_IDLETIMER.h | |||
@@ -39,7 +39,7 @@ struct idletimer_tg_info { | |||
39 | char label[MAX_IDLETIMER_LABEL_SIZE]; | 39 | char label[MAX_IDLETIMER_LABEL_SIZE]; |
40 | 40 | ||
41 | /* for kernel module internal use only */ | 41 | /* for kernel module internal use only */ |
42 | struct idletimer_tg *timer __attribute((aligned(8))); | 42 | struct idletimer_tg *timer __attribute__((aligned(8))); |
43 | }; | 43 | }; |
44 | 44 | ||
45 | #endif | 45 | #endif |
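[Editor's note, not part of the patch] The one-character fix above restores the canonical __attribute__ spelling used everywhere else in the exported headers; the short __attribute form may happen to work with GCC but is not the documented keyword. The aligned(8) on the kernel-internal pointer pins the struct layout so 32-bit userspace and a 64-bit kernel agree on it. A small illustrative sketch of what the attribute does (struct name is made up):

    #include <stdio.h>

    struct timer_ref {
            void *timer __attribute__((aligned(8)));   /* as in the fixed header */
    };

    int main(void)
    {
            printf("sizeof=%zu alignof=%zu\n",
                   sizeof(struct timer_ref), _Alignof(struct timer_ref));
            return 0;
    }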
diff --git a/include/linux/netfilter/xt_SECMARK.h b/include/linux/netfilter/xt_SECMARK.h index 6fcd3448b186..989092bd6274 100644 --- a/include/linux/netfilter/xt_SECMARK.h +++ b/include/linux/netfilter/xt_SECMARK.h | |||
@@ -11,18 +11,12 @@ | |||
11 | * packets are being marked for. | 11 | * packets are being marked for. |
12 | */ | 12 | */ |
13 | #define SECMARK_MODE_SEL 0x01 /* SELinux */ | 13 | #define SECMARK_MODE_SEL 0x01 /* SELinux */ |
14 | #define SECMARK_SELCTX_MAX 256 | 14 | #define SECMARK_SECCTX_MAX 256 |
15 | |||
16 | struct xt_secmark_target_selinux_info { | ||
17 | __u32 selsid; | ||
18 | char selctx[SECMARK_SELCTX_MAX]; | ||
19 | }; | ||
20 | 15 | ||
21 | struct xt_secmark_target_info { | 16 | struct xt_secmark_target_info { |
22 | __u8 mode; | 17 | __u8 mode; |
23 | union { | 18 | __u32 secid; |
24 | struct xt_secmark_target_selinux_info sel; | 19 | char secctx[SECMARK_SECCTX_MAX]; |
25 | } u; | ||
26 | }; | 20 | }; |
27 | 21 | ||
28 | #endif /*_XT_SECMARK_H_target */ | 22 | #endif /*_XT_SECMARK_H_target */ |
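[Editor's note, not part of the patch] The SECMARK change drops the SELinux-only union in favour of a generic secid plus secctx string, so other LSMs can be supported without another ABI revision. A hedged sketch of how a userspace tool might fill the revised structure; only the field and constant names come from the header, the types are simplified and the context string is made up, and the kernel resolves secid from secctx much as it did with the old selsid:

    #include <stdio.h>

    #define SECMARK_MODE_SEL   0x01
    #define SECMARK_SECCTX_MAX 256

    struct xt_secmark_target_info {
            unsigned char mode;
            unsigned int  secid;                    /* resolved by the kernel */
            char          secctx[SECMARK_SECCTX_MAX];
    };

    int main(void)
    {
            struct xt_secmark_target_info info = { .mode = SECMARK_MODE_SEL };

            /* userspace supplies the context string only */
            snprintf(info.secctx, sizeof(info.secctx),
                     "system_u:object_r:httpd_packet_t:s0");
            printf("mode=%u ctx=%s\n", info.mode, info.secctx);
            return 0;
    }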
diff --git a/include/linux/netfilter/xt_TPROXY.h b/include/linux/netfilter/xt_TPROXY.h index 152e8f97132b..3f3d69361289 100644 --- a/include/linux/netfilter/xt_TPROXY.h +++ b/include/linux/netfilter/xt_TPROXY.h | |||
@@ -1,5 +1,5 @@ | |||
1 | #ifndef _XT_TPROXY_H_target | 1 | #ifndef _XT_TPROXY_H |
2 | #define _XT_TPROXY_H_target | 2 | #define _XT_TPROXY_H |
3 | 3 | ||
4 | /* TPROXY target is capable of marking the packet to perform | 4 | /* TPROXY target is capable of marking the packet to perform |
5 | * redirection. We can get rid of that whenever we get support for | 5 | * redirection. We can get rid of that whenever we get support for |
@@ -11,4 +11,11 @@ struct xt_tproxy_target_info { | |||
11 | __be16 lport; | 11 | __be16 lport; |
12 | }; | 12 | }; |
13 | 13 | ||
14 | #endif /* _XT_TPROXY_H_target */ | 14 | struct xt_tproxy_target_info_v1 { |
15 | u_int32_t mark_mask; | ||
16 | u_int32_t mark_value; | ||
17 | union nf_inet_addr laddr; | ||
18 | __be16 lport; | ||
19 | }; | ||
20 | |||
21 | #endif /* _XT_TPROXY_H */ | ||
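[Editor's note, not part of the patch] Revision 1 of the TPROXY target info adds a mark mask/value pair alongside the redirect address and port. The header only defines the fields; as a hedged illustration, a mask/value pair of this kind is typically applied the way xt_MARK applies one, leaving the bits outside the mask untouched:

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative helper, not from the patch: the usual mask/value update. */
    static uint32_t apply_mark(uint32_t mark, uint32_t mask, uint32_t value)
    {
            return (mark & ~mask) ^ value;
    }

    int main(void)
    {
            uint32_t mark = 0x00ff0001;

            /* rewrite only the low byte to 0x23, leave the rest alone */
            printf("0x%08x\n", apply_mark(mark, 0x000000ff, 0x00000023));
            return 0;
    }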
diff --git a/include/linux/netfilter/xt_ipvs.h b/include/linux/netfilter/xt_ipvs.h index 1167aeb7a347..eff34ac18808 100644 --- a/include/linux/netfilter/xt_ipvs.h +++ b/include/linux/netfilter/xt_ipvs.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef _XT_IPVS_H | 1 | #ifndef _XT_IPVS_H |
2 | #define _XT_IPVS_H | 2 | #define _XT_IPVS_H |
3 | 3 | ||
4 | #include <linux/types.h> | ||
5 | |||
4 | enum { | 6 | enum { |
5 | XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ | 7 | XT_IPVS_IPVS_PROPERTY = 1 << 0, /* all other options imply this one */ |
6 | XT_IPVS_PROTO = 1 << 1, | 8 | XT_IPVS_PROTO = 1 << 1, |
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h index e9948c0560f6..adbf4bff87ed 100644 --- a/include/linux/netfilter_arp/arp_tables.h +++ b/include/linux/netfilter_arp/arp_tables.h | |||
@@ -21,8 +21,21 @@ | |||
21 | 21 | ||
22 | #include <linux/netfilter/x_tables.h> | 22 | #include <linux/netfilter/x_tables.h> |
23 | 23 | ||
24 | #ifndef __KERNEL__ | ||
24 | #define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN | 25 | #define ARPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN |
25 | #define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN | 26 | #define ARPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN |
27 | #define arpt_entry_target xt_entry_target | ||
28 | #define arpt_standard_target xt_standard_target | ||
29 | #define arpt_error_target xt_error_target | ||
30 | #define ARPT_CONTINUE XT_CONTINUE | ||
31 | #define ARPT_RETURN XT_RETURN | ||
32 | #define arpt_counters_info xt_counters_info | ||
33 | #define arpt_counters xt_counters | ||
34 | #define ARPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
35 | #define ARPT_ERROR_TARGET XT_ERROR_TARGET | ||
36 | #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
37 | XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) | ||
38 | #endif | ||
26 | 39 | ||
27 | #define ARPT_DEV_ADDR_LEN_MAX 16 | 40 | #define ARPT_DEV_ADDR_LEN_MAX 16 |
28 | 41 | ||
@@ -63,9 +76,6 @@ struct arpt_arp { | |||
63 | u_int16_t invflags; | 76 | u_int16_t invflags; |
64 | }; | 77 | }; |
65 | 78 | ||
66 | #define arpt_entry_target xt_entry_target | ||
67 | #define arpt_standard_target xt_standard_target | ||
68 | |||
69 | /* Values for "flag" field in struct arpt_ip (general arp structure). | 79 | /* Values for "flag" field in struct arpt_ip (general arp structure). |
70 | * No flags defined yet. | 80 | * No flags defined yet. |
71 | */ | 81 | */ |
@@ -125,16 +135,10 @@ struct arpt_entry | |||
125 | #define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3) | 135 | #define ARPT_SO_GET_REVISION_TARGET (ARPT_BASE_CTL + 3) |
126 | #define ARPT_SO_GET_MAX (ARPT_SO_GET_REVISION_TARGET) | 136 | #define ARPT_SO_GET_MAX (ARPT_SO_GET_REVISION_TARGET) |
127 | 137 | ||
128 | /* CONTINUE verdict for targets */ | ||
129 | #define ARPT_CONTINUE XT_CONTINUE | ||
130 | |||
131 | /* For standard target */ | ||
132 | #define ARPT_RETURN XT_RETURN | ||
133 | |||
134 | /* The argument to ARPT_SO_GET_INFO */ | 138 | /* The argument to ARPT_SO_GET_INFO */ |
135 | struct arpt_getinfo { | 139 | struct arpt_getinfo { |
136 | /* Which table: caller fills this in. */ | 140 | /* Which table: caller fills this in. */ |
137 | char name[ARPT_TABLE_MAXNAMELEN]; | 141 | char name[XT_TABLE_MAXNAMELEN]; |
138 | 142 | ||
139 | /* Kernel fills these in. */ | 143 | /* Kernel fills these in. */ |
140 | /* Which hook entry points are valid: bitmask */ | 144 | /* Which hook entry points are valid: bitmask */ |
@@ -156,7 +160,7 @@ struct arpt_getinfo { | |||
156 | /* The argument to ARPT_SO_SET_REPLACE. */ | 160 | /* The argument to ARPT_SO_SET_REPLACE. */ |
157 | struct arpt_replace { | 161 | struct arpt_replace { |
158 | /* Which table. */ | 162 | /* Which table. */ |
159 | char name[ARPT_TABLE_MAXNAMELEN]; | 163 | char name[XT_TABLE_MAXNAMELEN]; |
160 | 164 | ||
161 | /* Which hook entry points are valid: bitmask. You can't | 165 | /* Which hook entry points are valid: bitmask. You can't |
162 | change this. */ | 166 | change this. */ |
@@ -184,14 +188,10 @@ struct arpt_replace { | |||
184 | struct arpt_entry entries[0]; | 188 | struct arpt_entry entries[0]; |
185 | }; | 189 | }; |
186 | 190 | ||
187 | /* The argument to ARPT_SO_ADD_COUNTERS. */ | ||
188 | #define arpt_counters_info xt_counters_info | ||
189 | #define arpt_counters xt_counters | ||
190 | |||
191 | /* The argument to ARPT_SO_GET_ENTRIES. */ | 191 | /* The argument to ARPT_SO_GET_ENTRIES. */ |
192 | struct arpt_get_entries { | 192 | struct arpt_get_entries { |
193 | /* Which table: user fills this in. */ | 193 | /* Which table: user fills this in. */ |
194 | char name[ARPT_TABLE_MAXNAMELEN]; | 194 | char name[XT_TABLE_MAXNAMELEN]; |
195 | 195 | ||
196 | /* User fills this in: total entry size. */ | 196 | /* User fills this in: total entry size. */ |
197 | unsigned int size; | 197 | unsigned int size; |
@@ -200,23 +200,12 @@ struct arpt_get_entries { | |||
200 | struct arpt_entry entrytable[0]; | 200 | struct arpt_entry entrytable[0]; |
201 | }; | 201 | }; |
202 | 202 | ||
203 | /* Standard return verdict, or do jump. */ | ||
204 | #define ARPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
205 | /* Error verdict. */ | ||
206 | #define ARPT_ERROR_TARGET XT_ERROR_TARGET | ||
207 | |||
208 | /* Helper functions */ | 203 | /* Helper functions */ |
209 | static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e) | 204 | static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e) |
210 | { | 205 | { |
211 | return (void *)e + e->target_offset; | 206 | return (void *)e + e->target_offset; |
212 | } | 207 | } |
213 | 208 | ||
214 | #ifndef __KERNEL__ | ||
215 | /* fn returns 0 to continue iteration */ | ||
216 | #define ARPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
217 | XT_ENTRY_ITERATE(struct arpt_entry, entries, size, fn, ## args) | ||
218 | #endif | ||
219 | |||
220 | /* | 209 | /* |
221 | * Main firewall chains definitions and global var's definitions. | 210 | * Main firewall chains definitions and global var's definitions. |
222 | */ | 211 | */ |
@@ -225,17 +214,12 @@ static __inline__ struct arpt_entry_target *arpt_get_target(struct arpt_entry *e | |||
225 | /* Standard entry. */ | 214 | /* Standard entry. */ |
226 | struct arpt_standard { | 215 | struct arpt_standard { |
227 | struct arpt_entry entry; | 216 | struct arpt_entry entry; |
228 | struct arpt_standard_target target; | 217 | struct xt_standard_target target; |
229 | }; | ||
230 | |||
231 | struct arpt_error_target { | ||
232 | struct arpt_entry_target target; | ||
233 | char errorname[ARPT_FUNCTION_MAXNAMELEN]; | ||
234 | }; | 218 | }; |
235 | 219 | ||
236 | struct arpt_error { | 220 | struct arpt_error { |
237 | struct arpt_entry entry; | 221 | struct arpt_entry entry; |
238 | struct arpt_error_target target; | 222 | struct xt_error_target target; |
239 | }; | 223 | }; |
240 | 224 | ||
241 | #define ARPT_ENTRY_INIT(__size) \ | 225 | #define ARPT_ENTRY_INIT(__size) \ |
@@ -247,16 +231,16 @@ struct arpt_error { | |||
247 | #define ARPT_STANDARD_INIT(__verdict) \ | 231 | #define ARPT_STANDARD_INIT(__verdict) \ |
248 | { \ | 232 | { \ |
249 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \ | 233 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_standard)), \ |
250 | .target = XT_TARGET_INIT(ARPT_STANDARD_TARGET, \ | 234 | .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ |
251 | sizeof(struct arpt_standard_target)), \ | 235 | sizeof(struct xt_standard_target)), \ |
252 | .target.verdict = -(__verdict) - 1, \ | 236 | .target.verdict = -(__verdict) - 1, \ |
253 | } | 237 | } |
254 | 238 | ||
255 | #define ARPT_ERROR_INIT \ | 239 | #define ARPT_ERROR_INIT \ |
256 | { \ | 240 | { \ |
257 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \ | 241 | .entry = ARPT_ENTRY_INIT(sizeof(struct arpt_error)), \ |
258 | .target = XT_TARGET_INIT(ARPT_ERROR_TARGET, \ | 242 | .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ |
259 | sizeof(struct arpt_error_target)), \ | 243 | sizeof(struct xt_error_target)), \ |
260 | .target.errorname = "ERROR", \ | 244 | .target.errorname = "ERROR", \ |
261 | } | 245 | } |
262 | 246 | ||
@@ -271,8 +255,6 @@ extern unsigned int arpt_do_table(struct sk_buff *skb, | |||
271 | const struct net_device *out, | 255 | const struct net_device *out, |
272 | struct xt_table *table); | 256 | struct xt_table *table); |
273 | 257 | ||
274 | #define ARPT_ALIGN(s) XT_ALIGN(s) | ||
275 | |||
276 | #ifdef CONFIG_COMPAT | 258 | #ifdef CONFIG_COMPAT |
277 | #include <net/compat.h> | 259 | #include <net/compat.h> |
278 | 260 | ||
@@ -285,14 +267,12 @@ struct compat_arpt_entry { | |||
285 | unsigned char elems[0]; | 267 | unsigned char elems[0]; |
286 | }; | 268 | }; |
287 | 269 | ||
288 | static inline struct arpt_entry_target * | 270 | static inline struct xt_entry_target * |
289 | compat_arpt_get_target(struct compat_arpt_entry *e) | 271 | compat_arpt_get_target(struct compat_arpt_entry *e) |
290 | { | 272 | { |
291 | return (void *)e + e->target_offset; | 273 | return (void *)e + e->target_offset; |
292 | } | 274 | } |
293 | 275 | ||
294 | #define COMPAT_ARPT_ALIGN(s) COMPAT_XT_ALIGN(s) | ||
295 | |||
296 | #endif /* CONFIG_COMPAT */ | 276 | #endif /* CONFIG_COMPAT */ |
297 | #endif /*__KERNEL__*/ | 277 | #endif /*__KERNEL__*/ |
298 | #endif /* _ARPTABLES_H */ | 278 | #endif /* _ARPTABLES_H */ |
diff --git a/include/linux/netfilter_bridge/Kbuild b/include/linux/netfilter_bridge/Kbuild index d4d78672873e..e48f1a3f5a4a 100644 --- a/include/linux/netfilter_bridge/Kbuild +++ b/include/linux/netfilter_bridge/Kbuild | |||
@@ -3,11 +3,13 @@ header-y += ebt_among.h | |||
3 | header-y += ebt_arp.h | 3 | header-y += ebt_arp.h |
4 | header-y += ebt_arpreply.h | 4 | header-y += ebt_arpreply.h |
5 | header-y += ebt_ip.h | 5 | header-y += ebt_ip.h |
6 | header-y += ebt_ip6.h | ||
6 | header-y += ebt_limit.h | 7 | header-y += ebt_limit.h |
7 | header-y += ebt_log.h | 8 | header-y += ebt_log.h |
8 | header-y += ebt_mark_m.h | 9 | header-y += ebt_mark_m.h |
9 | header-y += ebt_mark_t.h | 10 | header-y += ebt_mark_t.h |
10 | header-y += ebt_nat.h | 11 | header-y += ebt_nat.h |
12 | header-y += ebt_nflog.h | ||
11 | header-y += ebt_pkttype.h | 13 | header-y += ebt_pkttype.h |
12 | header-y += ebt_redirect.h | 14 | header-y += ebt_redirect.h |
13 | header-y += ebt_stp.h | 15 | header-y += ebt_stp.h |
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h index 704a7b6e8169..64a5d95c58e8 100644 --- a/include/linux/netfilter_ipv4/ip_tables.h +++ b/include/linux/netfilter_ipv4/ip_tables.h | |||
@@ -27,12 +27,49 @@ | |||
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | 29 | ||
30 | #ifndef __KERNEL__ | ||
30 | #define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN | 31 | #define IPT_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN |
31 | #define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN | 32 | #define IPT_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN |
32 | #define ipt_match xt_match | 33 | #define ipt_match xt_match |
33 | #define ipt_target xt_target | 34 | #define ipt_target xt_target |
34 | #define ipt_table xt_table | 35 | #define ipt_table xt_table |
35 | #define ipt_get_revision xt_get_revision | 36 | #define ipt_get_revision xt_get_revision |
37 | #define ipt_entry_match xt_entry_match | ||
38 | #define ipt_entry_target xt_entry_target | ||
39 | #define ipt_standard_target xt_standard_target | ||
40 | #define ipt_error_target xt_error_target | ||
41 | #define ipt_counters xt_counters | ||
42 | #define IPT_CONTINUE XT_CONTINUE | ||
43 | #define IPT_RETURN XT_RETURN | ||
44 | |||
45 | /* This group is older than old (iptables < v1.4.0-rc1~89) */ | ||
46 | #include <linux/netfilter/xt_tcpudp.h> | ||
47 | #define ipt_udp xt_udp | ||
48 | #define ipt_tcp xt_tcp | ||
49 | #define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
50 | #define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
51 | #define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
52 | #define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
53 | #define IPT_TCP_INV_MASK XT_TCP_INV_MASK | ||
54 | #define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
55 | #define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
56 | #define IPT_UDP_INV_MASK XT_UDP_INV_MASK | ||
57 | |||
58 | /* The argument to IPT_SO_ADD_COUNTERS. */ | ||
59 | #define ipt_counters_info xt_counters_info | ||
60 | /* Standard return verdict, or do jump. */ | ||
61 | #define IPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
62 | /* Error verdict. */ | ||
63 | #define IPT_ERROR_TARGET XT_ERROR_TARGET | ||
64 | |||
65 | /* fn returns 0 to continue iteration */ | ||
66 | #define IPT_MATCH_ITERATE(e, fn, args...) \ | ||
67 | XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) | ||
68 | |||
69 | /* fn returns 0 to continue iteration */ | ||
70 | #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
71 | XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) | ||
72 | #endif | ||
36 | 73 | ||
37 | /* Yes, Virginia, you have to zero the padding. */ | 74 | /* Yes, Virginia, you have to zero the padding. */ |
38 | struct ipt_ip { | 75 | struct ipt_ip { |
@@ -52,12 +89,6 @@ struct ipt_ip { | |||
52 | u_int8_t invflags; | 89 | u_int8_t invflags; |
53 | }; | 90 | }; |
54 | 91 | ||
55 | #define ipt_entry_match xt_entry_match | ||
56 | #define ipt_entry_target xt_entry_target | ||
57 | #define ipt_standard_target xt_standard_target | ||
58 | |||
59 | #define ipt_counters xt_counters | ||
60 | |||
61 | /* Values for "flag" field in struct ipt_ip (general ip structure). */ | 92 | /* Values for "flag" field in struct ipt_ip (general ip structure). */ |
62 | #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ | 93 | #define IPT_F_FRAG 0x01 /* Set if rule is a fragment rule */ |
63 | #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ | 94 | #define IPT_F_GOTO 0x02 /* Set if jump is a goto */ |
@@ -116,23 +147,6 @@ struct ipt_entry { | |||
116 | #define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) | 147 | #define IPT_SO_GET_REVISION_TARGET (IPT_BASE_CTL + 3) |
117 | #define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET | 148 | #define IPT_SO_GET_MAX IPT_SO_GET_REVISION_TARGET |
118 | 149 | ||
119 | #define IPT_CONTINUE XT_CONTINUE | ||
120 | #define IPT_RETURN XT_RETURN | ||
121 | |||
122 | #include <linux/netfilter/xt_tcpudp.h> | ||
123 | #define ipt_udp xt_udp | ||
124 | #define ipt_tcp xt_tcp | ||
125 | |||
126 | #define IPT_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
127 | #define IPT_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
128 | #define IPT_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
129 | #define IPT_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
130 | #define IPT_TCP_INV_MASK XT_TCP_INV_MASK | ||
131 | |||
132 | #define IPT_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
133 | #define IPT_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
134 | #define IPT_UDP_INV_MASK XT_UDP_INV_MASK | ||
135 | |||
136 | /* ICMP matching stuff */ | 150 | /* ICMP matching stuff */ |
137 | struct ipt_icmp { | 151 | struct ipt_icmp { |
138 | u_int8_t type; /* type to match */ | 152 | u_int8_t type; /* type to match */ |
@@ -146,7 +160,7 @@ struct ipt_icmp { | |||
146 | /* The argument to IPT_SO_GET_INFO */ | 160 | /* The argument to IPT_SO_GET_INFO */ |
147 | struct ipt_getinfo { | 161 | struct ipt_getinfo { |
148 | /* Which table: caller fills this in. */ | 162 | /* Which table: caller fills this in. */ |
149 | char name[IPT_TABLE_MAXNAMELEN]; | 163 | char name[XT_TABLE_MAXNAMELEN]; |
150 | 164 | ||
151 | /* Kernel fills these in. */ | 165 | /* Kernel fills these in. */ |
152 | /* Which hook entry points are valid: bitmask */ | 166 | /* Which hook entry points are valid: bitmask */ |
@@ -168,7 +182,7 @@ struct ipt_getinfo { | |||
168 | /* The argument to IPT_SO_SET_REPLACE. */ | 182 | /* The argument to IPT_SO_SET_REPLACE. */ |
169 | struct ipt_replace { | 183 | struct ipt_replace { |
170 | /* Which table. */ | 184 | /* Which table. */ |
171 | char name[IPT_TABLE_MAXNAMELEN]; | 185 | char name[XT_TABLE_MAXNAMELEN]; |
172 | 186 | ||
173 | /* Which hook entry points are valid: bitmask. You can't | 187 | /* Which hook entry points are valid: bitmask. You can't |
174 | change this. */ | 188 | change this. */ |
@@ -196,13 +210,10 @@ struct ipt_replace { | |||
196 | struct ipt_entry entries[0]; | 210 | struct ipt_entry entries[0]; |
197 | }; | 211 | }; |
198 | 212 | ||
199 | /* The argument to IPT_SO_ADD_COUNTERS. */ | ||
200 | #define ipt_counters_info xt_counters_info | ||
201 | |||
202 | /* The argument to IPT_SO_GET_ENTRIES. */ | 213 | /* The argument to IPT_SO_GET_ENTRIES. */ |
203 | struct ipt_get_entries { | 214 | struct ipt_get_entries { |
204 | /* Which table: user fills this in. */ | 215 | /* Which table: user fills this in. */ |
205 | char name[IPT_TABLE_MAXNAMELEN]; | 216 | char name[XT_TABLE_MAXNAMELEN]; |
206 | 217 | ||
207 | /* User fills this in: total entry size. */ | 218 | /* User fills this in: total entry size. */ |
208 | unsigned int size; | 219 | unsigned int size; |
@@ -211,28 +222,13 @@ struct ipt_get_entries { | |||
211 | struct ipt_entry entrytable[0]; | 222 | struct ipt_entry entrytable[0]; |
212 | }; | 223 | }; |
213 | 224 | ||
214 | /* Standard return verdict, or do jump. */ | ||
215 | #define IPT_STANDARD_TARGET XT_STANDARD_TARGET | ||
216 | /* Error verdict. */ | ||
217 | #define IPT_ERROR_TARGET XT_ERROR_TARGET | ||
218 | |||
219 | /* Helper functions */ | 225 | /* Helper functions */ |
220 | static __inline__ struct ipt_entry_target * | 226 | static __inline__ struct xt_entry_target * |
221 | ipt_get_target(struct ipt_entry *e) | 227 | ipt_get_target(struct ipt_entry *e) |
222 | { | 228 | { |
223 | return (void *)e + e->target_offset; | 229 | return (void *)e + e->target_offset; |
224 | } | 230 | } |
225 | 231 | ||
226 | #ifndef __KERNEL__ | ||
227 | /* fn returns 0 to continue iteration */ | ||
228 | #define IPT_MATCH_ITERATE(e, fn, args...) \ | ||
229 | XT_MATCH_ITERATE(struct ipt_entry, e, fn, ## args) | ||
230 | |||
231 | /* fn returns 0 to continue iteration */ | ||
232 | #define IPT_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
233 | XT_ENTRY_ITERATE(struct ipt_entry, entries, size, fn, ## args) | ||
234 | #endif | ||
235 | |||
236 | /* | 232 | /* |
237 | * Main firewall chains definitions and global var's definitions. | 233 | * Main firewall chains definitions and global var's definitions. |
238 | */ | 234 | */ |
@@ -249,17 +245,12 @@ extern void ipt_unregister_table(struct net *net, struct xt_table *table); | |||
249 | /* Standard entry. */ | 245 | /* Standard entry. */ |
250 | struct ipt_standard { | 246 | struct ipt_standard { |
251 | struct ipt_entry entry; | 247 | struct ipt_entry entry; |
252 | struct ipt_standard_target target; | 248 | struct xt_standard_target target; |
253 | }; | ||
254 | |||
255 | struct ipt_error_target { | ||
256 | struct ipt_entry_target target; | ||
257 | char errorname[IPT_FUNCTION_MAXNAMELEN]; | ||
258 | }; | 249 | }; |
259 | 250 | ||
260 | struct ipt_error { | 251 | struct ipt_error { |
261 | struct ipt_entry entry; | 252 | struct ipt_entry entry; |
262 | struct ipt_error_target target; | 253 | struct xt_error_target target; |
263 | }; | 254 | }; |
264 | 255 | ||
265 | #define IPT_ENTRY_INIT(__size) \ | 256 | #define IPT_ENTRY_INIT(__size) \ |
@@ -271,7 +262,7 @@ struct ipt_error { | |||
271 | #define IPT_STANDARD_INIT(__verdict) \ | 262 | #define IPT_STANDARD_INIT(__verdict) \ |
272 | { \ | 263 | { \ |
273 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ | 264 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_standard)), \ |
274 | .target = XT_TARGET_INIT(IPT_STANDARD_TARGET, \ | 265 | .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ |
275 | sizeof(struct xt_standard_target)), \ | 266 | sizeof(struct xt_standard_target)), \ |
276 | .target.verdict = -(__verdict) - 1, \ | 267 | .target.verdict = -(__verdict) - 1, \ |
277 | } | 268 | } |
@@ -279,8 +270,8 @@ struct ipt_error { | |||
279 | #define IPT_ERROR_INIT \ | 270 | #define IPT_ERROR_INIT \ |
280 | { \ | 271 | { \ |
281 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ | 272 | .entry = IPT_ENTRY_INIT(sizeof(struct ipt_error)), \ |
282 | .target = XT_TARGET_INIT(IPT_ERROR_TARGET, \ | 273 | .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ |
283 | sizeof(struct ipt_error_target)), \ | 274 | sizeof(struct xt_error_target)), \ |
284 | .target.errorname = "ERROR", \ | 275 | .target.errorname = "ERROR", \ |
285 | } | 276 | } |
286 | 277 | ||
@@ -291,8 +282,6 @@ extern unsigned int ipt_do_table(struct sk_buff *skb, | |||
291 | const struct net_device *out, | 282 | const struct net_device *out, |
292 | struct xt_table *table); | 283 | struct xt_table *table); |
293 | 284 | ||
294 | #define IPT_ALIGN(s) XT_ALIGN(s) | ||
295 | |||
296 | #ifdef CONFIG_COMPAT | 285 | #ifdef CONFIG_COMPAT |
297 | #include <net/compat.h> | 286 | #include <net/compat.h> |
298 | 287 | ||
@@ -307,14 +296,12 @@ struct compat_ipt_entry { | |||
307 | }; | 296 | }; |
308 | 297 | ||
309 | /* Helper functions */ | 298 | /* Helper functions */ |
310 | static inline struct ipt_entry_target * | 299 | static inline struct xt_entry_target * |
311 | compat_ipt_get_target(struct compat_ipt_entry *e) | 300 | compat_ipt_get_target(struct compat_ipt_entry *e) |
312 | { | 301 | { |
313 | return (void *)e + e->target_offset; | 302 | return (void *)e + e->target_offset; |
314 | } | 303 | } |
315 | 304 | ||
316 | #define COMPAT_IPT_ALIGN(s) COMPAT_XT_ALIGN(s) | ||
317 | |||
318 | #endif /* CONFIG_COMPAT */ | 305 | #endif /* CONFIG_COMPAT */ |
319 | #endif /*__KERNEL__*/ | 306 | #endif /*__KERNEL__*/ |
320 | #endif /* _IPTABLES_H */ | 307 | #endif /* _IPTABLES_H */ |
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h index 18442ff19c07..c9784f7a9c1f 100644 --- a/include/linux/netfilter_ipv6/ip6_tables.h +++ b/include/linux/netfilter_ipv6/ip6_tables.h | |||
@@ -27,13 +27,42 @@ | |||
27 | 27 | ||
28 | #include <linux/netfilter/x_tables.h> | 28 | #include <linux/netfilter/x_tables.h> |
29 | 29 | ||
30 | #ifndef __KERNEL__ | ||
30 | #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN | 31 | #define IP6T_FUNCTION_MAXNAMELEN XT_FUNCTION_MAXNAMELEN |
31 | #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN | 32 | #define IP6T_TABLE_MAXNAMELEN XT_TABLE_MAXNAMELEN |
32 | |||
33 | #define ip6t_match xt_match | 33 | #define ip6t_match xt_match |
34 | #define ip6t_target xt_target | 34 | #define ip6t_target xt_target |
35 | #define ip6t_table xt_table | 35 | #define ip6t_table xt_table |
36 | #define ip6t_get_revision xt_get_revision | 36 | #define ip6t_get_revision xt_get_revision |
37 | #define ip6t_entry_match xt_entry_match | ||
38 | #define ip6t_entry_target xt_entry_target | ||
39 | #define ip6t_standard_target xt_standard_target | ||
40 | #define ip6t_error_target xt_error_target | ||
41 | #define ip6t_counters xt_counters | ||
42 | #define IP6T_CONTINUE XT_CONTINUE | ||
43 | #define IP6T_RETURN XT_RETURN | ||
44 | |||
45 | /* Pre-iptables-1.4.0 */ | ||
46 | #include <linux/netfilter/xt_tcpudp.h> | ||
47 | #define ip6t_tcp xt_tcp | ||
48 | #define ip6t_udp xt_udp | ||
49 | #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
50 | #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
51 | #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
52 | #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
53 | #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK | ||
54 | #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
55 | #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
56 | #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK | ||
57 | |||
58 | #define ip6t_counters_info xt_counters_info | ||
59 | #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET | ||
60 | #define IP6T_ERROR_TARGET XT_ERROR_TARGET | ||
61 | #define IP6T_MATCH_ITERATE(e, fn, args...) \ | ||
62 | XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) | ||
63 | #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
64 | XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) | ||
65 | #endif | ||
37 | 66 | ||
38 | /* Yes, Virginia, you have to zero the padding. */ | 67 | /* Yes, Virginia, you have to zero the padding. */ |
39 | struct ip6t_ip6 { | 68 | struct ip6t_ip6 { |
@@ -62,12 +91,6 @@ struct ip6t_ip6 { | |||
62 | u_int8_t invflags; | 91 | u_int8_t invflags; |
63 | }; | 92 | }; |
64 | 93 | ||
65 | #define ip6t_entry_match xt_entry_match | ||
66 | #define ip6t_entry_target xt_entry_target | ||
67 | #define ip6t_standard_target xt_standard_target | ||
68 | |||
69 | #define ip6t_counters xt_counters | ||
70 | |||
71 | /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ | 94 | /* Values for "flag" field in struct ip6t_ip6 (general ip6 structure). */ |
72 | #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper | 95 | #define IP6T_F_PROTO 0x01 /* Set if rule cares about upper |
73 | protocols */ | 96 | protocols */ |
@@ -112,17 +135,12 @@ struct ip6t_entry { | |||
112 | /* Standard entry */ | 135 | /* Standard entry */ |
113 | struct ip6t_standard { | 136 | struct ip6t_standard { |
114 | struct ip6t_entry entry; | 137 | struct ip6t_entry entry; |
115 | struct ip6t_standard_target target; | 138 | struct xt_standard_target target; |
116 | }; | ||
117 | |||
118 | struct ip6t_error_target { | ||
119 | struct ip6t_entry_target target; | ||
120 | char errorname[IP6T_FUNCTION_MAXNAMELEN]; | ||
121 | }; | 139 | }; |
122 | 140 | ||
123 | struct ip6t_error { | 141 | struct ip6t_error { |
124 | struct ip6t_entry entry; | 142 | struct ip6t_entry entry; |
125 | struct ip6t_error_target target; | 143 | struct xt_error_target target; |
126 | }; | 144 | }; |
127 | 145 | ||
128 | #define IP6T_ENTRY_INIT(__size) \ | 146 | #define IP6T_ENTRY_INIT(__size) \ |
@@ -134,16 +152,16 @@ struct ip6t_error { | |||
134 | #define IP6T_STANDARD_INIT(__verdict) \ | 152 | #define IP6T_STANDARD_INIT(__verdict) \ |
135 | { \ | 153 | { \ |
136 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ | 154 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_standard)), \ |
137 | .target = XT_TARGET_INIT(IP6T_STANDARD_TARGET, \ | 155 | .target = XT_TARGET_INIT(XT_STANDARD_TARGET, \ |
138 | sizeof(struct ip6t_standard_target)), \ | 156 | sizeof(struct xt_standard_target)), \ |
139 | .target.verdict = -(__verdict) - 1, \ | 157 | .target.verdict = -(__verdict) - 1, \ |
140 | } | 158 | } |
141 | 159 | ||
142 | #define IP6T_ERROR_INIT \ | 160 | #define IP6T_ERROR_INIT \ |
143 | { \ | 161 | { \ |
144 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ | 162 | .entry = IP6T_ENTRY_INIT(sizeof(struct ip6t_error)), \ |
145 | .target = XT_TARGET_INIT(IP6T_ERROR_TARGET, \ | 163 | .target = XT_TARGET_INIT(XT_ERROR_TARGET, \ |
146 | sizeof(struct ip6t_error_target)), \ | 164 | sizeof(struct xt_error_target)), \ |
147 | .target.errorname = "ERROR", \ | 165 | .target.errorname = "ERROR", \ |
148 | } | 166 | } |
149 | 167 | ||
@@ -166,30 +184,6 @@ struct ip6t_error { | |||
166 | #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) | 184 | #define IP6T_SO_GET_REVISION_TARGET (IP6T_BASE_CTL + 5) |
167 | #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET | 185 | #define IP6T_SO_GET_MAX IP6T_SO_GET_REVISION_TARGET |
168 | 186 | ||
169 | /* CONTINUE verdict for targets */ | ||
170 | #define IP6T_CONTINUE XT_CONTINUE | ||
171 | |||
172 | /* For standard target */ | ||
173 | #define IP6T_RETURN XT_RETURN | ||
174 | |||
175 | /* TCP/UDP matching stuff */ | ||
176 | #include <linux/netfilter/xt_tcpudp.h> | ||
177 | |||
178 | #define ip6t_tcp xt_tcp | ||
179 | #define ip6t_udp xt_udp | ||
180 | |||
181 | /* Values for "inv" field in struct ipt_tcp. */ | ||
182 | #define IP6T_TCP_INV_SRCPT XT_TCP_INV_SRCPT | ||
183 | #define IP6T_TCP_INV_DSTPT XT_TCP_INV_DSTPT | ||
184 | #define IP6T_TCP_INV_FLAGS XT_TCP_INV_FLAGS | ||
185 | #define IP6T_TCP_INV_OPTION XT_TCP_INV_OPTION | ||
186 | #define IP6T_TCP_INV_MASK XT_TCP_INV_MASK | ||
187 | |||
188 | /* Values for "invflags" field in struct ipt_udp. */ | ||
189 | #define IP6T_UDP_INV_SRCPT XT_UDP_INV_SRCPT | ||
190 | #define IP6T_UDP_INV_DSTPT XT_UDP_INV_DSTPT | ||
191 | #define IP6T_UDP_INV_MASK XT_UDP_INV_MASK | ||
192 | |||
193 | /* ICMP matching stuff */ | 187 | /* ICMP matching stuff */ |
194 | struct ip6t_icmp { | 188 | struct ip6t_icmp { |
195 | u_int8_t type; /* type to match */ | 189 | u_int8_t type; /* type to match */ |
@@ -203,7 +197,7 @@ struct ip6t_icmp { | |||
203 | /* The argument to IP6T_SO_GET_INFO */ | 197 | /* The argument to IP6T_SO_GET_INFO */ |
204 | struct ip6t_getinfo { | 198 | struct ip6t_getinfo { |
205 | /* Which table: caller fills this in. */ | 199 | /* Which table: caller fills this in. */ |
206 | char name[IP6T_TABLE_MAXNAMELEN]; | 200 | char name[XT_TABLE_MAXNAMELEN]; |
207 | 201 | ||
208 | /* Kernel fills these in. */ | 202 | /* Kernel fills these in. */ |
209 | /* Which hook entry points are valid: bitmask */ | 203 | /* Which hook entry points are valid: bitmask */ |
@@ -225,7 +219,7 @@ struct ip6t_getinfo { | |||
225 | /* The argument to IP6T_SO_SET_REPLACE. */ | 219 | /* The argument to IP6T_SO_SET_REPLACE. */ |
226 | struct ip6t_replace { | 220 | struct ip6t_replace { |
227 | /* Which table. */ | 221 | /* Which table. */ |
228 | char name[IP6T_TABLE_MAXNAMELEN]; | 222 | char name[XT_TABLE_MAXNAMELEN]; |
229 | 223 | ||
230 | /* Which hook entry points are valid: bitmask. You can't | 224 | /* Which hook entry points are valid: bitmask. You can't |
231 | change this. */ | 225 | change this. */ |
@@ -253,13 +247,10 @@ struct ip6t_replace { | |||
253 | struct ip6t_entry entries[0]; | 247 | struct ip6t_entry entries[0]; |
254 | }; | 248 | }; |
255 | 249 | ||
256 | /* The argument to IP6T_SO_ADD_COUNTERS. */ | ||
257 | #define ip6t_counters_info xt_counters_info | ||
258 | |||
259 | /* The argument to IP6T_SO_GET_ENTRIES. */ | 250 | /* The argument to IP6T_SO_GET_ENTRIES. */ |
260 | struct ip6t_get_entries { | 251 | struct ip6t_get_entries { |
261 | /* Which table: user fills this in. */ | 252 | /* Which table: user fills this in. */ |
262 | char name[IP6T_TABLE_MAXNAMELEN]; | 253 | char name[XT_TABLE_MAXNAMELEN]; |
263 | 254 | ||
264 | /* User fills this in: total entry size. */ | 255 | /* User fills this in: total entry size. */ |
265 | unsigned int size; | 256 | unsigned int size; |
@@ -268,28 +259,13 @@ struct ip6t_get_entries { | |||
268 | struct ip6t_entry entrytable[0]; | 259 | struct ip6t_entry entrytable[0]; |
269 | }; | 260 | }; |
270 | 261 | ||
271 | /* Standard return verdict, or do jump. */ | ||
272 | #define IP6T_STANDARD_TARGET XT_STANDARD_TARGET | ||
273 | /* Error verdict. */ | ||
274 | #define IP6T_ERROR_TARGET XT_ERROR_TARGET | ||
275 | |||
276 | /* Helper functions */ | 262 | /* Helper functions */ |
277 | static __inline__ struct ip6t_entry_target * | 263 | static __inline__ struct xt_entry_target * |
278 | ip6t_get_target(struct ip6t_entry *e) | 264 | ip6t_get_target(struct ip6t_entry *e) |
279 | { | 265 | { |
280 | return (void *)e + e->target_offset; | 266 | return (void *)e + e->target_offset; |
281 | } | 267 | } |
282 | 268 | ||
283 | #ifndef __KERNEL__ | ||
284 | /* fn returns 0 to continue iteration */ | ||
285 | #define IP6T_MATCH_ITERATE(e, fn, args...) \ | ||
286 | XT_MATCH_ITERATE(struct ip6t_entry, e, fn, ## args) | ||
287 | |||
288 | /* fn returns 0 to continue iteration */ | ||
289 | #define IP6T_ENTRY_ITERATE(entries, size, fn, args...) \ | ||
290 | XT_ENTRY_ITERATE(struct ip6t_entry, entries, size, fn, ## args) | ||
291 | #endif | ||
292 | |||
293 | /* | 269 | /* |
294 | * Main firewall chains definitions and global var's definitions. | 270 | * Main firewall chains definitions and global var's definitions. |
295 | */ | 271 | */ |
@@ -316,8 +292,6 @@ extern int ip6t_ext_hdr(u8 nexthdr); | |||
316 | extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, | 292 | extern int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, |
317 | int target, unsigned short *fragoff); | 293 | int target, unsigned short *fragoff); |
318 | 294 | ||
319 | #define IP6T_ALIGN(s) XT_ALIGN(s) | ||
320 | |||
321 | #ifdef CONFIG_COMPAT | 295 | #ifdef CONFIG_COMPAT |
322 | #include <net/compat.h> | 296 | #include <net/compat.h> |
323 | 297 | ||
@@ -331,14 +305,12 @@ struct compat_ip6t_entry { | |||
331 | unsigned char elems[0]; | 305 | unsigned char elems[0]; |
332 | }; | 306 | }; |
333 | 307 | ||
334 | static inline struct ip6t_entry_target * | 308 | static inline struct xt_entry_target * |
335 | compat_ip6t_get_target(struct compat_ip6t_entry *e) | 309 | compat_ip6t_get_target(struct compat_ip6t_entry *e) |
336 | { | 310 | { |
337 | return (void *)e + e->target_offset; | 311 | return (void *)e + e->target_offset; |
338 | } | 312 | } |
339 | 313 | ||
340 | #define COMPAT_IP6T_ALIGN(s) COMPAT_XT_ALIGN(s) | ||
341 | |||
342 | #endif /* CONFIG_COMPAT */ | 314 | #endif /* CONFIG_COMPAT */ |
343 | #endif /*__KERNEL__*/ | 315 | #endif /*__KERNEL__*/ |
344 | #endif /* _IP6_TABLES_H */ | 316 | #endif /* _IP6_TABLES_H */ |
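With the userspace-only IP6T_MATCH_ITERATE/IP6T_ENTRY_ITERATE helpers removed from the header above (and IP6T_ALIGN dropped in favour of XT_ALIGN), rule blobs are walked by hand using the offsets inside struct ip6t_entry. A minimal sketch of such a walk, assuming a blob/size pair as obtained via the IP6T_SO_GET_ENTRIES getsockopt; walk_ip6t_entries and its callback convention (return 0 to continue) simply mirror the removed macro and are not kernel API.

#include <linux/netfilter_ipv6/ip6_tables.h>

/* Illustrative only: visit every ip6t_entry in a rule blob of the given
 * size.  Rules are variable-sized, so the walk advances by next_offset;
 * the callback returns 0 to continue, non-zero to stop early, matching
 * the convention of the removed IP6T_ENTRY_ITERATE macro. */
static int walk_ip6t_entries(void *blob, unsigned int size,
                             int (*fn)(struct ip6t_entry *e))
{
        unsigned int off = 0;

        while (off < size) {
                struct ip6t_entry *e = (struct ip6t_entry *)((char *)blob + off);
                int ret = fn(e);        /* e's target is at ip6t_get_target(e) */

                if (ret)
                        return ret;
                off += e->next_offset;
        }
        return 0;
}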
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 59d066936ab9..123566912d73 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -27,8 +27,6 @@ | |||
27 | 27 | ||
28 | #define MAX_LINKS 32 | 28 | #define MAX_LINKS 32 |
29 | 29 | ||
30 | struct net; | ||
31 | |||
32 | struct sockaddr_nl { | 30 | struct sockaddr_nl { |
33 | sa_family_t nl_family; /* AF_NETLINK */ | 31 | sa_family_t nl_family; /* AF_NETLINK */ |
34 | unsigned short nl_pad; /* zero */ | 32 | unsigned short nl_pad; /* zero */ |
@@ -151,6 +149,8 @@ struct nlattr { | |||
151 | #include <linux/capability.h> | 149 | #include <linux/capability.h> |
152 | #include <linux/skbuff.h> | 150 | #include <linux/skbuff.h> |
153 | 151 | ||
152 | struct net; | ||
153 | |||
154 | static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) | 154 | static inline struct nlmsghdr *nlmsg_hdr(const struct sk_buff *skb) |
155 | { | 155 | { |
156 | return (struct nlmsghdr *)skb->data; | 156 | return (struct nlmsghdr *)skb->data; |
diff --git a/include/linux/netpoll.h b/include/linux/netpoll.h index 791d5109f34c..79358bb712c6 100644 --- a/include/linux/netpoll.h +++ b/include/linux/netpoll.h | |||
@@ -14,7 +14,6 @@ | |||
14 | 14 | ||
15 | struct netpoll { | 15 | struct netpoll { |
16 | struct net_device *dev; | 16 | struct net_device *dev; |
17 | struct net_device *real_dev; | ||
18 | char dev_name[IFNAMSIZ]; | 17 | char dev_name[IFNAMSIZ]; |
19 | const char *name; | 18 | const char *name; |
20 | void (*rx_hook)(struct netpoll *, int, char *, int); | 19 | void (*rx_hook)(struct netpoll *, int, char *, int); |
@@ -53,7 +52,13 @@ void netpoll_set_trap(int trap); | |||
53 | void __netpoll_cleanup(struct netpoll *np); | 52 | void __netpoll_cleanup(struct netpoll *np); |
54 | void netpoll_cleanup(struct netpoll *np); | 53 | void netpoll_cleanup(struct netpoll *np); |
55 | int __netpoll_rx(struct sk_buff *skb); | 54 | int __netpoll_rx(struct sk_buff *skb); |
56 | void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb); | 55 | void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb, |
56 | struct net_device *dev); | ||
57 | static inline void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb) | ||
58 | { | ||
59 | netpoll_send_skb_on_dev(np, skb, np->dev); | ||
60 | } | ||
61 | |||
57 | 62 | ||
58 | 63 | ||
59 | #ifdef CONFIG_NETPOLL | 64 | #ifdef CONFIG_NETPOLL |
@@ -63,20 +68,20 @@ static inline bool netpoll_rx(struct sk_buff *skb) | |||
63 | unsigned long flags; | 68 | unsigned long flags; |
64 | bool ret = false; | 69 | bool ret = false; |
65 | 70 | ||
66 | rcu_read_lock_bh(); | 71 | local_irq_save(flags); |
67 | npinfo = rcu_dereference_bh(skb->dev->npinfo); | 72 | npinfo = rcu_dereference_bh(skb->dev->npinfo); |
68 | 73 | ||
69 | if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) | 74 | if (!npinfo || (list_empty(&npinfo->rx_np) && !npinfo->rx_flags)) |
70 | goto out; | 75 | goto out; |
71 | 76 | ||
72 | spin_lock_irqsave(&npinfo->rx_lock, flags); | 77 | spin_lock(&npinfo->rx_lock); |
73 | /* check rx_flags again with the lock held */ | 78 | /* check rx_flags again with the lock held */ |
74 | if (npinfo->rx_flags && __netpoll_rx(skb)) | 79 | if (npinfo->rx_flags && __netpoll_rx(skb)) |
75 | ret = true; | 80 | ret = true; |
76 | spin_unlock_irqrestore(&npinfo->rx_lock, flags); | 81 | spin_unlock(&npinfo->rx_lock); |
77 | 82 | ||
78 | out: | 83 | out: |
79 | rcu_read_unlock_bh(); | 84 | local_irq_restore(flags); |
80 | return ret; | 85 | return ret; |
81 | } | 86 | } |
82 | 87 | ||
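The netpoll.h hunk above splits transmission into netpoll_send_skb_on_dev(), which takes an explicit output device, and the old netpoll_send_skb(), now a thin inline that passes np->dev. A rough sketch of how a stacked (bonding-style) driver might choose between the two; struct example_priv and example_active_lower() are hypothetical placeholders, not kernel interfaces.

#include <linux/netdevice.h>
#include <linux/netpoll.h>
#include <linux/skbuff.h>

struct example_priv;                                     /* hypothetical driver state */
struct net_device *example_active_lower(struct example_priv *priv);  /* hypothetical */

/* Illustrative only: send a netpoll frame on the active lower device when
 * one is known, otherwise fall back to the np->dev default path. */
static void example_netpoll_xmit(struct netpoll *np, struct sk_buff *skb,
                                 struct example_priv *priv)
{
        struct net_device *lower = example_active_lower(priv);

        if (lower)
                netpoll_send_skb_on_dev(np, skb, lower);
        else
                netpoll_send_skb(np, skb);      /* inline wrapper, uses np->dev */
}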
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h index 508f8cf6da37..d0edf7d823ae 100644 --- a/include/linux/nfs_fs.h +++ b/include/linux/nfs_fs.h | |||
@@ -185,7 +185,7 @@ struct nfs_inode { | |||
185 | struct nfs4_cached_acl *nfs4_acl; | 185 | struct nfs4_cached_acl *nfs4_acl; |
186 | /* NFSv4 state */ | 186 | /* NFSv4 state */ |
187 | struct list_head open_states; | 187 | struct list_head open_states; |
188 | struct nfs_delegation *delegation; | 188 | struct nfs_delegation __rcu *delegation; |
189 | fmode_t delegation_state; | 189 | fmode_t delegation_state; |
190 | struct rw_semaphore rwsem; | 190 | struct rw_semaphore rwsem; |
191 | #endif /* CONFIG_NFS_V4*/ | 191 | #endif /* CONFIG_NFS_V4*/ |
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h index f5487b6f91ed..227e49dd5720 100644 --- a/include/linux/nilfs2_fs.h +++ b/include/linux/nilfs2_fs.h | |||
@@ -4,16 +4,16 @@ | |||
4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. | 4 | * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify | 6 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 7 | * it under the terms of the GNU Lesser General Public License as published |
8 | * the Free Software Foundation; either version 2 of the License, or | 8 | * by the Free Software Foundation; either version 2.1 of the License, or |
9 | * (at your option) any later version. | 9 | * (at your option) any later version. |
10 | * | 10 | * |
11 | * This program is distributed in the hope that it will be useful, | 11 | * This program is distributed in the hope that it will be useful, |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | * GNU General Public License for more details. | 14 | * GNU Lesser General Public License for more details. |
15 | * | 15 | * |
16 | * You should have received a copy of the GNU General Public License | 16 | * You should have received a copy of the GNU Lesser General Public License |
17 | * along with this program; if not, write to the Free Software | 17 | * along with this program; if not, write to the Free Software |
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA | 18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA |
19 | * | 19 | * |
@@ -147,7 +147,6 @@ struct nilfs_super_root { | |||
147 | #define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ | 147 | #define NILFS_MOUNT_ERRORS_CONT 0x0010 /* Continue on errors */ |
148 | #define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ | 148 | #define NILFS_MOUNT_ERRORS_RO 0x0020 /* Remount fs ro on errors */ |
149 | #define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ | 149 | #define NILFS_MOUNT_ERRORS_PANIC 0x0040 /* Panic on errors */ |
150 | #define NILFS_MOUNT_SNAPSHOT 0x0080 /* Snapshot flag */ | ||
151 | #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ | 150 | #define NILFS_MOUNT_BARRIER 0x1000 /* Use block barriers */ |
152 | #define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order | 151 | #define NILFS_MOUNT_STRICT_ORDER 0x2000 /* Apply strict in-order |
153 | semantics also for data */ | 152 | semantics also for data */ |
@@ -229,6 +228,7 @@ struct nilfs_super_block { | |||
229 | */ | 228 | */ |
230 | #define NILFS_CURRENT_REV 2 /* current major revision */ | 229 | #define NILFS_CURRENT_REV 2 /* current major revision */ |
231 | #define NILFS_MINOR_REV 0 /* minor revision */ | 230 | #define NILFS_MINOR_REV 0 /* minor revision */ |
231 | #define NILFS_MIN_SUPP_REV 2 /* minimum supported revision */ | ||
232 | 232 | ||
233 | /* | 233 | /* |
234 | * Feature set definitions | 234 | * Feature set definitions |
@@ -270,6 +270,14 @@ struct nilfs_super_block { | |||
270 | segments */ | 270 | segments */ |
271 | 271 | ||
272 | /* | 272 | /* |
273 | * We call DAT, cpfile, and sufile root metadata files. Inodes of | ||
274 | * these files are written in super root block instead of ifile, and | ||
275 | * garbage collector doesn't keep any past versions of these files. | ||
276 | */ | ||
277 | #define NILFS_ROOT_METADATA_FILE(ino) \ | ||
278 | ((ino) >= NILFS_DAT_INO && (ino) <= NILFS_SUFILE_INO) | ||
279 | |||
280 | /* | ||
273 | * bytes offset of secondary super block | 281 | * bytes offset of secondary super block |
274 | */ | 282 | */ |
275 | #define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) | 283 | #define NILFS_SB2_OFFSET_BYTES(devsize) ((((devsize) >> 12) - 1) << 12) |
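The NILFS_ROOT_METADATA_FILE() macro added above is a plain inode-number range check covering the DAT, cpfile and sufile described in its comment. A small, hedged illustration of how such a predicate might be used; nilfs_keeps_past_versions() is a made-up name for the sketch.

#include <linux/nilfs2_fs.h>

/* Illustrative only: root metadata files live in the super root and the
 * garbage collector keeps no past versions of them, so they are treated
 * differently from ordinary ifile-managed inodes. */
static int nilfs_keeps_past_versions(unsigned long ino)
{
        return !NILFS_ROOT_METADATA_FILE(ino);
}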
diff --git a/include/linux/nl80211.h b/include/linux/nl80211.h index 2c8701687336..0edb2566c14c 100644 --- a/include/linux/nl80211.h +++ b/include/linux/nl80211.h | |||
@@ -40,6 +40,43 @@ | |||
40 | */ | 40 | */ |
41 | 41 | ||
42 | /** | 42 | /** |
43 | * DOC: Frame transmission/registration support | ||
44 | * | ||
45 | * Frame transmission and registration support exists to allow userspace | ||
46 | * management entities such as wpa_supplicant react to management frames | ||
47 | * that are not being handled by the kernel. This includes, for example, | ||
48 | * certain classes of action frames that cannot be handled in the kernel | ||
49 | * for various reasons. | ||
50 | * | ||
51 | * Frame registration is done on a per-interface basis and registrations | ||
52 | * cannot be removed other than by closing the socket. It is possible to | ||
53 | * specify a registration filter to register, for example, only for a | ||
54 | * certain type of action frame. In particular with action frames, those | ||
55 | * that userspace registers for will not be returned as unhandled by the | ||
56 | * driver, so that the registered application has to take responsibility | ||
57 | * for doing that. | ||
58 | * | ||
59 | * The type of frame that can be registered for is also dependent on the | ||
60 | * driver and interface type. The frame types are advertised in wiphy | ||
61 | * attributes so applications know what to expect. | ||
62 | * | ||
63 | * NOTE: When an interface changes type while registrations are active, | ||
64 | * these registrations are ignored until the interface type is | ||
65 | * changed again. This means that changing the interface type can | ||
66 | * lead to a situation that couldn't otherwise be produced, but | ||
67 | * any such registrations will be dormant in the sense that they | ||
68 | * will not be serviced, i.e. they will not receive any frames. | ||
69 | * | ||
70 | * Frame transmission allows userspace to send for example the required | ||
71 | * responses to action frames. It is subject to some sanity checking, | ||
72 | * but many frames can be transmitted. When a frame was transmitted, its | ||
73 | * status is indicated to the sending socket. | ||
74 | * | ||
75 | * For more technical details, see the corresponding command descriptions | ||
76 | * below. | ||
77 | */ | ||
78 | |||
79 | /** | ||
43 | * enum nl80211_commands - supported nl80211 commands | 80 | * enum nl80211_commands - supported nl80211 commands |
44 | * | 81 | * |
45 | * @NL80211_CMD_UNSPEC: unspecified command to catch errors | 82 | * @NL80211_CMD_UNSPEC: unspecified command to catch errors |
@@ -258,7 +295,9 @@ | |||
258 | * auth and assoc steps. For this, you need to specify the SSID in a | 295 | * auth and assoc steps. For this, you need to specify the SSID in a |
259 | * %NL80211_ATTR_SSID attribute, and can optionally specify the association | 296 | * %NL80211_ATTR_SSID attribute, and can optionally specify the association |
260 | * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, | 297 | * IEs in %NL80211_ATTR_IE, %NL80211_ATTR_AUTH_TYPE, %NL80211_ATTR_MAC, |
261 | * %NL80211_ATTR_WIPHY_FREQ and %NL80211_ATTR_CONTROL_PORT. | 298 | * %NL80211_ATTR_WIPHY_FREQ, %NL80211_ATTR_CONTROL_PORT, |
299 | * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE and | ||
300 | * %NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT. | ||
262 | * It is also sent as an event, with the BSSID and response IEs when the | 301 | * It is also sent as an event, with the BSSID and response IEs when the |
263 | * connection is established or failed to be established. This can be | 302 | * connection is established or failed to be established. This can be |
264 | * determined by the STATUS_CODE attribute. | 303 | * determined by the STATUS_CODE attribute. |
@@ -276,8 +315,8 @@ | |||
276 | * channel for the specified amount of time. This can be used to do | 315 | * channel for the specified amount of time. This can be used to do |
277 | * off-channel operations like transmit a Public Action frame and wait for | 316 | * off-channel operations like transmit a Public Action frame and wait for |
278 | * a response while being associated to an AP on another channel. | 317 | * a response while being associated to an AP on another channel. |
279 | * %NL80211_ATTR_WIPHY or %NL80211_ATTR_IFINDEX is used to specify which | 318 | * %NL80211_ATTR_IFINDEX is used to specify which interface (and thus |
280 | * radio is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the | 319 | * radio) is used. %NL80211_ATTR_WIPHY_FREQ is used to specify the |
281 | * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be | 320 | * frequency for the operation and %NL80211_ATTR_WIPHY_CHANNEL_TYPE may be |
282 | * optionally used to specify additional channel parameters. | 321 | * optionally used to specify additional channel parameters. |
283 | * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds | 322 | * %NL80211_ATTR_DURATION is used to specify the duration in milliseconds |
@@ -301,16 +340,20 @@ | |||
301 | * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface | 340 | * rate selection. %NL80211_ATTR_IFINDEX is used to specify the interface |
302 | * and @NL80211_ATTR_TX_RATES the set of allowed rates. | 341 | * and @NL80211_ATTR_TX_RATES the set of allowed rates. |
303 | * | 342 | * |
304 | * @NL80211_CMD_REGISTER_ACTION: Register for receiving certain action frames | 343 | * @NL80211_CMD_REGISTER_FRAME: Register for receiving certain mgmt frames |
305 | * (via @NL80211_CMD_ACTION) for processing in userspace. This command | 344 | * (via @NL80211_CMD_FRAME) for processing in userspace. This command |
306 | * requires an interface index and a match attribute containing the first | 345 | * requires an interface index, a frame type attribute (optional for |
307 | * few bytes of the frame that should match, e.g. a single byte for only | 346 | * backward compatibility reasons, if not given assumes action frames) |
308 | * a category match or four bytes for vendor frames including the OUI. | 347 | * and a match attribute containing the first few bytes of the frame |
309 | * The registration cannot be dropped, but is removed automatically | 348 | * that should match, e.g. a single byte for only a category match or |
310 | * when the netlink socket is closed. Multiple registrations can be made. | 349 | * four bytes for vendor frames including the OUI. The registration |
311 | * @NL80211_CMD_ACTION: Action frame TX request and RX notification. This | 350 | * cannot be dropped, but is removed automatically when the netlink |
312 | * command is used both as a request to transmit an Action frame and as an | 351 | * socket is closed. Multiple registrations can be made. |
313 | * event indicating reception of an Action frame that was not processed in | 352 | * @NL80211_CMD_REGISTER_ACTION: Alias for @NL80211_CMD_REGISTER_FRAME for |
353 | * backward compatibility | ||
354 | * @NL80211_CMD_FRAME: Management frame TX request and RX notification. This | ||
355 | * command is used both as a request to transmit a management frame and | ||
356 | * as an event indicating reception of a frame that was not processed in | ||
314 | * kernel code, but is for us (i.e., which may need to be processed in a | 357 | * kernel code, but is for us (i.e., which may need to be processed in a |
315 | * user space application). %NL80211_ATTR_FRAME is used to specify the | 358 | * user space application). %NL80211_ATTR_FRAME is used to specify the |
316 | * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and | 359 | * frame contents (including header). %NL80211_ATTR_WIPHY_FREQ (and |
@@ -320,11 +363,14 @@ | |||
320 | * operational channel). When called, this operation returns a cookie | 363 | * operational channel). When called, this operation returns a cookie |
321 | * (%NL80211_ATTR_COOKIE) that will be included with the TX status event | 364 | * (%NL80211_ATTR_COOKIE) that will be included with the TX status event |
322 | * pertaining to the TX request. | 365 | * pertaining to the TX request. |
323 | * @NL80211_CMD_ACTION_TX_STATUS: Report TX status of an Action frame | 366 | * @NL80211_CMD_ACTION: Alias for @NL80211_CMD_FRAME for backward compatibility. |
324 | * transmitted with %NL80211_CMD_ACTION. %NL80211_ATTR_COOKIE identifies | 367 | * @NL80211_CMD_FRAME_TX_STATUS: Report TX status of a management frame |
368 | * transmitted with %NL80211_CMD_FRAME. %NL80211_ATTR_COOKIE identifies | ||
325 | * the TX command and %NL80211_ATTR_FRAME includes the contents of the | 369 | * the TX command and %NL80211_ATTR_FRAME includes the contents of the |
326 | * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged | 370 | * frame. %NL80211_ATTR_ACK flag is included if the recipient acknowledged |
327 | * the frame. | 371 | * the frame. |
372 | * @NL80211_CMD_ACTION_TX_STATUS: Alias for @NL80211_CMD_FRAME_TX_STATUS for | ||
373 | * backward compatibility. | ||
328 | * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command | 374 | * @NL80211_CMD_SET_CQM: Connection quality monitor configuration. This command |
329 | * is used to configure connection quality monitoring notification trigger | 375 | * is used to configure connection quality monitoring notification trigger |
330 | * levels. | 376 | * levels. |
@@ -341,6 +387,8 @@ | |||
341 | * of any other interfaces, and other interfaces will again take | 387 | * of any other interfaces, and other interfaces will again take |
342 | * precedence when they are used. | 388 | * precedence when they are used. |
343 | * | 389 | * |
390 | * @NL80211_CMD_SET_WDS_PEER: Set the MAC address of the peer on a WDS interface. | ||
391 | * | ||
344 | * @NL80211_CMD_MAX: highest used command number | 392 | * @NL80211_CMD_MAX: highest used command number |
345 | * @__NL80211_CMD_AFTER_LAST: internal use | 393 | * @__NL80211_CMD_AFTER_LAST: internal use |
346 | */ | 394 | */ |
@@ -429,9 +477,12 @@ enum nl80211_commands { | |||
429 | 477 | ||
430 | NL80211_CMD_SET_TX_BITRATE_MASK, | 478 | NL80211_CMD_SET_TX_BITRATE_MASK, |
431 | 479 | ||
432 | NL80211_CMD_REGISTER_ACTION, | 480 | NL80211_CMD_REGISTER_FRAME, |
433 | NL80211_CMD_ACTION, | 481 | NL80211_CMD_REGISTER_ACTION = NL80211_CMD_REGISTER_FRAME, |
434 | NL80211_CMD_ACTION_TX_STATUS, | 482 | NL80211_CMD_FRAME, |
483 | NL80211_CMD_ACTION = NL80211_CMD_FRAME, | ||
484 | NL80211_CMD_FRAME_TX_STATUS, | ||
485 | NL80211_CMD_ACTION_TX_STATUS = NL80211_CMD_FRAME_TX_STATUS, | ||
435 | 486 | ||
436 | NL80211_CMD_SET_POWER_SAVE, | 487 | NL80211_CMD_SET_POWER_SAVE, |
437 | NL80211_CMD_GET_POWER_SAVE, | 488 | NL80211_CMD_GET_POWER_SAVE, |
@@ -440,6 +491,7 @@ enum nl80211_commands { | |||
440 | NL80211_CMD_NOTIFY_CQM, | 491 | NL80211_CMD_NOTIFY_CQM, |
441 | 492 | ||
442 | NL80211_CMD_SET_CHANNEL, | 493 | NL80211_CMD_SET_CHANNEL, |
494 | NL80211_CMD_SET_WDS_PEER, | ||
443 | 495 | ||
444 | /* add new commands above here */ | 496 | /* add new commands above here */ |
445 | 497 | ||
@@ -639,6 +691,15 @@ enum nl80211_commands { | |||
639 | * request, the driver will assume that the port is unauthorized until | 691 | * request, the driver will assume that the port is unauthorized until |
640 | * authorized by user space. Otherwise, port is marked authorized by | 692 | * authorized by user space. Otherwise, port is marked authorized by |
641 | * default in station mode. | 693 | * default in station mode. |
694 | * @NL80211_ATTR_CONTROL_PORT_ETHERTYPE: A 16-bit value indicating the | ||
695 | * ethertype that will be used for key negotiation. It can be | ||
696 | * specified with the associate and connect commands. If it is not | ||
697 | * specified, the value defaults to 0x888E (PAE, 802.1X). This | ||
698 | * attribute is also used as a flag in the wiphy information to | ||
699 | * indicate that protocols other than PAE are supported. | ||
700 | * @NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT: When included along with | ||
701 | * %NL80211_ATTR_CONTROL_PORT_ETHERTYPE, indicates that the custom | ||
702 | * ethertype frames used for key negotiation must not be encrypted. | ||
642 | * | 703 | * |
643 | * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. | 704 | * @NL80211_ATTR_TESTDATA: Testmode data blob, passed through to the driver. |
644 | * We recommend using nested, driver-specific attributes within this. | 705 | * We recommend using nested, driver-specific attributes within this. |
@@ -708,7 +769,16 @@ enum nl80211_commands { | |||
708 | * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. | 769 | * is used with %NL80211_CMD_SET_TX_BITRATE_MASK. |
709 | * | 770 | * |
710 | * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain | 771 | * @NL80211_ATTR_FRAME_MATCH: A binary attribute which typically must contain |
711 | * at least one byte, currently used with @NL80211_CMD_REGISTER_ACTION. | 772 | * at least one byte, currently used with @NL80211_CMD_REGISTER_FRAME. |
773 | * @NL80211_ATTR_FRAME_TYPE: A u16 indicating the frame type/subtype for the | ||
774 | * @NL80211_CMD_REGISTER_FRAME command. | ||
775 | * @NL80211_ATTR_TX_FRAME_TYPES: wiphy capability attribute, which is a | ||
776 | * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing | ||
777 | * information about which frame types can be transmitted with | ||
778 | * %NL80211_CMD_FRAME. | ||
779 | * @NL80211_ATTR_RX_FRAME_TYPES: wiphy capability attribute, which is a | ||
780 | * nested attribute of %NL80211_ATTR_FRAME_TYPE attributes, containing | ||
781 | * information about which frame types can be registered for RX. | ||
712 | * | 782 | * |
713 | * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was | 783 | * @NL80211_ATTR_ACK: Flag attribute indicating that the frame was |
714 | * acknowledged by the recipient. | 784 | * acknowledged by the recipient. |
@@ -731,6 +801,9 @@ enum nl80211_commands { | |||
731 | * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING | 801 | * This is used in association with @NL80211_ATTR_WIPHY_TX_POWER_SETTING |
732 | * for non-automatic settings. | 802 | * for non-automatic settings. |
733 | * | 803 | * |
804 | * @NL80211_ATTR_SUPPORT_IBSS_RSN: The device supports IBSS RSN, which mostly | ||
805 | * means support for per-station GTKs. | ||
806 | * | ||
734 | * @NL80211_ATTR_MAX: highest attribute number currently defined | 807 | * @NL80211_ATTR_MAX: highest attribute number currently defined |
735 | * @__NL80211_ATTR_AFTER_LAST: internal use | 808 | * @__NL80211_ATTR_AFTER_LAST: internal use |
736 | */ | 809 | */ |
@@ -891,6 +964,15 @@ enum nl80211_attrs { | |||
891 | NL80211_ATTR_WIPHY_TX_POWER_SETTING, | 964 | NL80211_ATTR_WIPHY_TX_POWER_SETTING, |
892 | NL80211_ATTR_WIPHY_TX_POWER_LEVEL, | 965 | NL80211_ATTR_WIPHY_TX_POWER_LEVEL, |
893 | 966 | ||
967 | NL80211_ATTR_TX_FRAME_TYPES, | ||
968 | NL80211_ATTR_RX_FRAME_TYPES, | ||
969 | NL80211_ATTR_FRAME_TYPE, | ||
970 | |||
971 | NL80211_ATTR_CONTROL_PORT_ETHERTYPE, | ||
972 | NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT, | ||
973 | |||
974 | NL80211_ATTR_SUPPORT_IBSS_RSN, | ||
975 | |||
894 | /* add attributes here, update the policy in nl80211.c */ | 976 | /* add attributes here, update the policy in nl80211.c */ |
895 | 977 | ||
896 | __NL80211_ATTR_AFTER_LAST, | 978 | __NL80211_ATTR_AFTER_LAST, |
@@ -946,8 +1028,10 @@ enum nl80211_attrs { | |||
946 | * @NL80211_IFTYPE_WDS: wireless distribution interface | 1028 | * @NL80211_IFTYPE_WDS: wireless distribution interface |
947 | * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames | 1029 | * @NL80211_IFTYPE_MONITOR: monitor interface receiving all frames |
948 | * @NL80211_IFTYPE_MESH_POINT: mesh point | 1030 | * @NL80211_IFTYPE_MESH_POINT: mesh point |
1031 | * @NL80211_IFTYPE_P2P_CLIENT: P2P client | ||
1032 | * @NL80211_IFTYPE_P2P_GO: P2P group owner | ||
949 | * @NL80211_IFTYPE_MAX: highest interface type number currently defined | 1033 | * @NL80211_IFTYPE_MAX: highest interface type number currently defined |
950 | * @__NL80211_IFTYPE_AFTER_LAST: internal use | 1034 | * @NUM_NL80211_IFTYPES: number of defined interface types |
951 | * | 1035 | * |
952 | * These values are used with the %NL80211_ATTR_IFTYPE | 1036 | * These values are used with the %NL80211_ATTR_IFTYPE |
953 | * to set the type of an interface. | 1037 | * to set the type of an interface. |
@@ -962,10 +1046,12 @@ enum nl80211_iftype { | |||
962 | NL80211_IFTYPE_WDS, | 1046 | NL80211_IFTYPE_WDS, |
963 | NL80211_IFTYPE_MONITOR, | 1047 | NL80211_IFTYPE_MONITOR, |
964 | NL80211_IFTYPE_MESH_POINT, | 1048 | NL80211_IFTYPE_MESH_POINT, |
1049 | NL80211_IFTYPE_P2P_CLIENT, | ||
1050 | NL80211_IFTYPE_P2P_GO, | ||
965 | 1051 | ||
966 | /* keep last */ | 1052 | /* keep last */ |
967 | __NL80211_IFTYPE_AFTER_LAST, | 1053 | NUM_NL80211_IFTYPES, |
968 | NL80211_IFTYPE_MAX = __NL80211_IFTYPE_AFTER_LAST - 1 | 1054 | NL80211_IFTYPE_MAX = NUM_NL80211_IFTYPES - 1 |
969 | }; | 1055 | }; |
970 | 1056 | ||
971 | /** | 1057 | /** |
@@ -974,11 +1060,14 @@ enum nl80211_iftype { | |||
974 | * Station flags. When a station is added to an AP interface, it is | 1060 | * Station flags. When a station is added to an AP interface, it is |
975 | * assumed to be already associated (and hence authenticated.) | 1061 | * assumed to be already associated (and hence authenticated.) |
976 | * | 1062 | * |
1063 | * @__NL80211_STA_FLAG_INVALID: attribute number 0 is reserved | ||
977 | * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) | 1064 | * @NL80211_STA_FLAG_AUTHORIZED: station is authorized (802.1X) |
978 | * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames | 1065 | * @NL80211_STA_FLAG_SHORT_PREAMBLE: station is capable of receiving frames |
979 | * with short barker preamble | 1066 | * with short barker preamble |
980 | * @NL80211_STA_FLAG_WME: station is WME/QoS capable | 1067 | * @NL80211_STA_FLAG_WME: station is WME/QoS capable |
981 | * @NL80211_STA_FLAG_MFP: station uses management frame protection | 1068 | * @NL80211_STA_FLAG_MFP: station uses management frame protection |
1069 | * @NL80211_STA_FLAG_MAX: highest station flag number currently defined | ||
1070 | * @__NL80211_STA_FLAG_AFTER_LAST: internal use | ||
982 | */ | 1071 | */ |
983 | enum nl80211_sta_flags { | 1072 | enum nl80211_sta_flags { |
984 | __NL80211_STA_FLAG_INVALID, | 1073 | __NL80211_STA_FLAG_INVALID, |
@@ -1048,6 +1137,8 @@ enum nl80211_rate_info { | |||
1048 | * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) | 1137 | * @NL80211_STA_INFO_RX_PACKETS: total received packet (u32, from this station) |
1049 | * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this | 1138 | * @NL80211_STA_INFO_TX_PACKETS: total transmitted packets (u32, to this |
1050 | * station) | 1139 | * station) |
1140 | * @NL80211_STA_INFO_TX_RETRIES: total retries (u32, to this station) | ||
1141 | * @NL80211_STA_INFO_TX_FAILED: total failed packets (u32, to this station) | ||
1051 | */ | 1142 | */ |
1052 | enum nl80211_sta_info { | 1143 | enum nl80211_sta_info { |
1053 | __NL80211_STA_INFO_INVALID, | 1144 | __NL80211_STA_INFO_INVALID, |
@@ -1061,6 +1152,8 @@ enum nl80211_sta_info { | |||
1061 | NL80211_STA_INFO_TX_BITRATE, | 1152 | NL80211_STA_INFO_TX_BITRATE, |
1062 | NL80211_STA_INFO_RX_PACKETS, | 1153 | NL80211_STA_INFO_RX_PACKETS, |
1063 | NL80211_STA_INFO_TX_PACKETS, | 1154 | NL80211_STA_INFO_TX_PACKETS, |
1155 | NL80211_STA_INFO_TX_RETRIES, | ||
1156 | NL80211_STA_INFO_TX_FAILED, | ||
1064 | 1157 | ||
1065 | /* keep last */ | 1158 | /* keep last */ |
1066 | __NL80211_STA_INFO_AFTER_LAST, | 1159 | __NL80211_STA_INFO_AFTER_LAST, |
@@ -1091,14 +1184,17 @@ enum nl80211_mpath_flags { | |||
1091 | * information about a mesh path. | 1184 | * information about a mesh path. |
1092 | * | 1185 | * |
1093 | * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved | 1186 | * @__NL80211_MPATH_INFO_INVALID: attribute number 0 is reserved |
1094 | * @NL80211_ATTR_MPATH_FRAME_QLEN: number of queued frames for this destination | 1187 | * @NL80211_MPATH_INFO_FRAME_QLEN: number of queued frames for this destination |
1095 | * @NL80211_ATTR_MPATH_SN: destination sequence number | 1188 | * @NL80211_MPATH_INFO_SN: destination sequence number |
1096 | * @NL80211_ATTR_MPATH_METRIC: metric (cost) of this mesh path | 1189 | * @NL80211_MPATH_INFO_METRIC: metric (cost) of this mesh path |
1097 | * @NL80211_ATTR_MPATH_EXPTIME: expiration time for the path, in msec from now | 1190 | * @NL80211_MPATH_INFO_EXPTIME: expiration time for the path, in msec from now |
1098 | * @NL80211_ATTR_MPATH_FLAGS: mesh path flags, enumerated in | 1191 | * @NL80211_MPATH_INFO_FLAGS: mesh path flags, enumerated in |
1099 | * &enum nl80211_mpath_flags; | 1192 | * &enum nl80211_mpath_flags; |
1100 | * @NL80211_ATTR_MPATH_DISCOVERY_TIMEOUT: total path discovery timeout, in msec | 1193 | * @NL80211_MPATH_INFO_DISCOVERY_TIMEOUT: total path discovery timeout, in msec |
1101 | * @NL80211_ATTR_MPATH_DISCOVERY_RETRIES: mesh path discovery retries | 1194 | * @NL80211_MPATH_INFO_DISCOVERY_RETRIES: mesh path discovery retries |
1195 | * @NL80211_MPATH_INFO_MAX: highest mesh path information attribute number | ||
1196 | * currently defind | ||
1197 | * @__NL80211_MPATH_INFO_AFTER_LAST: internal use | ||
1102 | */ | 1198 | */ |
1103 | enum nl80211_mpath_info { | 1199 | enum nl80211_mpath_info { |
1104 | __NL80211_MPATH_INFO_INVALID, | 1200 | __NL80211_MPATH_INFO_INVALID, |
@@ -1127,6 +1223,8 @@ enum nl80211_mpath_info { | |||
1127 | * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE | 1223 | * @NL80211_BAND_ATTR_HT_CAPA: HT capabilities, as in the HT information IE |
1128 | * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n | 1224 | * @NL80211_BAND_ATTR_HT_AMPDU_FACTOR: A-MPDU factor, as in 11n |
1129 | * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n | 1225 | * @NL80211_BAND_ATTR_HT_AMPDU_DENSITY: A-MPDU density, as in 11n |
1226 | * @NL80211_BAND_ATTR_MAX: highest band attribute currently defined | ||
1227 | * @__NL80211_BAND_ATTR_AFTER_LAST: internal use | ||
1130 | */ | 1228 | */ |
1131 | enum nl80211_band_attr { | 1229 | enum nl80211_band_attr { |
1132 | __NL80211_BAND_ATTR_INVALID, | 1230 | __NL80211_BAND_ATTR_INVALID, |
@@ -1147,6 +1245,7 @@ enum nl80211_band_attr { | |||
1147 | 1245 | ||
1148 | /** | 1246 | /** |
1149 | * enum nl80211_frequency_attr - frequency attributes | 1247 | * enum nl80211_frequency_attr - frequency attributes |
1248 | * @__NL80211_FREQUENCY_ATTR_INVALID: attribute number 0 is reserved | ||
1150 | * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz | 1249 | * @NL80211_FREQUENCY_ATTR_FREQ: Frequency in MHz |
1151 | * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current | 1250 | * @NL80211_FREQUENCY_ATTR_DISABLED: Channel is disabled in current |
1152 | * regulatory domain. | 1251 | * regulatory domain. |
@@ -1158,6 +1257,9 @@ enum nl80211_band_attr { | |||
1158 | * on this channel in current regulatory domain. | 1257 | * on this channel in current regulatory domain. |
1159 | * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm | 1258 | * @NL80211_FREQUENCY_ATTR_MAX_TX_POWER: Maximum transmission power in mBm |
1160 | * (100 * dBm). | 1259 | * (100 * dBm). |
1260 | * @NL80211_FREQUENCY_ATTR_MAX: highest frequency attribute number | ||
1261 | * currently defined | ||
1262 | * @__NL80211_FREQUENCY_ATTR_AFTER_LAST: internal use | ||
1161 | */ | 1263 | */ |
1162 | enum nl80211_frequency_attr { | 1264 | enum nl80211_frequency_attr { |
1163 | __NL80211_FREQUENCY_ATTR_INVALID, | 1265 | __NL80211_FREQUENCY_ATTR_INVALID, |
@@ -1177,9 +1279,13 @@ enum nl80211_frequency_attr { | |||
1177 | 1279 | ||
1178 | /** | 1280 | /** |
1179 | * enum nl80211_bitrate_attr - bitrate attributes | 1281 | * enum nl80211_bitrate_attr - bitrate attributes |
1282 | * @__NL80211_BITRATE_ATTR_INVALID: attribute number 0 is reserved | ||
1180 | * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps | 1283 | * @NL80211_BITRATE_ATTR_RATE: Bitrate in units of 100 kbps |
1181 | * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported | 1284 | * @NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE: Short preamble supported |
1182 | * in 2.4 GHz band. | 1285 | * in 2.4 GHz band. |
1286 | * @NL80211_BITRATE_ATTR_MAX: highest bitrate attribute number | ||
1287 | * currently defined | ||
1288 | * @__NL80211_BITRATE_ATTR_AFTER_LAST: internal use | ||
1183 | */ | 1289 | */ |
1184 | enum nl80211_bitrate_attr { | 1290 | enum nl80211_bitrate_attr { |
1185 | __NL80211_BITRATE_ATTR_INVALID, | 1291 | __NL80211_BITRATE_ATTR_INVALID, |
@@ -1235,6 +1341,7 @@ enum nl80211_reg_type { | |||
1235 | 1341 | ||
1236 | /** | 1342 | /** |
1237 | * enum nl80211_reg_rule_attr - regulatory rule attributes | 1343 | * enum nl80211_reg_rule_attr - regulatory rule attributes |
1344 | * @__NL80211_REG_RULE_ATTR_INVALID: attribute number 0 is reserved | ||
1238 | * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional | 1345 | * @NL80211_ATTR_REG_RULE_FLAGS: a set of flags which specify additional |
1239 | * considerations for a given frequency range. These are the | 1346 | * considerations for a given frequency range. These are the |
1240 | * &enum nl80211_reg_rule_flags. | 1347 | * &enum nl80211_reg_rule_flags. |
@@ -1251,6 +1358,9 @@ enum nl80211_reg_type { | |||
1251 | * If you don't have one then don't send this. | 1358 | * If you don't have one then don't send this. |
1252 | * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for | 1359 | * @NL80211_ATTR_POWER_RULE_MAX_EIRP: the maximum allowed EIRP for |
1253 | * a given frequency range. The value is in mBm (100 * dBm). | 1360 | * a given frequency range. The value is in mBm (100 * dBm). |
1361 | * @NL80211_REG_RULE_ATTR_MAX: highest regulatory rule attribute number | ||
1362 | * currently defined | ||
1363 | * @__NL80211_REG_RULE_ATTR_AFTER_LAST: internal use | ||
1254 | */ | 1364 | */ |
1255 | enum nl80211_reg_rule_attr { | 1365 | enum nl80211_reg_rule_attr { |
1256 | __NL80211_REG_RULE_ATTR_INVALID, | 1366 | __NL80211_REG_RULE_ATTR_INVALID, |
@@ -1302,11 +1412,31 @@ enum nl80211_reg_rule_flags { | |||
1302 | * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved | 1412 | * @__NL80211_SURVEY_INFO_INVALID: attribute number 0 is reserved |
1303 | * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel | 1413 | * @NL80211_SURVEY_INFO_FREQUENCY: center frequency of channel |
1304 | * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) | 1414 | * @NL80211_SURVEY_INFO_NOISE: noise level of channel (u8, dBm) |
1415 | * @NL80211_SURVEY_INFO_IN_USE: channel is currently being used | ||
1416 | * @NL80211_SURVEY_INFO_CHANNEL_TIME: amount of time (in ms) that the radio | ||
1417 | * spent on this channel | ||
1418 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY: amount of the time the primary | ||
1419 | * channel was sensed busy (either due to activity or energy detect) | ||
1420 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: amount of time the extension | ||
1421 | * channel was sensed busy | ||
1422 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_RX: amount of time the radio spent | ||
1423 | * receiving data | ||
1424 | * @NL80211_SURVEY_INFO_CHANNEL_TIME_TX: amount of time the radio spent | ||
1425 | * transmitting data | ||
1426 | * @NL80211_SURVEY_INFO_MAX: highest survey info attribute number | ||
1427 | * currently defined | ||
1428 | * @__NL80211_SURVEY_INFO_AFTER_LAST: internal use | ||
1305 | */ | 1429 | */ |
1306 | enum nl80211_survey_info { | 1430 | enum nl80211_survey_info { |
1307 | __NL80211_SURVEY_INFO_INVALID, | 1431 | __NL80211_SURVEY_INFO_INVALID, |
1308 | NL80211_SURVEY_INFO_FREQUENCY, | 1432 | NL80211_SURVEY_INFO_FREQUENCY, |
1309 | NL80211_SURVEY_INFO_NOISE, | 1433 | NL80211_SURVEY_INFO_NOISE, |
1434 | NL80211_SURVEY_INFO_IN_USE, | ||
1435 | NL80211_SURVEY_INFO_CHANNEL_TIME, | ||
1436 | NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY, | ||
1437 | NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY, | ||
1438 | NL80211_SURVEY_INFO_CHANNEL_TIME_RX, | ||
1439 | NL80211_SURVEY_INFO_CHANNEL_TIME_TX, | ||
1310 | 1440 | ||
1311 | /* keep last */ | 1441 | /* keep last */ |
1312 | __NL80211_SURVEY_INFO_AFTER_LAST, | 1442 | __NL80211_SURVEY_INFO_AFTER_LAST, |
@@ -1466,6 +1596,7 @@ enum nl80211_channel_type { | |||
1466 | * enum nl80211_bss - netlink attributes for a BSS | 1596 | * enum nl80211_bss - netlink attributes for a BSS |
1467 | * | 1597 | * |
1468 | * @__NL80211_BSS_INVALID: invalid | 1598 | * @__NL80211_BSS_INVALID: invalid |
1599 | * @NL80211_BSS_BSSID: BSSID of the BSS (6 octets) | ||
1469 | * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) | 1600 | * @NL80211_BSS_FREQUENCY: frequency in MHz (u32) |
1470 | * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) | 1601 | * @NL80211_BSS_TSF: TSF of the received probe response/beacon (u64) |
1471 | * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) | 1602 | * @NL80211_BSS_BEACON_INTERVAL: beacon interval of the (I)BSS (u16) |
@@ -1509,6 +1640,12 @@ enum nl80211_bss { | |||
1509 | 1640 | ||
1510 | /** | 1641 | /** |
1511 | * enum nl80211_bss_status - BSS "status" | 1642 | * enum nl80211_bss_status - BSS "status" |
1643 | * @NL80211_BSS_STATUS_AUTHENTICATED: Authenticated with this BSS. | ||
1644 | * @NL80211_BSS_STATUS_ASSOCIATED: Associated with this BSS. | ||
1645 | * @NL80211_BSS_STATUS_IBSS_JOINED: Joined to this IBSS. | ||
1646 | * | ||
1647 | * The BSS status is a BSS attribute in scan dumps, which | ||
1648 | * indicates the status the interface has wrt. this BSS. | ||
1512 | */ | 1649 | */ |
1513 | enum nl80211_bss_status { | 1650 | enum nl80211_bss_status { |
1514 | NL80211_BSS_STATUS_AUTHENTICATED, | 1651 | NL80211_BSS_STATUS_AUTHENTICATED, |
@@ -1546,11 +1683,14 @@ enum nl80211_auth_type { | |||
1546 | * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key | 1683 | * @NL80211_KEYTYPE_GROUP: Group (broadcast/multicast) key |
1547 | * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key | 1684 | * @NL80211_KEYTYPE_PAIRWISE: Pairwise (unicast/individual) key |
1548 | * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) | 1685 | * @NL80211_KEYTYPE_PEERKEY: PeerKey (DLS) |
1686 | * @NUM_NL80211_KEYTYPES: number of defined key types | ||
1549 | */ | 1687 | */ |
1550 | enum nl80211_key_type { | 1688 | enum nl80211_key_type { |
1551 | NL80211_KEYTYPE_GROUP, | 1689 | NL80211_KEYTYPE_GROUP, |
1552 | NL80211_KEYTYPE_PAIRWISE, | 1690 | NL80211_KEYTYPE_PAIRWISE, |
1553 | NL80211_KEYTYPE_PEERKEY, | 1691 | NL80211_KEYTYPE_PEERKEY, |
1692 | |||
1693 | NUM_NL80211_KEYTYPES | ||
1554 | }; | 1694 | }; |
1555 | 1695 | ||
1556 | /** | 1696 | /** |
@@ -1581,6 +1721,9 @@ enum nl80211_wpa_versions { | |||
1581 | * CCMP keys, each six bytes in little endian | 1721 | * CCMP keys, each six bytes in little endian |
1582 | * @NL80211_KEY_DEFAULT: flag indicating default key | 1722 | * @NL80211_KEY_DEFAULT: flag indicating default key |
1583 | * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key | 1723 | * @NL80211_KEY_DEFAULT_MGMT: flag indicating default management key |
1724 | * @NL80211_KEY_TYPE: the key type from enum nl80211_key_type, if not | ||
1725 | * specified the default depends on whether a MAC address was | ||
1726 | * given with the command using the key or not (u32) | ||
1584 | * @__NL80211_KEY_AFTER_LAST: internal | 1727 | * @__NL80211_KEY_AFTER_LAST: internal |
1585 | * @NL80211_KEY_MAX: highest key attribute | 1728 | * @NL80211_KEY_MAX: highest key attribute |
1586 | */ | 1729 | */ |
@@ -1592,6 +1735,7 @@ enum nl80211_key_attributes { | |||
1592 | NL80211_KEY_SEQ, | 1735 | NL80211_KEY_SEQ, |
1593 | NL80211_KEY_DEFAULT, | 1736 | NL80211_KEY_DEFAULT, |
1594 | NL80211_KEY_DEFAULT_MGMT, | 1737 | NL80211_KEY_DEFAULT_MGMT, |
1738 | NL80211_KEY_TYPE, | ||
1595 | 1739 | ||
1596 | /* keep last */ | 1740 | /* keep last */ |
1597 | __NL80211_KEY_AFTER_LAST, | 1741 | __NL80211_KEY_AFTER_LAST, |
@@ -1619,8 +1763,8 @@ enum nl80211_tx_rate_attributes { | |||
1619 | 1763 | ||
1620 | /** | 1764 | /** |
1621 | * enum nl80211_band - Frequency band | 1765 | * enum nl80211_band - Frequency band |
1622 | * @NL80211_BAND_2GHZ - 2.4 GHz ISM band | 1766 | * @NL80211_BAND_2GHZ: 2.4 GHz ISM band |
1623 | * @NL80211_BAND_5GHZ - around 5 GHz band (4.9 - 5.7 GHz) | 1767 | * @NL80211_BAND_5GHZ: around 5 GHz band (4.9 - 5.7 GHz) |
1624 | */ | 1768 | */ |
1625 | enum nl80211_band { | 1769 | enum nl80211_band { |
1626 | NL80211_BAND_2GHZ, | 1770 | NL80211_BAND_2GHZ, |
@@ -1658,9 +1802,9 @@ enum nl80211_attr_cqm { | |||
1658 | 1802 | ||
1659 | /** | 1803 | /** |
1660 | * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event | 1804 | * enum nl80211_cqm_rssi_threshold_event - RSSI threshold event |
1661 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW - The RSSI level is lower than the | 1805 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW: The RSSI level is lower than the |
1662 | * configured threshold | 1806 | * configured threshold |
1663 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH - The RSSI is higher than the | 1807 | * @NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH: The RSSI is higher than the |
1664 | * configured threshold | 1808 | * configured threshold |
1665 | */ | 1809 | */ |
1666 | enum nl80211_cqm_rssi_threshold_event { | 1810 | enum nl80211_cqm_rssi_threshold_event { |
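The frame transmission/registration DOC block and the NL80211_CMD_REGISTER_FRAME description above define a userspace-facing netlink protocol: pick an interface, state the management frame type/subtype, give a match prefix, and the registration then lives as long as the netlink socket. A hedged userspace sketch of that sequence, assuming libnl 3 is available (nl_socket_alloc, genl_ctrl_resolve, nl_send_auto and friends); the 0x00d0 type/subtype (management/action) and the single 0x7f category byte are only example values, and error handling is kept minimal.

#include <net/if.h>
#include <netlink/netlink.h>
#include <netlink/msg.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nl80211.h>

/* Illustrative only: register to receive vendor-specific action frames on
 * ifname.  The returned socket must stay open; closing it is the only way
 * the registration is dropped, per the header documentation above. */
struct nl_sock *example_register_action_rx(const char *ifname)
{
        unsigned char match = 0x7f;     /* vendor-specific action category */
        struct nl_sock *sk;
        struct nl_msg *msg;
        int family;

        sk = nl_socket_alloc();
        if (!sk)
                return NULL;
        if (genl_connect(sk) < 0)
                goto fail;
        family = genl_ctrl_resolve(sk, "nl80211");
        if (family < 0)
                goto fail;
        msg = nlmsg_alloc();
        if (!msg)
                goto fail;
        genlmsg_put(msg, 0, 0, family, 0, 0, NL80211_CMD_REGISTER_FRAME, 0);
        nla_put_u32(msg, NL80211_ATTR_IFINDEX, if_nametoindex(ifname));
        nla_put_u16(msg, NL80211_ATTR_FRAME_TYPE, 0x00d0);  /* mgmt: action */
        nla_put(msg, NL80211_ATTR_FRAME_MATCH, 1, &match);
        if (nl_send_auto(sk, msg) < 0) {
                nlmsg_free(msg);
                goto fail;
        }
        nlmsg_free(msg);
        return sk;      /* poll this socket for NL80211_CMD_FRAME events */
fail:
        nl_socket_free(sk);
        return NULL;
}

Transmitting a response would follow the same shape with NL80211_CMD_FRAME plus NL80211_ATTR_FRAME and NL80211_ATTR_WIPHY_FREQ, with the outcome reported back via NL80211_CMD_FRAME_TX_STATUS as described above.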
diff --git a/include/linux/notifier.h b/include/linux/notifier.h index b2f1a4d83550..2026f9e1ceb8 100644 --- a/include/linux/notifier.h +++ b/include/linux/notifier.h | |||
@@ -49,28 +49,28 @@ | |||
49 | 49 | ||
50 | struct notifier_block { | 50 | struct notifier_block { |
51 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); | 51 | int (*notifier_call)(struct notifier_block *, unsigned long, void *); |
52 | struct notifier_block *next; | 52 | struct notifier_block __rcu *next; |
53 | int priority; | 53 | int priority; |
54 | }; | 54 | }; |
55 | 55 | ||
56 | struct atomic_notifier_head { | 56 | struct atomic_notifier_head { |
57 | spinlock_t lock; | 57 | spinlock_t lock; |
58 | struct notifier_block *head; | 58 | struct notifier_block __rcu *head; |
59 | }; | 59 | }; |
60 | 60 | ||
61 | struct blocking_notifier_head { | 61 | struct blocking_notifier_head { |
62 | struct rw_semaphore rwsem; | 62 | struct rw_semaphore rwsem; |
63 | struct notifier_block *head; | 63 | struct notifier_block __rcu *head; |
64 | }; | 64 | }; |
65 | 65 | ||
66 | struct raw_notifier_head { | 66 | struct raw_notifier_head { |
67 | struct notifier_block *head; | 67 | struct notifier_block __rcu *head; |
68 | }; | 68 | }; |
69 | 69 | ||
70 | struct srcu_notifier_head { | 70 | struct srcu_notifier_head { |
71 | struct mutex mutex; | 71 | struct mutex mutex; |
72 | struct srcu_struct srcu; | 72 | struct srcu_struct srcu; |
73 | struct notifier_block *head; | 73 | struct notifier_block __rcu *head; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | #define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ | 76 | #define ATOMIC_INIT_NOTIFIER_HEAD(name) do { \ |
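The __rcu annotations above change no interfaces; they let sparse check that the chain links are published with rcu_assign_pointer() and, for atomic chains, traversed under rcu_read_lock() while writers serialize on the head's lock. For context, a minimal sketch of the unchanged usage pattern around an atomic chain; the chain, callback and action value are made up.

#include <linux/notifier.h>
#include <linux/init.h>

static ATOMIC_NOTIFIER_HEAD(example_chain);     /* its ->head is now __rcu */

static int example_cb(struct notifier_block *nb, unsigned long action,
                      void *data)
{
        /* react to the (made-up) action code; NOTIFY_OK lets the walk go on */
        return NOTIFY_OK;
}

static struct notifier_block example_nb = {
        .notifier_call = example_cb,
};

static int __init example_notifier_init(void)
{
        /* registration takes the head's spinlock; the call below walks the
         * chain under RCU, which is what the annotation lets sparse verify */
        atomic_notifier_chain_register(&example_chain, &example_nb);
        atomic_notifier_call_chain(&example_chain, 1, NULL);
        return 0;
}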
diff --git a/include/linux/opp.h b/include/linux/opp.h new file mode 100644 index 000000000000..5449945d589f --- /dev/null +++ b/include/linux/opp.h | |||
@@ -0,0 +1,105 @@ | |||
1 | /* | ||
2 | * Generic OPP Interface | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Texas Instruments Incorporated. | ||
5 | * Nishanth Menon | ||
6 | * Romit Dasgupta | ||
7 | * Kevin Hilman | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #ifndef __LINUX_OPP_H__ | ||
15 | #define __LINUX_OPP_H__ | ||
16 | |||
17 | #include <linux/err.h> | ||
18 | #include <linux/cpufreq.h> | ||
19 | |||
20 | struct opp; | ||
21 | |||
22 | #if defined(CONFIG_PM_OPP) | ||
23 | |||
24 | unsigned long opp_get_voltage(struct opp *opp); | ||
25 | |||
26 | unsigned long opp_get_freq(struct opp *opp); | ||
27 | |||
28 | int opp_get_opp_count(struct device *dev); | ||
29 | |||
30 | struct opp *opp_find_freq_exact(struct device *dev, unsigned long freq, | ||
31 | bool available); | ||
32 | |||
33 | struct opp *opp_find_freq_floor(struct device *dev, unsigned long *freq); | ||
34 | |||
35 | struct opp *opp_find_freq_ceil(struct device *dev, unsigned long *freq); | ||
36 | |||
37 | int opp_add(struct device *dev, unsigned long freq, unsigned long u_volt); | ||
38 | |||
39 | int opp_enable(struct device *dev, unsigned long freq); | ||
40 | |||
41 | int opp_disable(struct device *dev, unsigned long freq); | ||
42 | |||
43 | #else | ||
44 | static inline unsigned long opp_get_voltage(struct opp *opp) | ||
45 | { | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static inline unsigned long opp_get_freq(struct opp *opp) | ||
50 | { | ||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | static inline int opp_get_opp_count(struct device *dev) | ||
55 | { | ||
56 | return 0; | ||
57 | } | ||
58 | |||
59 | static inline struct opp *opp_find_freq_exact(struct device *dev, | ||
60 | unsigned long freq, bool available) | ||
61 | { | ||
62 | return ERR_PTR(-EINVAL); | ||
63 | } | ||
64 | |||
65 | static inline struct opp *opp_find_freq_floor(struct device *dev, | ||
66 | unsigned long *freq) | ||
67 | { | ||
68 | return ERR_PTR(-EINVAL); | ||
69 | } | ||
70 | |||
71 | static inline struct opp *opp_find_freq_ceil(struct device *dev, | ||
72 | unsigned long *freq) | ||
73 | { | ||
74 | return ERR_PTR(-EINVAL); | ||
75 | } | ||
76 | |||
77 | static inline int opp_add(struct device *dev, unsigned long freq, | ||
78 | unsigned long u_volt) | ||
79 | { | ||
80 | return -EINVAL; | ||
81 | } | ||
82 | |||
83 | static inline int opp_enable(struct device *dev, unsigned long freq) | ||
84 | { | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static inline int opp_disable(struct device *dev, unsigned long freq) | ||
89 | { | ||
90 | return 0; | ||
91 | } | ||
92 | #endif /* CONFIG_PM_OPP */ | ||
93 | |||
94 | #if defined(CONFIG_CPU_FREQ) && defined(CONFIG_PM_OPP) | ||
95 | int opp_init_cpufreq_table(struct device *dev, | ||
96 | struct cpufreq_frequency_table **table); | ||
97 | #else | ||
98 | static inline int opp_init_cpufreq_table(struct device *dev, | ||
99 | struct cpufreq_frequency_table **table) | ||
100 | { | ||
101 | return -EINVAL; | ||
102 | } | ||
103 | #endif /* CONFIG_CPU_FREQ */ | ||
104 | |||
105 | #endif /* __LINUX_OPP_H__ */ | ||
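The new opp.h above is a lookup API: a platform registers discrete (frequency, voltage) operating points with opp_add(), may toggle them with opp_enable()/opp_disable(), and consumers ask for the best match around a target frequency. A hedged sketch of a DVFS-style lookup using it; the regulator and clock programming that would follow is only indicated in a comment since it is outside this header, and example_pick_opp() is a made-up name.

#include <linux/opp.h>
#include <linux/err.h>
#include <linux/device.h>

/* Illustrative only: round *freq down to the nearest available operating
 * point and report the voltage it needs.  With CONFIG_PM_OPP disabled the
 * stub opp_find_freq_floor() returns ERR_PTR(-EINVAL), so this degrades
 * gracefully to an error. */
static int example_pick_opp(struct device *dev, unsigned long *freq,
                            unsigned long *u_volt)
{
        struct opp *opp = opp_find_freq_floor(dev, freq);

        if (IS_ERR(opp))
                return PTR_ERR(opp);

        *u_volt = opp_get_voltage(opp);
        /* a real caller would now program the regulator to *u_volt and the
         * clock to *freq, ordering the two according to scale direction */
        return 0;
}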
diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h index 5171639ecf0f..32fb81212fd1 100644 --- a/include/linux/oprofile.h +++ b/include/linux/oprofile.h | |||
@@ -15,6 +15,7 @@ | |||
15 | 15 | ||
16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
17 | #include <linux/spinlock.h> | 17 | #include <linux/spinlock.h> |
18 | #include <linux/init.h> | ||
18 | #include <asm/atomic.h> | 19 | #include <asm/atomic.h> |
19 | 20 | ||
20 | /* Each escaped entry is prefixed by ESCAPE_CODE | 21 | /* Each escaped entry is prefixed by ESCAPE_CODE |
@@ -185,4 +186,10 @@ int oprofile_add_data(struct op_entry *entry, unsigned long val); | |||
185 | int oprofile_add_data64(struct op_entry *entry, u64 val); | 186 | int oprofile_add_data64(struct op_entry *entry, u64 val); |
186 | int oprofile_write_commit(struct op_entry *entry); | 187 | int oprofile_write_commit(struct op_entry *entry); |
187 | 188 | ||
189 | #ifdef CONFIG_PERF_EVENTS | ||
190 | int __init oprofile_perf_init(struct oprofile_operations *ops); | ||
191 | void oprofile_perf_exit(void); | ||
192 | char *op_name_from_perf_id(void); | ||
193 | #endif /* CONFIG_PERF_EVENTS */ | ||
194 | |||
188 | #endif /* OPROFILE_H */ | 195 | #endif /* OPROFILE_H */ |
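The CONFIG_PERF_EVENTS block added above lets an architecture back oprofile with the perf_events subsystem instead of carrying its own PMU driver. A hedged sketch of the arch-side glue that pattern suggests; the counter name string is invented for the example.

#include <linux/oprofile.h>
#include <linux/init.h>

/* Illustrative only: delegate oprofile set-up/tear-down to the shared
 * perf-based backend and provide the name exposed through oprofilefs. */
char *op_name_from_perf_id(void)
{
        return "example/pmu";           /* made-up name for the sketch */
}

int __init oprofile_arch_init(struct oprofile_operations *ops)
{
        return oprofile_perf_init(ops);
}

void oprofile_arch_exit(void)
{
        oprofile_perf_exit();
}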
diff --git a/include/linux/padata.h b/include/linux/padata.h index bdcd1e9eacea..4633b2f726b6 100644 --- a/include/linux/padata.h +++ b/include/linux/padata.h | |||
@@ -127,8 +127,8 @@ struct padata_cpumask { | |||
127 | */ | 127 | */ |
128 | struct parallel_data { | 128 | struct parallel_data { |
129 | struct padata_instance *pinst; | 129 | struct padata_instance *pinst; |
130 | struct padata_parallel_queue *pqueue; | 130 | struct padata_parallel_queue __percpu *pqueue; |
131 | struct padata_serial_queue *squeue; | 131 | struct padata_serial_queue __percpu *squeue; |
132 | atomic_t seq_nr; | 132 | atomic_t seq_nr; |
133 | atomic_t reorder_objects; | 133 | atomic_t reorder_objects; |
134 | atomic_t refcnt; | 134 | atomic_t refcnt; |
diff --git a/include/linux/pci.h b/include/linux/pci.h index b1d17956a153..c8d95e369ff4 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -1214,6 +1214,9 @@ static inline struct pci_dev *pci_get_bus_and_slot(unsigned int bus, | |||
1214 | unsigned int devfn) | 1214 | unsigned int devfn) |
1215 | { return NULL; } | 1215 | { return NULL; } |
1216 | 1216 | ||
1217 | static inline int pci_domain_nr(struct pci_bus *bus) | ||
1218 | { return 0; } | ||
1219 | |||
1217 | #define dev_is_pci(d) (false) | 1220 | #define dev_is_pci(d) (false) |
1218 | #define dev_is_pf(d) (false) | 1221 | #define dev_is_pf(d) (false) |
1219 | #define dev_num_vf(d) (0) | 1222 | #define dev_num_vf(d) (0) |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index f6a3b2d36cad..b4c3d1b50037 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -393,6 +393,9 @@ | |||
393 | #define PCI_DEVICE_ID_VLSI_82C147 0x0105 | 393 | #define PCI_DEVICE_ID_VLSI_82C147 0x0105 |
394 | #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 | 394 | #define PCI_DEVICE_ID_VLSI_VAS96011 0x0702 |
395 | 395 | ||
396 | /* AMD RD890 Chipset */ | ||
397 | #define PCI_DEVICE_ID_RD890_IOMMU 0x5a23 | ||
398 | |||
396 | #define PCI_VENDOR_ID_ADL 0x1005 | 399 | #define PCI_VENDOR_ID_ADL 0x1005 |
397 | #define PCI_DEVICE_ID_ADL_2301 0x2301 | 400 | #define PCI_DEVICE_ID_ADL_2301 0x2301 |
398 | 401 | ||
@@ -514,6 +517,7 @@ | |||
514 | #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 | 517 | #define PCI_DEVICE_ID_AMD_11H_NB_DRAM 0x1302 |
515 | #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 | 518 | #define PCI_DEVICE_ID_AMD_11H_NB_MISC 0x1303 |
516 | #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 | 519 | #define PCI_DEVICE_ID_AMD_11H_NB_LINK 0x1304 |
520 | #define PCI_DEVICE_ID_AMD_15H_NB_MISC 0x1603 | ||
517 | #define PCI_DEVICE_ID_AMD_LANCE 0x2000 | 521 | #define PCI_DEVICE_ID_AMD_LANCE 0x2000 |
518 | #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 | 522 | #define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001 |
519 | #define PCI_DEVICE_ID_AMD_SCSI 0x2020 | 523 | #define PCI_DEVICE_ID_AMD_SCSI 0x2020 |
@@ -739,6 +743,7 @@ | |||
739 | #define PCI_DEVICE_ID_HP_CISSC 0x3230 | 743 | #define PCI_DEVICE_ID_HP_CISSC 0x3230 |
740 | #define PCI_DEVICE_ID_HP_CISSD 0x3238 | 744 | #define PCI_DEVICE_ID_HP_CISSD 0x3238 |
741 | #define PCI_DEVICE_ID_HP_CISSE 0x323a | 745 | #define PCI_DEVICE_ID_HP_CISSE 0x323a |
746 | #define PCI_DEVICE_ID_HP_CISSF 0x323b | ||
742 | #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 | 747 | #define PCI_DEVICE_ID_HP_ZX2_IOC 0x4031 |
743 | 748 | ||
744 | #define PCI_VENDOR_ID_PCTECH 0x1042 | 749 | #define PCI_VENDOR_ID_PCTECH 0x1042 |
@@ -815,7 +820,7 @@ | |||
815 | 820 | ||
816 | #define PCI_VENDOR_ID_ANIGMA 0x1051 | 821 | #define PCI_VENDOR_ID_ANIGMA 0x1051 |
817 | #define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 | 822 | #define PCI_DEVICE_ID_ANIGMA_MC145575 0x0100 |
818 | 823 | ||
819 | #define PCI_VENDOR_ID_EFAR 0x1055 | 824 | #define PCI_VENDOR_ID_EFAR 0x1055 |
820 | #define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 | 825 | #define PCI_DEVICE_ID_EFAR_SLC90E66_1 0x9130 |
821 | #define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 | 826 | #define PCI_DEVICE_ID_EFAR_SLC90E66_3 0x9463 |
@@ -1446,7 +1451,7 @@ | |||
1446 | 1451 | ||
1447 | #define PCI_VENDOR_ID_ZIATECH 0x1138 | 1452 | #define PCI_VENDOR_ID_ZIATECH 0x1138 |
1448 | #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 | 1453 | #define PCI_DEVICE_ID_ZIATECH_5550_HC 0x5550 |
1449 | 1454 | ||
1450 | 1455 | ||
1451 | #define PCI_VENDOR_ID_SYSKONNECT 0x1148 | 1456 | #define PCI_VENDOR_ID_SYSKONNECT 0x1148 |
1452 | #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 | 1457 | #define PCI_DEVICE_ID_SYSKONNECT_TR 0x4200 |
@@ -1600,8 +1605,8 @@ | |||
1600 | #define PCI_DEVICE_ID_RP8OCTA 0x0005 | 1605 | #define PCI_DEVICE_ID_RP8OCTA 0x0005 |
1601 | #define PCI_DEVICE_ID_RP8J 0x0006 | 1606 | #define PCI_DEVICE_ID_RP8J 0x0006 |
1602 | #define PCI_DEVICE_ID_RP4J 0x0007 | 1607 | #define PCI_DEVICE_ID_RP4J 0x0007 |
1603 | #define PCI_DEVICE_ID_RP8SNI 0x0008 | 1608 | #define PCI_DEVICE_ID_RP8SNI 0x0008 |
1604 | #define PCI_DEVICE_ID_RP16SNI 0x0009 | 1609 | #define PCI_DEVICE_ID_RP16SNI 0x0009 |
1605 | #define PCI_DEVICE_ID_RPP4 0x000A | 1610 | #define PCI_DEVICE_ID_RPP4 0x000A |
1606 | #define PCI_DEVICE_ID_RPP8 0x000B | 1611 | #define PCI_DEVICE_ID_RPP8 0x000B |
1607 | #define PCI_DEVICE_ID_RP4M 0x000D | 1612 | #define PCI_DEVICE_ID_RP4M 0x000D |
@@ -1611,9 +1616,9 @@ | |||
1611 | #define PCI_DEVICE_ID_URP8INTF 0x0802 | 1616 | #define PCI_DEVICE_ID_URP8INTF 0x0802 |
1612 | #define PCI_DEVICE_ID_URP16INTF 0x0803 | 1617 | #define PCI_DEVICE_ID_URP16INTF 0x0803 |
1613 | #define PCI_DEVICE_ID_URP8OCTA 0x0805 | 1618 | #define PCI_DEVICE_ID_URP8OCTA 0x0805 |
1614 | #define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C | 1619 | #define PCI_DEVICE_ID_UPCI_RM3_8PORT 0x080C |
1615 | #define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D | 1620 | #define PCI_DEVICE_ID_UPCI_RM3_4PORT 0x080D |
1616 | #define PCI_DEVICE_ID_CRP16INTF 0x0903 | 1621 | #define PCI_DEVICE_ID_CRP16INTF 0x0903 |
1617 | 1622 | ||
1618 | #define PCI_VENDOR_ID_CYCLADES 0x120e | 1623 | #define PCI_VENDOR_ID_CYCLADES 0x120e |
1619 | #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 | 1624 | #define PCI_DEVICE_ID_CYCLOM_Y_Lo 0x0100 |
@@ -2139,7 +2144,7 @@ | |||
2139 | #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 | 2144 | #define PCI_DEVICE_ID_RASTEL_2PORT 0x2000 |
2140 | 2145 | ||
2141 | #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 | 2146 | #define PCI_VENDOR_ID_ZOLTRIX 0x15b0 |
2142 | #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 | 2147 | #define PCI_DEVICE_ID_ZOLTRIX_2BD0 0x2bd0 |
2143 | 2148 | ||
2144 | #define PCI_VENDOR_ID_MELLANOX 0x15b3 | 2149 | #define PCI_VENDOR_ID_MELLANOX 0x15b3 |
2145 | #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 | 2150 | #define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 |
@@ -2189,6 +2194,9 @@ | |||
2189 | #define PCI_VENDOR_ID_ARIMA 0x161f | 2194 | #define PCI_VENDOR_ID_ARIMA 0x161f |
2190 | 2195 | ||
2191 | #define PCI_VENDOR_ID_BROCADE 0x1657 | 2196 | #define PCI_VENDOR_ID_BROCADE 0x1657 |
2197 | #define PCI_DEVICE_ID_BROCADE_CT 0x0014 | ||
2198 | #define PCI_DEVICE_ID_BROCADE_FC_8G1P 0x0017 | ||
2199 | #define PCI_DEVICE_ID_BROCADE_CT_FC 0x0021 | ||
2192 | 2200 | ||
2193 | #define PCI_VENDOR_ID_SIBYTE 0x166d | 2201 | #define PCI_VENDOR_ID_SIBYTE 0x166d |
2194 | #define PCI_DEVICE_ID_BCM1250_PCI 0x0001 | 2202 | #define PCI_DEVICE_ID_BCM1250_PCI 0x0001 |
@@ -2260,6 +2268,13 @@ | |||
2260 | 2268 | ||
2261 | #define PCI_VENDOR_ID_SILAN 0x1904 | 2269 | #define PCI_VENDOR_ID_SILAN 0x1904 |
2262 | 2270 | ||
2271 | #define PCI_VENDOR_ID_RENESAS 0x1912 | ||
2272 | #define PCI_DEVICE_ID_RENESAS_SH7781 0x0001 | ||
2273 | #define PCI_DEVICE_ID_RENESAS_SH7780 0x0002 | ||
2274 | #define PCI_DEVICE_ID_RENESAS_SH7763 0x0004 | ||
2275 | #define PCI_DEVICE_ID_RENESAS_SH7785 0x0007 | ||
2276 | #define PCI_DEVICE_ID_RENESAS_SH7786 0x0010 | ||
2277 | |||
2263 | #define PCI_VENDOR_ID_TDI 0x192E | 2278 | #define PCI_VENDOR_ID_TDI 0x192E |
2264 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 | 2279 | #define PCI_DEVICE_ID_TDI_EHCI 0x0101 |
2265 | 2280 | ||
@@ -2300,6 +2315,8 @@ | |||
2300 | #define PCI_DEVICE_ID_P2010 0x0079 | 2315 | #define PCI_DEVICE_ID_P2010 0x0079 |
2301 | #define PCI_DEVICE_ID_P1020E 0x0100 | 2316 | #define PCI_DEVICE_ID_P1020E 0x0100 |
2302 | #define PCI_DEVICE_ID_P1020 0x0101 | 2317 | #define PCI_DEVICE_ID_P1020 0x0101 |
2318 | #define PCI_DEVICE_ID_P1021E 0x0102 | ||
2319 | #define PCI_DEVICE_ID_P1021 0x0103 | ||
2303 | #define PCI_DEVICE_ID_P1011E 0x0108 | 2320 | #define PCI_DEVICE_ID_P1011E 0x0108 |
2304 | #define PCI_DEVICE_ID_P1011 0x0109 | 2321 | #define PCI_DEVICE_ID_P1011 0x0109 |
2305 | #define PCI_DEVICE_ID_P1022E 0x0110 | 2322 | #define PCI_DEVICE_ID_P1022E 0x0110 |
@@ -2310,6 +2327,14 @@ | |||
2310 | #define PCI_DEVICE_ID_P4080 0x0401 | 2327 | #define PCI_DEVICE_ID_P4080 0x0401 |
2311 | #define PCI_DEVICE_ID_P4040E 0x0408 | 2328 | #define PCI_DEVICE_ID_P4040E 0x0408 |
2312 | #define PCI_DEVICE_ID_P4040 0x0409 | 2329 | #define PCI_DEVICE_ID_P4040 0x0409 |
2330 | #define PCI_DEVICE_ID_P2040E 0x0410 | ||
2331 | #define PCI_DEVICE_ID_P2040 0x0411 | ||
2332 | #define PCI_DEVICE_ID_P3041E 0x041E | ||
2333 | #define PCI_DEVICE_ID_P3041 0x041F | ||
2334 | #define PCI_DEVICE_ID_P5020E 0x0420 | ||
2335 | #define PCI_DEVICE_ID_P5020 0x0421 | ||
2336 | #define PCI_DEVICE_ID_P5010E 0x0428 | ||
2337 | #define PCI_DEVICE_ID_P5010 0x0429 | ||
2313 | #define PCI_DEVICE_ID_MPC8641 0x7010 | 2338 | #define PCI_DEVICE_ID_MPC8641 0x7010 |
2314 | #define PCI_DEVICE_ID_MPC8641D 0x7011 | 2339 | #define PCI_DEVICE_ID_MPC8641D 0x7011 |
2315 | #define PCI_DEVICE_ID_MPC8610 0x7018 | 2340 | #define PCI_DEVICE_ID_MPC8610 0x7018 |
@@ -2413,7 +2438,7 @@ | |||
2413 | #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 | 2438 | #define PCI_DEVICE_ID_INTEL_82815_MC 0x1130 |
2414 | #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 | 2439 | #define PCI_DEVICE_ID_INTEL_82815_CGC 0x1132 |
2415 | #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 | 2440 | #define PCI_DEVICE_ID_INTEL_82092AA_0 0x1221 |
2416 | #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 | 2441 | #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 |
2417 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d | 2442 | #define PCI_DEVICE_ID_INTEL_7205_0 0x255d |
2418 | #define PCI_DEVICE_ID_INTEL_82437 0x122d | 2443 | #define PCI_DEVICE_ID_INTEL_82437 0x122d |
2419 | #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e | 2444 | #define PCI_DEVICE_ID_INTEL_82371FB_0 0x122e |
@@ -2616,6 +2641,9 @@ | |||
2616 | #define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 | 2641 | #define PCI_DEVICE_ID_INTEL_MCH_PC 0x3599 |
2617 | #define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a | 2642 | #define PCI_DEVICE_ID_INTEL_MCH_PC1 0x359a |
2618 | #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e | 2643 | #define PCI_DEVICE_ID_INTEL_E7525_MCH 0x359e |
2644 | #define PCI_DEVICE_ID_INTEL_I7300_MCH_ERR 0x360c | ||
2645 | #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB0 0x360f | ||
2646 | #define PCI_DEVICE_ID_INTEL_I7300_MCH_FB1 0x3610 | ||
2619 | #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b | 2647 | #define PCI_DEVICE_ID_INTEL_IOAT_CNB 0x360b |
2620 | #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c | 2648 | #define PCI_DEVICE_ID_INTEL_FBD_CNB 0x360c |
2621 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 | 2649 | #define PCI_DEVICE_ID_INTEL_IOAT_JSF0 0x3710 |
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h index ce2dc655cd1d..018db9a62ffe 100644 --- a/include/linux/percpu-defs.h +++ b/include/linux/percpu-defs.h | |||
@@ -139,6 +139,27 @@ | |||
139 | __aligned(PAGE_SIZE) | 139 | __aligned(PAGE_SIZE) |
140 | 140 | ||
141 | /* | 141 | /* |
142 | * Declaration/definition used for per-CPU variables that must be read mostly. | ||
143 | */ | ||
144 | #define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ | ||
145 | DECLARE_PER_CPU_SECTION(type, name, "..readmostly") | ||
146 | |||
147 | #define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ | ||
148 | DEFINE_PER_CPU_SECTION(type, name, "..readmostly") | ||
149 | |||
150 | /* | ||
151 | * Declaration/definition used for large per-CPU variables that must be | ||
152 | * aligned to something larger than the pagesize. | ||
153 | */ | ||
154 | #define DECLARE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size) \ | ||
155 | DECLARE_PER_CPU_SECTION(type, name, "..page_aligned") \ | ||
156 | __aligned(size) | ||
157 | |||
158 | #define DEFINE_PER_CPU_MULTIPAGE_ALIGNED(type, name, size) \ | ||
159 | DEFINE_PER_CPU_SECTION(type, name, "..page_aligned") \ | ||
160 | __aligned(size) | ||
161 | |||
162 | /* | ||
142 | * Intermodule exports for per-CPU variables. sparse forgets about | 163 | * Intermodule exports for per-CPU variables. sparse forgets about |
143 | * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to | 164 | * address space across EXPORT_SYMBOL(), change EXPORT_SYMBOL() to |
144 | * noop if __CHECKER__. | 165 | * noop if __CHECKER__. |
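For reference, a minimal usage sketch for the ..readmostly per-CPU helpers added above (the variable and function names are illustrative, not part of this diff): a value written rarely but read on every CPU goes into the read-mostly per-CPU section so hot readers do not share cache lines with frequently written per-CPU data.

#include <linux/kernel.h>
#include <linux/percpu.h>

/* hypothetical flag, set once at boot and read in hot paths */
DEFINE_PER_CPU_READ_MOSTLY(int, hypothetical_cpu_feature_flag);

static void hypothetical_show_flags(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		pr_info("cpu%d flag=%d\n", cpu,
			per_cpu(hypothetical_cpu_feature_flag, cpu));
}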
diff --git a/include/linux/percpu.h b/include/linux/percpu.h index b8b9084527b1..5095b834a6fb 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h | |||
@@ -39,10 +39,17 @@ | |||
39 | preempt_enable(); \ | 39 | preempt_enable(); \ |
40 | } while (0) | 40 | } while (0) |
41 | 41 | ||
42 | #ifdef CONFIG_SMP | 42 | #define get_cpu_ptr(var) ({ \ |
43 | preempt_disable(); \ | ||
44 | this_cpu_ptr(var); }) | ||
45 | |||
46 | #define put_cpu_ptr(var) do { \ | ||
47 | (void)(var); \ | ||
48 | preempt_enable(); \ | ||
49 | } while (0) | ||
43 | 50 | ||
44 | /* minimum unit size, also is the maximum supported allocation size */ | 51 | /* minimum unit size, also is the maximum supported allocation size */ |
45 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(64 << 10) | 52 | #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) |
46 | 53 | ||
47 | /* | 54 | /* |
48 | * Percpu allocator can serve percpu allocations before slab is | 55 | * Percpu allocator can serve percpu allocations before slab is |
@@ -137,37 +144,20 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size, | |||
137 | * dynamically allocated. Non-atomic access to the current CPU's | 144 | * dynamically allocated. Non-atomic access to the current CPU's |
138 | * version should probably be combined with get_cpu()/put_cpu(). | 145 | * version should probably be combined with get_cpu()/put_cpu(). |
139 | */ | 146 | */ |
147 | #ifdef CONFIG_SMP | ||
140 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) | 148 | #define per_cpu_ptr(ptr, cpu) SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu))) |
149 | #else | ||
150 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); VERIFY_PERCPU_PTR((ptr)); }) | ||
151 | #endif | ||
141 | 152 | ||
142 | extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); | 153 | extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align); |
143 | extern bool is_kernel_percpu_address(unsigned long addr); | 154 | extern bool is_kernel_percpu_address(unsigned long addr); |
144 | 155 | ||
145 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | 156 | #if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) |
146 | extern void __init setup_per_cpu_areas(void); | 157 | extern void __init setup_per_cpu_areas(void); |
147 | #endif | 158 | #endif |
148 | extern void __init percpu_init_late(void); | 159 | extern void __init percpu_init_late(void); |
149 | 160 | ||
150 | #else /* CONFIG_SMP */ | ||
151 | |||
152 | #define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); }) | ||
153 | |||
154 | /* can't distinguish from other static vars, always false */ | ||
155 | static inline bool is_kernel_percpu_address(unsigned long addr) | ||
156 | { | ||
157 | return false; | ||
158 | } | ||
159 | |||
160 | static inline void __init setup_per_cpu_areas(void) { } | ||
161 | |||
162 | static inline void __init percpu_init_late(void) { } | ||
163 | |||
164 | static inline void *pcpu_lpage_remapped(void *kaddr) | ||
165 | { | ||
166 | return NULL; | ||
167 | } | ||
168 | |||
169 | #endif /* CONFIG_SMP */ | ||
170 | |||
171 | extern void __percpu *__alloc_percpu(size_t size, size_t align); | 161 | extern void __percpu *__alloc_percpu(size_t size, size_t align); |
172 | extern void free_percpu(void __percpu *__pdata); | 162 | extern void free_percpu(void __percpu *__pdata); |
173 | extern phys_addr_t per_cpu_ptr_to_phys(void *addr); | 163 | extern phys_addr_t per_cpu_ptr_to_phys(void *addr); |
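A sketch of the calling pattern for the new get_cpu_ptr()/put_cpu_ptr() pair defined above (names illustrative): they bracket a preemption-disabled access to the current CPU's instance of a dynamically allocated per-CPU object.

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/percpu.h>

struct hypothetical_stats {
	unsigned long packets;
};

static struct hypothetical_stats __percpu *hypothetical_stats;

static int __init hypothetical_stats_init(void)
{
	hypothetical_stats = alloc_percpu(struct hypothetical_stats);
	return hypothetical_stats ? 0 : -ENOMEM;
}

static void hypothetical_count_packet(void)
{
	struct hypothetical_stats *s;

	s = get_cpu_ptr(hypothetical_stats);	/* disables preemption */
	s->packets++;				/* safe: we stay on this CPU */
	put_cpu_ptr(hypothetical_stats);	/* re-enables preemption */
}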
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 716f99b682c1..057bf22a8323 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
@@ -486,6 +486,8 @@ struct perf_guest_info_callbacks { | |||
486 | #include <linux/workqueue.h> | 486 | #include <linux/workqueue.h> |
487 | #include <linux/ftrace.h> | 487 | #include <linux/ftrace.h> |
488 | #include <linux/cpu.h> | 488 | #include <linux/cpu.h> |
489 | #include <linux/irq_work.h> | ||
490 | #include <linux/jump_label_ref.h> | ||
489 | #include <asm/atomic.h> | 491 | #include <asm/atomic.h> |
490 | #include <asm/local.h> | 492 | #include <asm/local.h> |
491 | 493 | ||
@@ -529,16 +531,22 @@ struct hw_perf_event { | |||
529 | int last_cpu; | 531 | int last_cpu; |
530 | }; | 532 | }; |
531 | struct { /* software */ | 533 | struct { /* software */ |
532 | s64 remaining; | ||
533 | struct hrtimer hrtimer; | 534 | struct hrtimer hrtimer; |
534 | }; | 535 | }; |
535 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 536 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
536 | struct { /* breakpoint */ | 537 | struct { /* breakpoint */ |
537 | struct arch_hw_breakpoint info; | 538 | struct arch_hw_breakpoint info; |
538 | struct list_head bp_list; | 539 | struct list_head bp_list; |
540 | /* | ||
541 | * Crufty hack to avoid the chicken and egg | ||
542 | * problem hw_breakpoint has with context | ||
543 | * creation and event initialization. | ||
544 | */ | ||
545 | struct task_struct *bp_target; | ||
539 | }; | 546 | }; |
540 | #endif | 547 | #endif |
541 | }; | 548 | }; |
549 | int state; | ||
542 | local64_t prev_count; | 550 | local64_t prev_count; |
543 | u64 sample_period; | 551 | u64 sample_period; |
544 | u64 last_period; | 552 | u64 last_period; |
@@ -550,6 +558,13 @@ struct hw_perf_event { | |||
550 | #endif | 558 | #endif |
551 | }; | 559 | }; |
552 | 560 | ||
561 | /* | ||
562 | * hw_perf_event::state flags | ||
563 | */ | ||
564 | #define PERF_HES_STOPPED 0x01 /* the counter is stopped */ | ||
565 | #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */ | ||
566 | #define PERF_HES_ARCH 0x04 | ||
567 | |||
553 | struct perf_event; | 568 | struct perf_event; |
554 | 569 | ||
555 | /* | 570 | /* |
@@ -561,36 +576,70 @@ struct perf_event; | |||
561 | * struct pmu - generic performance monitoring unit | 576 | * struct pmu - generic performance monitoring unit |
562 | */ | 577 | */ |
563 | struct pmu { | 578 | struct pmu { |
564 | int (*enable) (struct perf_event *event); | 579 | struct list_head entry; |
565 | void (*disable) (struct perf_event *event); | 580 | |
566 | int (*start) (struct perf_event *event); | 581 | int * __percpu pmu_disable_count; |
567 | void (*stop) (struct perf_event *event); | 582 | struct perf_cpu_context * __percpu pmu_cpu_context; |
568 | void (*read) (struct perf_event *event); | 583 | int task_ctx_nr; |
569 | void (*unthrottle) (struct perf_event *event); | 584 | |
585 | /* | ||
586 | * Fully disable/enable this PMU, can be used to protect from the PMI | ||
587 | * as well as for lazy/batch writing of the MSRs. | ||
588 | */ | ||
589 | void (*pmu_enable) (struct pmu *pmu); /* optional */ | ||
590 | void (*pmu_disable) (struct pmu *pmu); /* optional */ | ||
570 | 591 | ||
571 | /* | 592 | /* |
572 | * Group events scheduling is treated as a transaction, add group | 593 | * Try and initialize the event for this PMU. |
573 | * events as a whole and perform one schedulability test. If the test | 594 | * Should return -ENOENT when the @event doesn't match this PMU. |
574 | * fails, roll back the whole group | ||
575 | */ | 595 | */ |
596 | int (*event_init) (struct perf_event *event); | ||
597 | |||
598 | #define PERF_EF_START 0x01 /* start the counter when adding */ | ||
599 | #define PERF_EF_RELOAD 0x02 /* reload the counter when starting */ | ||
600 | #define PERF_EF_UPDATE 0x04 /* update the counter when stopping */ | ||
576 | 601 | ||
577 | /* | 602 | /* |
578 | * Start the transaction, after this ->enable() doesn't need | 603 | * Adds/Removes a counter to/from the PMU, can be done inside |
579 | * to do schedulability tests. | 604 | * a transaction, see the ->*_txn() methods. |
580 | */ | 605 | */ |
581 | void (*start_txn) (const struct pmu *pmu); | 606 | int (*add) (struct perf_event *event, int flags); |
607 | void (*del) (struct perf_event *event, int flags); | ||
608 | |||
582 | /* | 609 | /* |
583 | * If ->start_txn() disabled the ->enable() schedulability test | 610 | * Starts/Stops a counter present on the PMU. The PMI handler |
611 | * should stop the counter when perf_event_overflow() returns | ||
612 | * !0. ->start() will be used to continue. | ||
613 | */ | ||
614 | void (*start) (struct perf_event *event, int flags); | ||
615 | void (*stop) (struct perf_event *event, int flags); | ||
616 | |||
617 | /* | ||
618 | * Updates the counter value of the event. | ||
619 | */ | ||
620 | void (*read) (struct perf_event *event); | ||
621 | |||
622 | /* | ||
623 | * Group events scheduling is treated as a transaction, add | ||
624 | * group events as a whole and perform one schedulability test. | ||
625 | * If the test fails, roll back the whole group | ||
626 | * | ||
627 | * Start the transaction, after this ->add() doesn't need to | ||
628 | * do schedulability tests. | ||
629 | */ | ||
630 | void (*start_txn) (struct pmu *pmu); /* optional */ | ||
631 | /* | ||
632 | * If ->start_txn() disabled the ->add() schedulability test | ||
584 | * then ->commit_txn() is required to perform one. On success | 633 | * then ->commit_txn() is required to perform one. On success |
585 | * the transaction is closed. On error the transaction is kept | 634 | * the transaction is closed. On error the transaction is kept |
586 | * open until ->cancel_txn() is called. | 635 | * open until ->cancel_txn() is called. |
587 | */ | 636 | */ |
588 | int (*commit_txn) (const struct pmu *pmu); | 637 | int (*commit_txn) (struct pmu *pmu); /* optional */ |
589 | /* | 638 | /* |
590 | * Will cancel the transaction, assumes ->disable() is called for | 639 | * Will cancel the transaction, assumes ->del() is called |
591 | * each successful ->enable() during the transaction. | 640 | * for each successful ->add() during the transaction. |
592 | */ | 641 | */ |
593 | void (*cancel_txn) (const struct pmu *pmu); | 642 | void (*cancel_txn) (struct pmu *pmu); /* optional */ |
594 | }; | 643 | }; |
595 | 644 | ||
596 | /** | 645 | /** |
@@ -631,11 +680,6 @@ struct perf_buffer { | |||
631 | void *data_pages[0]; | 680 | void *data_pages[0]; |
632 | }; | 681 | }; |
633 | 682 | ||
634 | struct perf_pending_entry { | ||
635 | struct perf_pending_entry *next; | ||
636 | void (*func)(struct perf_pending_entry *); | ||
637 | }; | ||
638 | |||
639 | struct perf_sample_data; | 683 | struct perf_sample_data; |
640 | 684 | ||
641 | typedef void (*perf_overflow_handler_t)(struct perf_event *, int, | 685 | typedef void (*perf_overflow_handler_t)(struct perf_event *, int, |
@@ -656,6 +700,7 @@ struct swevent_hlist { | |||
656 | 700 | ||
657 | #define PERF_ATTACH_CONTEXT 0x01 | 701 | #define PERF_ATTACH_CONTEXT 0x01 |
658 | #define PERF_ATTACH_GROUP 0x02 | 702 | #define PERF_ATTACH_GROUP 0x02 |
703 | #define PERF_ATTACH_TASK 0x04 | ||
659 | 704 | ||
660 | /** | 705 | /** |
661 | * struct perf_event - performance event kernel representation: | 706 | * struct perf_event - performance event kernel representation: |
@@ -669,7 +714,7 @@ struct perf_event { | |||
669 | int nr_siblings; | 714 | int nr_siblings; |
670 | int group_flags; | 715 | int group_flags; |
671 | struct perf_event *group_leader; | 716 | struct perf_event *group_leader; |
672 | const struct pmu *pmu; | 717 | struct pmu *pmu; |
673 | 718 | ||
674 | enum perf_event_active_state state; | 719 | enum perf_event_active_state state; |
675 | unsigned int attach_state; | 720 | unsigned int attach_state; |
@@ -743,7 +788,7 @@ struct perf_event { | |||
743 | int pending_wakeup; | 788 | int pending_wakeup; |
744 | int pending_kill; | 789 | int pending_kill; |
745 | int pending_disable; | 790 | int pending_disable; |
746 | struct perf_pending_entry pending; | 791 | struct irq_work pending; |
747 | 792 | ||
748 | atomic_t event_limit; | 793 | atomic_t event_limit; |
749 | 794 | ||
@@ -763,12 +808,19 @@ struct perf_event { | |||
763 | #endif /* CONFIG_PERF_EVENTS */ | 808 | #endif /* CONFIG_PERF_EVENTS */ |
764 | }; | 809 | }; |
765 | 810 | ||
811 | enum perf_event_context_type { | ||
812 | task_context, | ||
813 | cpu_context, | ||
814 | }; | ||
815 | |||
766 | /** | 816 | /** |
767 | * struct perf_event_context - event context structure | 817 | * struct perf_event_context - event context structure |
768 | * | 818 | * |
769 | * Used as a container for task events and CPU events as well: | 819 | * Used as a container for task events and CPU events as well: |
770 | */ | 820 | */ |
771 | struct perf_event_context { | 821 | struct perf_event_context { |
822 | enum perf_event_context_type type; | ||
823 | struct pmu *pmu; | ||
772 | /* | 824 | /* |
773 | * Protect the states of the events in the list, | 825 | * Protect the states of the events in the list, |
774 | * nr_active, and the list: | 826 | * nr_active, and the list: |
@@ -808,6 +860,12 @@ struct perf_event_context { | |||
808 | struct rcu_head rcu_head; | 860 | struct rcu_head rcu_head; |
809 | }; | 861 | }; |
810 | 862 | ||
863 | /* | ||
864 | * Number of contexts where an event can trigger: | ||
865 | * task, softirq, hardirq, nmi. | ||
866 | */ | ||
867 | #define PERF_NR_CONTEXTS 4 | ||
868 | |||
811 | /** | 869 | /** |
812 | * struct perf_event_cpu_context - per cpu event context structure | 870 | * struct perf_event_cpu_context - per cpu event context structure |
813 | */ | 871 | */ |
@@ -815,18 +873,9 @@ struct perf_cpu_context { | |||
815 | struct perf_event_context ctx; | 873 | struct perf_event_context ctx; |
816 | struct perf_event_context *task_ctx; | 874 | struct perf_event_context *task_ctx; |
817 | int active_oncpu; | 875 | int active_oncpu; |
818 | int max_pertask; | ||
819 | int exclusive; | 876 | int exclusive; |
820 | struct swevent_hlist *swevent_hlist; | 877 | struct list_head rotation_list; |
821 | struct mutex hlist_mutex; | 878 | int jiffies_interval; |
822 | int hlist_refcount; | ||
823 | |||
824 | /* | ||
825 | * Recursion avoidance: | ||
826 | * | ||
827 | * task, softirq, irq, nmi context | ||
828 | */ | ||
829 | int recursion[4]; | ||
830 | }; | 879 | }; |
831 | 880 | ||
832 | struct perf_output_handle { | 881 | struct perf_output_handle { |
@@ -842,26 +891,34 @@ struct perf_output_handle { | |||
842 | 891 | ||
843 | #ifdef CONFIG_PERF_EVENTS | 892 | #ifdef CONFIG_PERF_EVENTS |
844 | 893 | ||
845 | /* | 894 | extern int perf_pmu_register(struct pmu *pmu); |
846 | * Set by architecture code: | 895 | extern void perf_pmu_unregister(struct pmu *pmu); |
847 | */ | 896 | |
848 | extern int perf_max_events; | 897 | extern int perf_num_counters(void); |
898 | extern const char *perf_pmu_name(void); | ||
899 | extern void __perf_event_task_sched_in(struct task_struct *task); | ||
900 | extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); | ||
849 | 901 | ||
850 | extern const struct pmu *hw_perf_event_init(struct perf_event *event); | 902 | extern atomic_t perf_task_events; |
903 | |||
904 | static inline void perf_event_task_sched_in(struct task_struct *task) | ||
905 | { | ||
906 | COND_STMT(&perf_task_events, __perf_event_task_sched_in(task)); | ||
907 | } | ||
908 | |||
909 | static inline | ||
910 | void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next) | ||
911 | { | ||
912 | COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next)); | ||
913 | } | ||
851 | 914 | ||
852 | extern void perf_event_task_sched_in(struct task_struct *task); | ||
853 | extern void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next); | ||
854 | extern void perf_event_task_tick(struct task_struct *task); | ||
855 | extern int perf_event_init_task(struct task_struct *child); | 915 | extern int perf_event_init_task(struct task_struct *child); |
856 | extern void perf_event_exit_task(struct task_struct *child); | 916 | extern void perf_event_exit_task(struct task_struct *child); |
857 | extern void perf_event_free_task(struct task_struct *task); | 917 | extern void perf_event_free_task(struct task_struct *task); |
858 | extern void set_perf_event_pending(void); | 918 | extern void perf_event_delayed_put(struct task_struct *task); |
859 | extern void perf_event_do_pending(void); | ||
860 | extern void perf_event_print_debug(void); | 919 | extern void perf_event_print_debug(void); |
861 | extern void __perf_disable(void); | 920 | extern void perf_pmu_disable(struct pmu *pmu); |
862 | extern bool __perf_enable(void); | 921 | extern void perf_pmu_enable(struct pmu *pmu); |
863 | extern void perf_disable(void); | ||
864 | extern void perf_enable(void); | ||
865 | extern int perf_event_task_disable(void); | 922 | extern int perf_event_task_disable(void); |
866 | extern int perf_event_task_enable(void); | 923 | extern int perf_event_task_enable(void); |
867 | extern void perf_event_update_userpage(struct perf_event *event); | 924 | extern void perf_event_update_userpage(struct perf_event *event); |
@@ -869,7 +926,7 @@ extern int perf_event_release_kernel(struct perf_event *event); | |||
869 | extern struct perf_event * | 926 | extern struct perf_event * |
870 | perf_event_create_kernel_counter(struct perf_event_attr *attr, | 927 | perf_event_create_kernel_counter(struct perf_event_attr *attr, |
871 | int cpu, | 928 | int cpu, |
872 | pid_t pid, | 929 | struct task_struct *task, |
873 | perf_overflow_handler_t callback); | 930 | perf_overflow_handler_t callback); |
874 | extern u64 perf_event_read_value(struct perf_event *event, | 931 | extern u64 perf_event_read_value(struct perf_event *event, |
875 | u64 *enabled, u64 *running); | 932 | u64 *enabled, u64 *running); |
@@ -920,14 +977,7 @@ extern int perf_event_overflow(struct perf_event *event, int nmi, | |||
920 | */ | 977 | */ |
921 | static inline int is_software_event(struct perf_event *event) | 978 | static inline int is_software_event(struct perf_event *event) |
922 | { | 979 | { |
923 | switch (event->attr.type) { | 980 | return event->pmu->task_ctx_nr == perf_sw_context; |
924 | case PERF_TYPE_SOFTWARE: | ||
925 | case PERF_TYPE_TRACEPOINT: | ||
926 | /* for now the breakpoint stuff also works as software event */ | ||
927 | case PERF_TYPE_BREAKPOINT: | ||
928 | return 1; | ||
929 | } | ||
930 | return 0; | ||
931 | } | 981 | } |
932 | 982 | ||
933 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; | 983 | extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX]; |
@@ -954,18 +1004,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs) | |||
954 | perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); | 1004 | perf_arch_fetch_caller_regs(regs, CALLER_ADDR0); |
955 | } | 1005 | } |
956 | 1006 | ||
957 | static inline void | 1007 | static __always_inline void |
958 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) | 1008 | perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr) |
959 | { | 1009 | { |
960 | if (atomic_read(&perf_swevent_enabled[event_id])) { | 1010 | struct pt_regs hot_regs; |
961 | struct pt_regs hot_regs; | 1011 | |
962 | 1012 | JUMP_LABEL(&perf_swevent_enabled[event_id], have_event); | |
963 | if (!regs) { | 1013 | return; |
964 | perf_fetch_caller_regs(&hot_regs); | 1014 | |
965 | regs = &hot_regs; | 1015 | have_event: |
966 | } | 1016 | if (!regs) { |
967 | __perf_sw_event(event_id, nr, nmi, regs, addr); | 1017 | perf_fetch_caller_regs(&hot_regs); |
1018 | regs = &hot_regs; | ||
968 | } | 1019 | } |
1020 | __perf_sw_event(event_id, nr, nmi, regs, addr); | ||
969 | } | 1021 | } |
970 | 1022 | ||
971 | extern void perf_event_mmap(struct vm_area_struct *vma); | 1023 | extern void perf_event_mmap(struct vm_area_struct *vma); |
@@ -976,7 +1028,21 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks | |||
976 | extern void perf_event_comm(struct task_struct *tsk); | 1028 | extern void perf_event_comm(struct task_struct *tsk); |
977 | extern void perf_event_fork(struct task_struct *tsk); | 1029 | extern void perf_event_fork(struct task_struct *tsk); |
978 | 1030 | ||
979 | extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs); | 1031 | /* Callchains */ |
1032 | DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry); | ||
1033 | |||
1034 | extern void perf_callchain_user(struct perf_callchain_entry *entry, | ||
1035 | struct pt_regs *regs); | ||
1036 | extern void perf_callchain_kernel(struct perf_callchain_entry *entry, | ||
1037 | struct pt_regs *regs); | ||
1038 | |||
1039 | |||
1040 | static inline void | ||
1041 | perf_callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
1042 | { | ||
1043 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
1044 | entry->ip[entry->nr++] = ip; | ||
1045 | } | ||
980 | 1046 | ||
981 | extern int sysctl_perf_event_paranoid; | 1047 | extern int sysctl_perf_event_paranoid; |
982 | extern int sysctl_perf_event_mlock; | 1048 | extern int sysctl_perf_event_mlock; |
@@ -1019,21 +1085,18 @@ extern int perf_swevent_get_recursion_context(void); | |||
1019 | extern void perf_swevent_put_recursion_context(int rctx); | 1085 | extern void perf_swevent_put_recursion_context(int rctx); |
1020 | extern void perf_event_enable(struct perf_event *event); | 1086 | extern void perf_event_enable(struct perf_event *event); |
1021 | extern void perf_event_disable(struct perf_event *event); | 1087 | extern void perf_event_disable(struct perf_event *event); |
1088 | extern void perf_event_task_tick(void); | ||
1022 | #else | 1089 | #else |
1023 | static inline void | 1090 | static inline void |
1024 | perf_event_task_sched_in(struct task_struct *task) { } | 1091 | perf_event_task_sched_in(struct task_struct *task) { } |
1025 | static inline void | 1092 | static inline void |
1026 | perf_event_task_sched_out(struct task_struct *task, | 1093 | perf_event_task_sched_out(struct task_struct *task, |
1027 | struct task_struct *next) { } | 1094 | struct task_struct *next) { } |
1028 | static inline void | ||
1029 | perf_event_task_tick(struct task_struct *task) { } | ||
1030 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } | 1095 | static inline int perf_event_init_task(struct task_struct *child) { return 0; } |
1031 | static inline void perf_event_exit_task(struct task_struct *child) { } | 1096 | static inline void perf_event_exit_task(struct task_struct *child) { } |
1032 | static inline void perf_event_free_task(struct task_struct *task) { } | 1097 | static inline void perf_event_free_task(struct task_struct *task) { } |
1033 | static inline void perf_event_do_pending(void) { } | 1098 | static inline void perf_event_delayed_put(struct task_struct *task) { } |
1034 | static inline void perf_event_print_debug(void) { } | 1099 | static inline void perf_event_print_debug(void) { } |
1035 | static inline void perf_disable(void) { } | ||
1036 | static inline void perf_enable(void) { } | ||
1037 | static inline int perf_event_task_disable(void) { return -EINVAL; } | 1100 | static inline int perf_event_task_disable(void) { return -EINVAL; } |
1038 | static inline int perf_event_task_enable(void) { return -EINVAL; } | 1101 | static inline int perf_event_task_enable(void) { return -EINVAL; } |
1039 | 1102 | ||
@@ -1056,6 +1119,7 @@ static inline int perf_swevent_get_recursion_context(void) { return -1; } | |||
1056 | static inline void perf_swevent_put_recursion_context(int rctx) { } | 1119 | static inline void perf_swevent_put_recursion_context(int rctx) { } |
1057 | static inline void perf_event_enable(struct perf_event *event) { } | 1120 | static inline void perf_event_enable(struct perf_event *event) { } |
1058 | static inline void perf_event_disable(struct perf_event *event) { } | 1121 | static inline void perf_event_disable(struct perf_event *event) { } |
1122 | static inline void perf_event_task_tick(void) { } | ||
1059 | #endif | 1123 | #endif |
1060 | 1124 | ||
1061 | #define perf_output_put(handle, x) \ | 1125 | #define perf_output_put(handle, x) \ |
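The struct pmu rework above replaces enable/disable with add/del plus start/stop and the PERF_EF_*/PERF_HES_* flag bits. A hedged skeleton of a trivial PMU against that interface (names are illustrative; a real PMU also programs hardware and folds counts into event->count in ->read()):

#include <linux/perf_event.h>

static void hypothetical_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;			/* counting again */
}

static void hypothetical_stop(struct perf_event *event, int flags)
{
	event->hw.state |= PERF_HES_STOPPED;
	if (flags & PERF_EF_UPDATE)
		event->hw.state |= PERF_HES_UPTODATE;
}

static int hypothetical_add(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		hypothetical_start(event, PERF_EF_RELOAD);
	return 0;
}

static void hypothetical_del(struct perf_event *event, int flags)
{
	hypothetical_stop(event, PERF_EF_UPDATE);
}

static void hypothetical_read(struct perf_event *event)
{
	/* a real PMU would update event->count from the hardware here */
}

static int hypothetical_event_init(struct perf_event *event)
{
	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;			/* not ours, try the next PMU */
	return 0;
}

static struct pmu hypothetical_pmu = {
	.event_init	= hypothetical_event_init,
	.add		= hypothetical_add,
	.del		= hypothetical_del,
	.start		= hypothetical_start,
	.stop		= hypothetical_stop,
	.read		= hypothetical_read,
};

/* registered once at init time: perf_pmu_register(&hypothetical_pmu); */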
diff --git a/include/linux/phonet.h b/include/linux/phonet.h index 24426c3d6b5a..26c8df786918 100644 --- a/include/linux/phonet.h +++ b/include/linux/phonet.h | |||
@@ -36,6 +36,9 @@ | |||
36 | /* Socket options for SOL_PNPIPE level */ | 36 | /* Socket options for SOL_PNPIPE level */ |
37 | #define PNPIPE_ENCAP 1 | 37 | #define PNPIPE_ENCAP 1 |
38 | #define PNPIPE_IFINDEX 2 | 38 | #define PNPIPE_IFINDEX 2 |
39 | #define PNPIPE_PIPE_HANDLE 3 | ||
40 | #define PNPIPE_ENABLE 4 | ||
41 | /* unused slot */ | ||
39 | 42 | ||
40 | #define PNADDR_ANY 0 | 43 | #define PNADDR_ANY 0 |
41 | #define PNADDR_BROADCAST 0xFC | 44 | #define PNADDR_BROADCAST 0xFC |
@@ -47,6 +50,8 @@ | |||
47 | 50 | ||
48 | /* ioctls */ | 51 | /* ioctls */ |
49 | #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) | 52 | #define SIOCPNGETOBJECT (SIOCPROTOPRIVATE + 0) |
53 | #define SIOCPNADDRESOURCE (SIOCPROTOPRIVATE + 14) | ||
54 | #define SIOCPNDELRESOURCE (SIOCPROTOPRIVATE + 15) | ||
50 | 55 | ||
51 | /* Phonet protocol header */ | 56 | /* Phonet protocol header */ |
52 | struct phonethdr { | 57 | struct phonethdr { |
@@ -56,7 +61,7 @@ struct phonethdr { | |||
56 | __be16 pn_length; | 61 | __be16 pn_length; |
57 | __u8 pn_robj; | 62 | __u8 pn_robj; |
58 | __u8 pn_sobj; | 63 | __u8 pn_sobj; |
59 | } __packed; | 64 | } __attribute__((packed)); |
60 | 65 | ||
61 | /* Common Phonet payload header */ | 66 | /* Common Phonet payload header */ |
62 | struct phonetmsg { | 67 | struct phonetmsg { |
@@ -98,7 +103,7 @@ struct sockaddr_pn { | |||
98 | __u8 spn_dev; | 103 | __u8 spn_dev; |
99 | __u8 spn_resource; | 104 | __u8 spn_resource; |
100 | __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; | 105 | __u8 spn_zero[sizeof(struct sockaddr) - sizeof(sa_family_t) - 3]; |
101 | } __packed; | 106 | } __attribute__((packed)); |
102 | 107 | ||
103 | /* Well known address */ | 108 | /* Well known address */ |
104 | #define PN_DEV_PC 0x10 | 109 | #define PN_DEV_PC 0x10 |
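The new SOL_PNPIPE options are userspace-visible; a hedged userspace sketch of querying one of them (only the constant names come from the header, the exact PNPIPE_PIPE_HANDLE semantics shown here are an assumption):

#include <stdio.h>
#include <sys/socket.h>
#include <linux/phonet.h>

int main(void)
{
	int fd = socket(AF_PHONET, SOCK_SEQPACKET, PN_PROTO_PIPE);
	int handle = 0;
	socklen_t len = sizeof(handle);

	/* assumed usage: read back the pipe handle once the pipe exists */
	if (fd >= 0 &&
	    getsockopt(fd, SOL_PNPIPE, PNPIPE_PIPE_HANDLE, &handle, &len) == 0)
		printf("pipe handle: %d\n", handle);
	return 0;
}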
diff --git a/include/linux/phy.h b/include/linux/phy.h index 6b0a782c6224..a6e047a04f79 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -116,7 +116,7 @@ struct mii_bus { | |||
116 | /* list of all PHYs on bus */ | 116 | /* list of all PHYs on bus */ |
117 | struct phy_device *phy_map[PHY_MAX_ADDR]; | 117 | struct phy_device *phy_map[PHY_MAX_ADDR]; |
118 | 118 | ||
119 | /* Phy addresses to be ignored when probing */ | 119 | /* PHY addresses to be ignored when probing */ |
120 | u32 phy_mask; | 120 | u32 phy_mask; |
121 | 121 | ||
122 | /* | 122 | /* |
@@ -283,7 +283,7 @@ struct phy_device { | |||
283 | 283 | ||
284 | phy_interface_t interface; | 284 | phy_interface_t interface; |
285 | 285 | ||
286 | /* Bus address of the PHY (0-32) */ | 286 | /* Bus address of the PHY (0-31) */ |
287 | int addr; | 287 | int addr; |
288 | 288 | ||
289 | /* | 289 | /* |
diff --git a/include/linux/pkt_cls.h b/include/linux/pkt_cls.h index 7f6ba8658abe..defbde203d07 100644 --- a/include/linux/pkt_cls.h +++ b/include/linux/pkt_cls.h | |||
@@ -332,6 +332,7 @@ enum { | |||
332 | FLOW_KEY_SKUID, | 332 | FLOW_KEY_SKUID, |
333 | FLOW_KEY_SKGID, | 333 | FLOW_KEY_SKGID, |
334 | FLOW_KEY_VLAN_TAG, | 334 | FLOW_KEY_VLAN_TAG, |
335 | FLOW_KEY_RXHASH, | ||
335 | __FLOW_KEY_MAX, | 336 | __FLOW_KEY_MAX, |
336 | }; | 337 | }; |
337 | 338 | ||
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index d7ecad0093bb..2e700ec0601f 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
@@ -138,6 +138,9 @@ extern struct platform_device *platform_create_bundle(struct platform_driver *dr | |||
138 | struct resource *res, unsigned int n_res, | 138 | struct resource *res, unsigned int n_res, |
139 | const void *data, size_t size); | 139 | const void *data, size_t size); |
140 | 140 | ||
141 | extern const struct dev_pm_ops * platform_bus_get_pm_ops(void); | ||
142 | extern void platform_bus_set_pm_ops(const struct dev_pm_ops *pm); | ||
143 | |||
141 | /* early platform driver interface */ | 144 | /* early platform driver interface */ |
142 | struct early_platform_driver { | 145 | struct early_platform_driver { |
143 | const char *class_str; | 146 | const char *class_str; |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 52e8c55ff314..40f3f45702ba 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
@@ -41,6 +41,12 @@ extern void (*pm_power_off_prepare)(void); | |||
41 | 41 | ||
42 | struct device; | 42 | struct device; |
43 | 43 | ||
44 | #ifdef CONFIG_PM | ||
45 | extern const char power_group_name[]; /* = "power" */ | ||
46 | #else | ||
47 | #define power_group_name NULL | ||
48 | #endif | ||
49 | |||
44 | typedef struct pm_message { | 50 | typedef struct pm_message { |
45 | int event; | 51 | int event; |
46 | } pm_message_t; | 52 | } pm_message_t; |
@@ -438,6 +444,9 @@ enum rpm_status { | |||
438 | * | 444 | * |
439 | * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback | 445 | * RPM_REQ_SUSPEND Run the device bus type's ->runtime_suspend() callback |
440 | * | 446 | * |
447 | * RPM_REQ_AUTOSUSPEND Same as RPM_REQ_SUSPEND, but not until the device has | ||
448 | * been inactive for as long as power.autosuspend_delay | ||
449 | * | ||
441 | * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback | 450 | * RPM_REQ_RESUME Run the device bus type's ->runtime_resume() callback |
442 | */ | 451 | */ |
443 | 452 | ||
@@ -445,26 +454,28 @@ enum rpm_request { | |||
445 | RPM_REQ_NONE = 0, | 454 | RPM_REQ_NONE = 0, |
446 | RPM_REQ_IDLE, | 455 | RPM_REQ_IDLE, |
447 | RPM_REQ_SUSPEND, | 456 | RPM_REQ_SUSPEND, |
457 | RPM_REQ_AUTOSUSPEND, | ||
448 | RPM_REQ_RESUME, | 458 | RPM_REQ_RESUME, |
449 | }; | 459 | }; |
450 | 460 | ||
461 | struct wakeup_source; | ||
462 | |||
451 | struct dev_pm_info { | 463 | struct dev_pm_info { |
452 | pm_message_t power_state; | 464 | pm_message_t power_state; |
453 | unsigned int can_wakeup:1; | 465 | unsigned int can_wakeup:1; |
454 | unsigned int should_wakeup:1; | ||
455 | unsigned async_suspend:1; | 466 | unsigned async_suspend:1; |
456 | enum dpm_state status; /* Owned by the PM core */ | 467 | enum dpm_state status; /* Owned by the PM core */ |
468 | spinlock_t lock; | ||
457 | #ifdef CONFIG_PM_SLEEP | 469 | #ifdef CONFIG_PM_SLEEP |
458 | struct list_head entry; | 470 | struct list_head entry; |
459 | struct completion completion; | 471 | struct completion completion; |
460 | unsigned long wakeup_count; | 472 | struct wakeup_source *wakeup; |
461 | #endif | 473 | #endif |
462 | #ifdef CONFIG_PM_RUNTIME | 474 | #ifdef CONFIG_PM_RUNTIME |
463 | struct timer_list suspend_timer; | 475 | struct timer_list suspend_timer; |
464 | unsigned long timer_expires; | 476 | unsigned long timer_expires; |
465 | struct work_struct work; | 477 | struct work_struct work; |
466 | wait_queue_head_t wait_queue; | 478 | wait_queue_head_t wait_queue; |
467 | spinlock_t lock; | ||
468 | atomic_t usage_count; | 479 | atomic_t usage_count; |
469 | atomic_t child_count; | 480 | atomic_t child_count; |
470 | unsigned int disable_depth:3; | 481 | unsigned int disable_depth:3; |
@@ -474,9 +485,14 @@ struct dev_pm_info { | |||
474 | unsigned int deferred_resume:1; | 485 | unsigned int deferred_resume:1; |
475 | unsigned int run_wake:1; | 486 | unsigned int run_wake:1; |
476 | unsigned int runtime_auto:1; | 487 | unsigned int runtime_auto:1; |
488 | unsigned int no_callbacks:1; | ||
489 | unsigned int use_autosuspend:1; | ||
490 | unsigned int timer_autosuspends:1; | ||
477 | enum rpm_request request; | 491 | enum rpm_request request; |
478 | enum rpm_status runtime_status; | 492 | enum rpm_status runtime_status; |
479 | int runtime_error; | 493 | int runtime_error; |
494 | int autosuspend_delay; | ||
495 | unsigned long last_busy; | ||
480 | unsigned long active_jiffies; | 496 | unsigned long active_jiffies; |
481 | unsigned long suspended_jiffies; | 497 | unsigned long suspended_jiffies; |
482 | unsigned long accounting_timestamp; | 498 | unsigned long accounting_timestamp; |
@@ -558,12 +574,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret); | |||
558 | __suspend_report_result(__func__, fn, ret); \ | 574 | __suspend_report_result(__func__, fn, ret); \ |
559 | } while (0) | 575 | } while (0) |
560 | 576 | ||
561 | extern void device_pm_wait_for_dev(struct device *sub, struct device *dev); | 577 | extern int device_pm_wait_for_dev(struct device *sub, struct device *dev); |
562 | |||
563 | /* drivers/base/power/wakeup.c */ | ||
564 | extern void pm_wakeup_event(struct device *dev, unsigned int msec); | ||
565 | extern void pm_stay_awake(struct device *dev); | ||
566 | extern void pm_relax(void); | ||
567 | #else /* !CONFIG_PM_SLEEP */ | 578 | #else /* !CONFIG_PM_SLEEP */ |
568 | 579 | ||
569 | #define device_pm_lock() do {} while (0) | 580 | #define device_pm_lock() do {} while (0) |
@@ -576,11 +587,10 @@ static inline int dpm_suspend_start(pm_message_t state) | |||
576 | 587 | ||
577 | #define suspend_report_result(fn, ret) do {} while (0) | 588 | #define suspend_report_result(fn, ret) do {} while (0) |
578 | 589 | ||
579 | static inline void device_pm_wait_for_dev(struct device *a, struct device *b) {} | 590 | static inline int device_pm_wait_for_dev(struct device *a, struct device *b) |
580 | 591 | { | |
581 | static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} | 592 | return 0; |
582 | static inline void pm_stay_awake(struct device *dev) {} | 593 | } |
583 | static inline void pm_relax(void) {} | ||
584 | #endif /* !CONFIG_PM_SLEEP */ | 594 | #endif /* !CONFIG_PM_SLEEP */ |
585 | 595 | ||
586 | /* How to reorder dpm_list after device_move() */ | 596 | /* How to reorder dpm_list after device_move() */ |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 6e81888c6222..3ec2358f8692 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
@@ -12,18 +12,24 @@ | |||
12 | #include <linux/device.h> | 12 | #include <linux/device.h> |
13 | #include <linux/pm.h> | 13 | #include <linux/pm.h> |
14 | 14 | ||
15 | #include <linux/jiffies.h> | ||
16 | |||
17 | /* Runtime PM flag argument bits */ | ||
18 | #define RPM_ASYNC 0x01 /* Request is asynchronous */ | ||
19 | #define RPM_NOWAIT 0x02 /* Don't wait for concurrent | ||
20 | state change */ | ||
21 | #define RPM_GET_PUT 0x04 /* Increment/decrement the | ||
22 | usage_count */ | ||
23 | #define RPM_AUTO 0x08 /* Use autosuspend_delay */ | ||
24 | |||
15 | #ifdef CONFIG_PM_RUNTIME | 25 | #ifdef CONFIG_PM_RUNTIME |
16 | 26 | ||
17 | extern struct workqueue_struct *pm_wq; | 27 | extern struct workqueue_struct *pm_wq; |
18 | 28 | ||
19 | extern int pm_runtime_idle(struct device *dev); | 29 | extern int __pm_runtime_idle(struct device *dev, int rpmflags); |
20 | extern int pm_runtime_suspend(struct device *dev); | 30 | extern int __pm_runtime_suspend(struct device *dev, int rpmflags); |
21 | extern int pm_runtime_resume(struct device *dev); | 31 | extern int __pm_runtime_resume(struct device *dev, int rpmflags); |
22 | extern int pm_request_idle(struct device *dev); | ||
23 | extern int pm_schedule_suspend(struct device *dev, unsigned int delay); | 32 | extern int pm_schedule_suspend(struct device *dev, unsigned int delay); |
24 | extern int pm_request_resume(struct device *dev); | ||
25 | extern int __pm_runtime_get(struct device *dev, bool sync); | ||
26 | extern int __pm_runtime_put(struct device *dev, bool sync); | ||
27 | extern int __pm_runtime_set_status(struct device *dev, unsigned int status); | 33 | extern int __pm_runtime_set_status(struct device *dev, unsigned int status); |
28 | extern int pm_runtime_barrier(struct device *dev); | 34 | extern int pm_runtime_barrier(struct device *dev); |
29 | extern void pm_runtime_enable(struct device *dev); | 35 | extern void pm_runtime_enable(struct device *dev); |
@@ -33,6 +39,10 @@ extern void pm_runtime_forbid(struct device *dev); | |||
33 | extern int pm_generic_runtime_idle(struct device *dev); | 39 | extern int pm_generic_runtime_idle(struct device *dev); |
34 | extern int pm_generic_runtime_suspend(struct device *dev); | 40 | extern int pm_generic_runtime_suspend(struct device *dev); |
35 | extern int pm_generic_runtime_resume(struct device *dev); | 41 | extern int pm_generic_runtime_resume(struct device *dev); |
42 | extern void pm_runtime_no_callbacks(struct device *dev); | ||
43 | extern void __pm_runtime_use_autosuspend(struct device *dev, bool use); | ||
44 | extern void pm_runtime_set_autosuspend_delay(struct device *dev, int delay); | ||
45 | extern unsigned long pm_runtime_autosuspend_expiration(struct device *dev); | ||
36 | 46 | ||
37 | static inline bool pm_children_suspended(struct device *dev) | 47 | static inline bool pm_children_suspended(struct device *dev) |
38 | { | 48 | { |
@@ -70,19 +80,29 @@ static inline bool pm_runtime_suspended(struct device *dev) | |||
70 | return dev->power.runtime_status == RPM_SUSPENDED; | 80 | return dev->power.runtime_status == RPM_SUSPENDED; |
71 | } | 81 | } |
72 | 82 | ||
83 | static inline void pm_runtime_mark_last_busy(struct device *dev) | ||
84 | { | ||
85 | ACCESS_ONCE(dev->power.last_busy) = jiffies; | ||
86 | } | ||
87 | |||
73 | #else /* !CONFIG_PM_RUNTIME */ | 88 | #else /* !CONFIG_PM_RUNTIME */ |
74 | 89 | ||
75 | static inline int pm_runtime_idle(struct device *dev) { return -ENOSYS; } | 90 | static inline int __pm_runtime_idle(struct device *dev, int rpmflags) |
76 | static inline int pm_runtime_suspend(struct device *dev) { return -ENOSYS; } | 91 | { |
77 | static inline int pm_runtime_resume(struct device *dev) { return 0; } | 92 | return -ENOSYS; |
78 | static inline int pm_request_idle(struct device *dev) { return -ENOSYS; } | 93 | } |
94 | static inline int __pm_runtime_suspend(struct device *dev, int rpmflags) | ||
95 | { | ||
96 | return -ENOSYS; | ||
97 | } | ||
98 | static inline int __pm_runtime_resume(struct device *dev, int rpmflags) | ||
99 | { | ||
100 | return 1; | ||
101 | } | ||
79 | static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) | 102 | static inline int pm_schedule_suspend(struct device *dev, unsigned int delay) |
80 | { | 103 | { |
81 | return -ENOSYS; | 104 | return -ENOSYS; |
82 | } | 105 | } |
83 | static inline int pm_request_resume(struct device *dev) { return 0; } | ||
84 | static inline int __pm_runtime_get(struct device *dev, bool sync) { return 1; } | ||
85 | static inline int __pm_runtime_put(struct device *dev, bool sync) { return 0; } | ||
86 | static inline int __pm_runtime_set_status(struct device *dev, | 106 | static inline int __pm_runtime_set_status(struct device *dev, |
87 | unsigned int status) { return 0; } | 107 | unsigned int status) { return 0; } |
88 | static inline int pm_runtime_barrier(struct device *dev) { return 0; } | 108 | static inline int pm_runtime_barrier(struct device *dev) { return 0; } |
@@ -102,27 +122,82 @@ static inline bool pm_runtime_suspended(struct device *dev) { return false; } | |||
102 | static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } | 122 | static inline int pm_generic_runtime_idle(struct device *dev) { return 0; } |
103 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } | 123 | static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; } |
104 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } | 124 | static inline int pm_generic_runtime_resume(struct device *dev) { return 0; } |
125 | static inline void pm_runtime_no_callbacks(struct device *dev) {} | ||
126 | |||
127 | static inline void pm_runtime_mark_last_busy(struct device *dev) {} | ||
128 | static inline void __pm_runtime_use_autosuspend(struct device *dev, | ||
129 | bool use) {} | ||
130 | static inline void pm_runtime_set_autosuspend_delay(struct device *dev, | ||
131 | int delay) {} | ||
132 | static inline unsigned long pm_runtime_autosuspend_expiration( | ||
133 | struct device *dev) { return 0; } | ||
105 | 134 | ||
106 | #endif /* !CONFIG_PM_RUNTIME */ | 135 | #endif /* !CONFIG_PM_RUNTIME */ |
107 | 136 | ||
137 | static inline int pm_runtime_idle(struct device *dev) | ||
138 | { | ||
139 | return __pm_runtime_idle(dev, 0); | ||
140 | } | ||
141 | |||
142 | static inline int pm_runtime_suspend(struct device *dev) | ||
143 | { | ||
144 | return __pm_runtime_suspend(dev, 0); | ||
145 | } | ||
146 | |||
147 | static inline int pm_runtime_autosuspend(struct device *dev) | ||
148 | { | ||
149 | return __pm_runtime_suspend(dev, RPM_AUTO); | ||
150 | } | ||
151 | |||
152 | static inline int pm_runtime_resume(struct device *dev) | ||
153 | { | ||
154 | return __pm_runtime_resume(dev, 0); | ||
155 | } | ||
156 | |||
157 | static inline int pm_request_idle(struct device *dev) | ||
158 | { | ||
159 | return __pm_runtime_idle(dev, RPM_ASYNC); | ||
160 | } | ||
161 | |||
162 | static inline int pm_request_resume(struct device *dev) | ||
163 | { | ||
164 | return __pm_runtime_resume(dev, RPM_ASYNC); | ||
165 | } | ||
166 | |||
167 | static inline int pm_request_autosuspend(struct device *dev) | ||
168 | { | ||
169 | return __pm_runtime_suspend(dev, RPM_ASYNC | RPM_AUTO); | ||
170 | } | ||
171 | |||
108 | static inline int pm_runtime_get(struct device *dev) | 172 | static inline int pm_runtime_get(struct device *dev) |
109 | { | 173 | { |
110 | return __pm_runtime_get(dev, false); | 174 | return __pm_runtime_resume(dev, RPM_GET_PUT | RPM_ASYNC); |
111 | } | 175 | } |
112 | 176 | ||
113 | static inline int pm_runtime_get_sync(struct device *dev) | 177 | static inline int pm_runtime_get_sync(struct device *dev) |
114 | { | 178 | { |
115 | return __pm_runtime_get(dev, true); | 179 | return __pm_runtime_resume(dev, RPM_GET_PUT); |
116 | } | 180 | } |
117 | 181 | ||
118 | static inline int pm_runtime_put(struct device *dev) | 182 | static inline int pm_runtime_put(struct device *dev) |
119 | { | 183 | { |
120 | return __pm_runtime_put(dev, false); | 184 | return __pm_runtime_idle(dev, RPM_GET_PUT | RPM_ASYNC); |
185 | } | ||
186 | |||
187 | static inline int pm_runtime_put_autosuspend(struct device *dev) | ||
188 | { | ||
189 | return __pm_runtime_suspend(dev, | ||
190 | RPM_GET_PUT | RPM_ASYNC | RPM_AUTO); | ||
121 | } | 191 | } |
122 | 192 | ||
123 | static inline int pm_runtime_put_sync(struct device *dev) | 193 | static inline int pm_runtime_put_sync(struct device *dev) |
124 | { | 194 | { |
125 | return __pm_runtime_put(dev, true); | 195 | return __pm_runtime_idle(dev, RPM_GET_PUT); |
196 | } | ||
197 | |||
198 | static inline int pm_runtime_put_sync_autosuspend(struct device *dev) | ||
199 | { | ||
200 | return __pm_runtime_suspend(dev, RPM_GET_PUT | RPM_AUTO); | ||
126 | } | 201 | } |
127 | 202 | ||
128 | static inline int pm_runtime_set_active(struct device *dev) | 203 | static inline int pm_runtime_set_active(struct device *dev) |
@@ -140,4 +215,14 @@ static inline void pm_runtime_disable(struct device *dev) | |||
140 | __pm_runtime_disable(dev, true); | 215 | __pm_runtime_disable(dev, true); |
141 | } | 216 | } |
142 | 217 | ||
218 | static inline void pm_runtime_use_autosuspend(struct device *dev) | ||
219 | { | ||
220 | __pm_runtime_use_autosuspend(dev, true); | ||
221 | } | ||
222 | |||
223 | static inline void pm_runtime_dont_use_autosuspend(struct device *dev) | ||
224 | { | ||
225 | __pm_runtime_use_autosuspend(dev, false); | ||
226 | } | ||
227 | |||
143 | #endif | 228 | #endif |
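The autosuspend additions are easiest to read from the driver side; a sketch of the intended calling pattern using only helpers declared above (driver and function names illustrative):

#include <linux/pm_runtime.h>

static int hypothetical_probe(struct device *dev)
{
	pm_runtime_set_autosuspend_delay(dev, 2000);	/* 2 s of inactivity */
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);
	return 0;
}

static int hypothetical_do_io(struct device *dev)
{
	int ret = pm_runtime_get_sync(dev);	/* resume if suspended */

	if (ret < 0)
		return ret;

	/* ... talk to the hardware ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);	/* suspend after the delay */
	return 0;
}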
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 76aca48722ae..9cff00dd6b63 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
@@ -2,6 +2,7 @@ | |||
2 | * pm_wakeup.h - Power management wakeup interface | 2 | * pm_wakeup.h - Power management wakeup interface |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Alan Stern | 4 | * Copyright (C) 2008 Alan Stern |
5 | * Copyright (C) 2010 Rafael J. Wysocki, Novell Inc. | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify | 7 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
@@ -27,19 +28,77 @@ | |||
27 | 28 | ||
28 | #include <linux/types.h> | 29 | #include <linux/types.h> |
29 | 30 | ||
30 | #ifdef CONFIG_PM | 31 | /** |
31 | 32 | * struct wakeup_source - Representation of wakeup sources | |
32 | /* Changes to device_may_wakeup take effect on the next pm state change. | ||
33 | * | 33 | * |
34 | * By default, most devices should leave wakeup disabled. The exceptions | 34 | * @total_time: Total time this wakeup source has been active. |
35 | * are devices that everyone expects to be wakeup sources: keyboards, | 35 | * @max_time: Maximum time this wakeup source has been continuously active. |
36 | * power buttons, possibly network interfaces, etc. | 36 | * @last_time: Monotonic clock when the wakeup source was last activated. |
37 | * @event_count: Number of signaled wakeup events. | ||
38 | * @active_count: Number of times the wakeup source was activated. | ||
39 | * @relax_count: Number of times the wakeup source was deactivated. | ||
40 | * @hit_count: Number of times the wakeup source might abort system suspend. | ||
41 | * @active: Status of the wakeup source. | ||
37 | */ | 42 | */ |
38 | static inline void device_init_wakeup(struct device *dev, bool val) | 43 | struct wakeup_source { |
44 | char *name; | ||
45 | struct list_head entry; | ||
46 | spinlock_t lock; | ||
47 | struct timer_list timer; | ||
48 | unsigned long timer_expires; | ||
49 | ktime_t total_time; | ||
50 | ktime_t max_time; | ||
51 | ktime_t last_time; | ||
52 | unsigned long event_count; | ||
53 | unsigned long active_count; | ||
54 | unsigned long relax_count; | ||
55 | unsigned long hit_count; | ||
56 | unsigned int active:1; | ||
57 | }; | ||
58 | |||
59 | #ifdef CONFIG_PM_SLEEP | ||
60 | |||
61 | /* | ||
62 | * Changes to device_may_wakeup take effect on the next pm state change. | ||
63 | */ | ||
64 | |||
65 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) | ||
66 | { | ||
67 | dev->power.can_wakeup = capable; | ||
68 | } | ||
69 | |||
70 | static inline bool device_can_wakeup(struct device *dev) | ||
71 | { | ||
72 | return dev->power.can_wakeup; | ||
73 | } | ||
74 | |||
75 | |||
76 | |||
77 | static inline bool device_may_wakeup(struct device *dev) | ||
39 | { | 78 | { |
40 | dev->power.can_wakeup = dev->power.should_wakeup = val; | 79 | return dev->power.can_wakeup && !!dev->power.wakeup; |
41 | } | 80 | } |
42 | 81 | ||
82 | /* drivers/base/power/wakeup.c */ | ||
83 | extern struct wakeup_source *wakeup_source_create(const char *name); | ||
84 | extern void wakeup_source_destroy(struct wakeup_source *ws); | ||
85 | extern void wakeup_source_add(struct wakeup_source *ws); | ||
86 | extern void wakeup_source_remove(struct wakeup_source *ws); | ||
87 | extern struct wakeup_source *wakeup_source_register(const char *name); | ||
88 | extern void wakeup_source_unregister(struct wakeup_source *ws); | ||
89 | extern int device_wakeup_enable(struct device *dev); | ||
90 | extern int device_wakeup_disable(struct device *dev); | ||
91 | extern int device_init_wakeup(struct device *dev, bool val); | ||
92 | extern int device_set_wakeup_enable(struct device *dev, bool enable); | ||
93 | extern void __pm_stay_awake(struct wakeup_source *ws); | ||
94 | extern void pm_stay_awake(struct device *dev); | ||
95 | extern void __pm_relax(struct wakeup_source *ws); | ||
96 | extern void pm_relax(struct device *dev); | ||
97 | extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec); | ||
98 | extern void pm_wakeup_event(struct device *dev, unsigned int msec); | ||
99 | |||
100 | #else /* !CONFIG_PM_SLEEP */ | ||
101 | |||
43 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) | 102 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) |
44 | { | 103 | { |
45 | dev->power.can_wakeup = capable; | 104 | dev->power.can_wakeup = capable; |
@@ -50,43 +109,63 @@ static inline bool device_can_wakeup(struct device *dev) | |||
50 | return dev->power.can_wakeup; | 109 | return dev->power.can_wakeup; |
51 | } | 110 | } |
52 | 111 | ||
53 | static inline void device_set_wakeup_enable(struct device *dev, bool enable) | 112 | static inline bool device_may_wakeup(struct device *dev) |
54 | { | 113 | { |
55 | dev->power.should_wakeup = enable; | 114 | return false; |
56 | } | 115 | } |
57 | 116 | ||
58 | static inline bool device_may_wakeup(struct device *dev) | 117 | static inline struct wakeup_source *wakeup_source_create(const char *name) |
59 | { | 118 | { |
60 | return dev->power.can_wakeup && dev->power.should_wakeup; | 119 | return NULL; |
61 | } | 120 | } |
62 | 121 | ||
63 | #else /* !CONFIG_PM */ | 122 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} |
123 | |||
124 | static inline void wakeup_source_add(struct wakeup_source *ws) {} | ||
64 | 125 | ||
65 | /* For some reason the following routines work even without CONFIG_PM */ | 126 | static inline void wakeup_source_remove(struct wakeup_source *ws) {} |
66 | static inline void device_init_wakeup(struct device *dev, bool val) | 127 | |
128 | static inline struct wakeup_source *wakeup_source_register(const char *name) | ||
67 | { | 129 | { |
68 | dev->power.can_wakeup = val; | 130 | return NULL; |
69 | } | 131 | } |
70 | 132 | ||
71 | static inline void device_set_wakeup_capable(struct device *dev, bool capable) | 133 | static inline void wakeup_source_unregister(struct wakeup_source *ws) {} |
134 | |||
135 | static inline int device_wakeup_enable(struct device *dev) | ||
72 | { | 136 | { |
73 | dev->power.can_wakeup = capable; | 137 | return -EINVAL; |
74 | } | 138 | } |
75 | 139 | ||
76 | static inline bool device_can_wakeup(struct device *dev) | 140 | static inline int device_wakeup_disable(struct device *dev) |
77 | { | 141 | { |
78 | return dev->power.can_wakeup; | 142 | return 0; |
79 | } | 143 | } |
80 | 144 | ||
81 | static inline void device_set_wakeup_enable(struct device *dev, bool enable) | 145 | static inline int device_init_wakeup(struct device *dev, bool val) |
82 | { | 146 | { |
147 | dev->power.can_wakeup = val; | ||
148 | return val ? -EINVAL : 0; | ||
83 | } | 149 | } |
84 | 150 | ||
85 | static inline bool device_may_wakeup(struct device *dev) | 151 | |
152 | static inline int device_set_wakeup_enable(struct device *dev, bool enable) | ||
86 | { | 153 | { |
87 | return false; | 154 | return -EINVAL; |
88 | } | 155 | } |
89 | 156 | ||
90 | #endif /* !CONFIG_PM */ | 157 | static inline void __pm_stay_awake(struct wakeup_source *ws) {} |
158 | |||
159 | static inline void pm_stay_awake(struct device *dev) {} | ||
160 | |||
161 | static inline void __pm_relax(struct wakeup_source *ws) {} | ||
162 | |||
163 | static inline void pm_relax(struct device *dev) {} | ||
164 | |||
165 | static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {} | ||
166 | |||
167 | static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {} | ||
168 | |||
169 | #endif /* !CONFIG_PM_SLEEP */ | ||
91 | 170 | ||
92 | #endif /* _LINUX_PM_WAKEUP_H */ | 171 | #endif /* _LINUX_PM_WAKEUP_H */ |
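A sketch of how a driver is expected to use the wakeup_source API introduced above (names illustrative; a device-bound driver can use pm_stay_awake()/pm_relax() on its struct device instead):

#include <linux/pm_wakeup.h>

static struct wakeup_source *hypothetical_ws;

static void hypothetical_init(void)
{
	hypothetical_ws = wakeup_source_register("hypothetical");
}

static void hypothetical_handle_event(void)
{
	__pm_stay_awake(hypothetical_ws);	/* block suspend while processing */
	/* ... process the wakeup event ... */
	__pm_relax(hypothetical_ws);		/* allow suspend again */
}

static void hypothetical_exit(void)
{
	wakeup_source_unregister(hypothetical_ws);
}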
diff --git a/include/linux/pxa168_eth.h b/include/linux/pxa168_eth.h new file mode 100644 index 000000000000..18d75e795606 --- /dev/null +++ b/include/linux/pxa168_eth.h | |||
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * pxa168 Ethernet platform device data definition file. | ||
3 | */ | ||
4 | #ifndef __LINUX_PXA168_ETH_H | ||
5 | #define __LINUX_PXA168_ETH_H | ||
6 | |||
7 | struct pxa168_eth_platform_data { | ||
8 | int port_number; | ||
9 | int phy_addr; | ||
10 | |||
11 | /* | ||
12 | * If speed is 0, then speed and duplex are autonegotiated. | ||
13 | */ | ||
14 | int speed; /* 0, SPEED_10, SPEED_100 */ | ||
15 | int duplex; /* DUPLEX_HALF or DUPLEX_FULL */ | ||
16 | |||
17 | /* | ||
18 | * Override default RX/TX queue sizes if nonzero. | ||
19 | */ | ||
20 | int rx_queue_size; | ||
21 | int tx_queue_size; | ||
22 | |||
23 | /* | ||
24 | * The init callback is used for board-specific initialization, | ||
25 | * e.g. on Aspenite it's used to initialize the PHY transceiver. | ||
26 | */ | ||
27 | int (*init)(void); | ||
28 | }; | ||
29 | |||
30 | #endif /* __LINUX_PXA168_ETH_H */ | ||
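As a usage illustration only, a hypothetical board file might register the controller with this platform data roughly as follows; the "pxa168-eth" device name, the PHY address and the board_* identifiers are assumptions, not part of this header.

#include <linux/platform_device.h>
#include <linux/pxa168_eth.h>

/* Board-specific PHY bring-up hook, e.g. deasserting a reset line. */
static int board_phy_init(void)
{
        return 0;
}

static struct pxa168_eth_platform_data board_eth_data = {
        .port_number    = 0,
        .phy_addr       = 0,            /* MDIO address of the external PHY */
        .speed          = 0,            /* 0 => autonegotiate speed and duplex */
        .rx_queue_size  = 0,            /* 0 => keep the driver defaults */
        .tx_queue_size  = 0,
        .init           = board_phy_init,
};

static struct platform_device board_eth_device = {
        .name   = "pxa168-eth",         /* assumed platform driver name */
        .id     = -1,
        .dev    = {
                .platform_data = &board_eth_data,
        },
};

/* Registered from the board's machine-init code:
 *      platform_device_register(&board_eth_device);
 */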
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h index d50ba858cfe0..d1a9193960f1 100644 --- a/include/linux/quotaops.h +++ b/include/linux/quotaops.h | |||
@@ -274,8 +274,14 @@ static inline int dquot_alloc_space(struct inode *inode, qsize_t nr) | |||
274 | int ret; | 274 | int ret; |
275 | 275 | ||
276 | ret = dquot_alloc_space_nodirty(inode, nr); | 276 | ret = dquot_alloc_space_nodirty(inode, nr); |
277 | if (!ret) | 277 | if (!ret) { |
278 | mark_inode_dirty_sync(inode); | 278 | /* |
279 | * Mark inode fully dirty. Since we are allocating blocks, inode | ||
280 | * would become fully dirty soon anyway and it reportedly | ||
281 | * reduces inode_lock contention. | ||
282 | */ | ||
283 | mark_inode_dirty(inode); | ||
284 | } | ||
279 | return ret; | 285 | return ret; |
280 | } | 286 | } |
281 | 287 | ||
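A hedged sketch of the call site this helper is written for: a filesystem charges quota before committing a block allocation and releases the charge if the allocation itself fails. The myfs_* names are invented; only dquot_alloc_space() and its dquot_free_space() counterpart are real.

static int myfs_alloc_block(struct inode *inode)
{
        qsize_t bytes = 1 << inode->i_blkbits;  /* one filesystem block */
        int err;

        err = dquot_alloc_space(inode, bytes);  /* may fail when over quota */
        if (err)
                return err;

        err = myfs_do_alloc_block(inode);       /* hypothetical allocator */
        if (err)
                dquot_free_space(inode, bytes); /* undo the quota charge */
        return err;
}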
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 634b8e674ac5..a39cbed9ee17 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -47,6 +47,8 @@ static inline void *radix_tree_indirect_to_ptr(void *ptr) | |||
47 | { | 47 | { |
48 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); | 48 | return (void *)((unsigned long)ptr & ~RADIX_TREE_INDIRECT_PTR); |
49 | } | 49 | } |
50 | #define radix_tree_indirect_to_ptr(ptr) \ | ||
51 | radix_tree_indirect_to_ptr((void __force *)(ptr)) | ||
50 | 52 | ||
51 | static inline int radix_tree_is_indirect_ptr(void *ptr) | 53 | static inline int radix_tree_is_indirect_ptr(void *ptr) |
52 | { | 54 | { |
@@ -61,7 +63,7 @@ static inline int radix_tree_is_indirect_ptr(void *ptr) | |||
61 | struct radix_tree_root { | 63 | struct radix_tree_root { |
62 | unsigned int height; | 64 | unsigned int height; |
63 | gfp_t gfp_mask; | 65 | gfp_t gfp_mask; |
64 | struct radix_tree_node *rnode; | 66 | struct radix_tree_node __rcu *rnode; |
65 | }; | 67 | }; |
66 | 68 | ||
67 | #define RADIX_TREE_INIT(mask) { \ | 69 | #define RADIX_TREE_INIT(mask) { \ |
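What the new __rcu annotation on ->rnode buys, sketched on an invented structure rather than on the radix-tree internals: once a pointer is marked __rcu, a sparse run ("make C=1") flags reads that bypass the rcu_dereference() family and writes that bypass rcu_assign_pointer(). A plain compile is unaffected.

#include <linux/rcupdate.h>

struct blob {
        int data;
};

static struct blob __rcu *current_blob; /* only touch via RCU accessors */

static int read_blob(void)
{
        struct blob *b;
        int val = -1;

        rcu_read_lock();
        b = rcu_dereference(current_blob);      /* sparse-clean read */
        if (b)
                val = b->data;
        rcu_read_unlock();
        return val;
}

A direct current_blob->data access, by contrast, would now draw a sparse address-space warning.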
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 4ec3b38ce9c5..f31ef61f1c65 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
@@ -10,6 +10,21 @@ | |||
10 | #include <linux/rcupdate.h> | 10 | #include <linux/rcupdate.h> |
11 | 11 | ||
12 | /* | 12 | /* |
13 | * Why is there no list_empty_rcu()? Because list_empty() serves this | ||
14 | * purpose. The list_empty() function fetches the RCU-protected pointer | ||
15 | * and compares it to the address of the list head, but neither dereferences | ||
16 | * this pointer itself nor provides this pointer to the caller. Therefore, | ||
17 | * it is not necessary to use rcu_dereference(), so that list_empty() can | ||
18 | * be used anywhere you would want to use a list_empty_rcu(). | ||
19 | */ | ||
20 | |||
21 | /* | ||
24 | */ | ||
25 | #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next))) | ||
26 | |||
27 | /* | ||
13 | * Insert a new entry between two known consecutive entries. | 28 | * Insert a new entry between two known consecutive entries. |
14 | * | 29 | * |
15 | * This is only for internal list manipulation where we know | 30 | * This is only for internal list manipulation where we know |
@@ -20,7 +35,7 @@ static inline void __list_add_rcu(struct list_head *new, | |||
20 | { | 35 | { |
21 | new->next = next; | 36 | new->next = next; |
22 | new->prev = prev; | 37 | new->prev = prev; |
23 | rcu_assign_pointer(prev->next, new); | 38 | rcu_assign_pointer(list_next_rcu(prev), new); |
24 | next->prev = new; | 39 | next->prev = new; |
25 | } | 40 | } |
26 | 41 | ||
@@ -138,7 +153,7 @@ static inline void list_replace_rcu(struct list_head *old, | |||
138 | { | 153 | { |
139 | new->next = old->next; | 154 | new->next = old->next; |
140 | new->prev = old->prev; | 155 | new->prev = old->prev; |
141 | rcu_assign_pointer(new->prev->next, new); | 156 | rcu_assign_pointer(list_next_rcu(new->prev), new); |
142 | new->next->prev = new; | 157 | new->next->prev = new; |
143 | old->prev = LIST_POISON2; | 158 | old->prev = LIST_POISON2; |
144 | } | 159 | } |
@@ -193,7 +208,7 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
193 | */ | 208 | */ |
194 | 209 | ||
195 | last->next = at; | 210 | last->next = at; |
196 | rcu_assign_pointer(head->next, first); | 211 | rcu_assign_pointer(list_next_rcu(head), first); |
197 | first->prev = head; | 212 | first->prev = head; |
198 | at->prev = last; | 213 | at->prev = last; |
199 | } | 214 | } |
@@ -208,7 +223,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
208 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). | 223 | * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). |
209 | */ | 224 | */ |
210 | #define list_entry_rcu(ptr, type, member) \ | 225 | #define list_entry_rcu(ptr, type, member) \ |
211 | container_of(rcu_dereference_raw(ptr), type, member) | 226 | ({typeof (*ptr) __rcu *__ptr = (typeof (*ptr) __rcu __force *)ptr; \ |
227 | container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member); \ | ||
228 | }) | ||
212 | 229 | ||
213 | /** | 230 | /** |
214 | * list_first_entry_rcu - get the first element from a list | 231 | * list_first_entry_rcu - get the first element from a list |
@@ -225,9 +242,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
225 | list_entry_rcu((ptr)->next, type, member) | 242 | list_entry_rcu((ptr)->next, type, member) |
226 | 243 | ||
227 | #define __list_for_each_rcu(pos, head) \ | 244 | #define __list_for_each_rcu(pos, head) \ |
228 | for (pos = rcu_dereference_raw((head)->next); \ | 245 | for (pos = rcu_dereference_raw(list_next_rcu(head)); \ |
229 | pos != (head); \ | 246 | pos != (head); \ |
230 | pos = rcu_dereference_raw(pos->next)) | 247 | pos = rcu_dereference_raw(list_next_rcu((pos))) |
231 | 248 | ||
232 | /** | 249 | /** |
233 | * list_for_each_entry_rcu - iterate over rcu list of given type | 250 | * list_for_each_entry_rcu - iterate over rcu list of given type |
@@ -257,9 +274,9 @@ static inline void list_splice_init_rcu(struct list_head *list, | |||
257 | * as long as the traversal is guarded by rcu_read_lock(). | 274 | * as long as the traversal is guarded by rcu_read_lock(). |
258 | */ | 275 | */ |
259 | #define list_for_each_continue_rcu(pos, head) \ | 276 | #define list_for_each_continue_rcu(pos, head) \ |
260 | for ((pos) = rcu_dereference_raw((pos)->next); \ | 277 | for ((pos) = rcu_dereference_raw(list_next_rcu(pos)); \ |
261 | prefetch((pos)->next), (pos) != (head); \ | 278 | prefetch((pos)->next), (pos) != (head); \ |
262 | (pos) = rcu_dereference_raw((pos)->next)) | 279 | (pos) = rcu_dereference_raw(list_next_rcu(pos))) |
263 | 280 | ||
264 | /** | 281 | /** |
265 | * list_for_each_entry_continue_rcu - continue iteration over list of given type | 282 | * list_for_each_entry_continue_rcu - continue iteration over list of given type |
@@ -314,12 +331,19 @@ static inline void hlist_replace_rcu(struct hlist_node *old, | |||
314 | 331 | ||
315 | new->next = next; | 332 | new->next = next; |
316 | new->pprev = old->pprev; | 333 | new->pprev = old->pprev; |
317 | rcu_assign_pointer(*new->pprev, new); | 334 | rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new); |
318 | if (next) | 335 | if (next) |
319 | new->next->pprev = &new->next; | 336 | new->next->pprev = &new->next; |
320 | old->pprev = LIST_POISON2; | 337 | old->pprev = LIST_POISON2; |
321 | } | 338 | } |
322 | 339 | ||
340 | /* | ||
341 | * return the first or the next element in an RCU protected hlist | ||
342 | */ | ||
343 | #define hlist_first_rcu(head) (*((struct hlist_node __rcu **)(&(head)->first))) | ||
344 | #define hlist_next_rcu(node) (*((struct hlist_node __rcu **)(&(node)->next))) | ||
345 | #define hlist_pprev_rcu(node) (*((struct hlist_node __rcu **)((node)->pprev))) | ||
346 | |||
323 | /** | 347 | /** |
324 | * hlist_add_head_rcu | 348 | * hlist_add_head_rcu |
325 | * @n: the element to add to the hash list. | 349 | * @n: the element to add to the hash list. |
@@ -346,7 +370,7 @@ static inline void hlist_add_head_rcu(struct hlist_node *n, | |||
346 | 370 | ||
347 | n->next = first; | 371 | n->next = first; |
348 | n->pprev = &h->first; | 372 | n->pprev = &h->first; |
349 | rcu_assign_pointer(h->first, n); | 373 | rcu_assign_pointer(hlist_first_rcu(h), n); |
350 | if (first) | 374 | if (first) |
351 | first->pprev = &n->next; | 375 | first->pprev = &n->next; |
352 | } | 376 | } |
@@ -374,7 +398,7 @@ static inline void hlist_add_before_rcu(struct hlist_node *n, | |||
374 | { | 398 | { |
375 | n->pprev = next->pprev; | 399 | n->pprev = next->pprev; |
376 | n->next = next; | 400 | n->next = next; |
377 | rcu_assign_pointer(*(n->pprev), n); | 401 | rcu_assign_pointer(hlist_pprev_rcu(n), n); |
378 | next->pprev = &n->next; | 402 | next->pprev = &n->next; |
379 | } | 403 | } |
380 | 404 | ||
@@ -401,15 +425,15 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
401 | { | 425 | { |
402 | n->next = prev->next; | 426 | n->next = prev->next; |
403 | n->pprev = &prev->next; | 427 | n->pprev = &prev->next; |
404 | rcu_assign_pointer(prev->next, n); | 428 | rcu_assign_pointer(hlist_next_rcu(prev), n); |
405 | if (n->next) | 429 | if (n->next) |
406 | n->next->pprev = &n->next; | 430 | n->next->pprev = &n->next; |
407 | } | 431 | } |
408 | 432 | ||
409 | #define __hlist_for_each_rcu(pos, head) \ | 433 | #define __hlist_for_each_rcu(pos, head) \ |
410 | for (pos = rcu_dereference((head)->first); \ | 434 | for (pos = rcu_dereference(hlist_first_rcu(head)); \ |
411 | pos && ({ prefetch(pos->next); 1; }); \ | 435 | pos && ({ prefetch(pos->next); 1; }); \ |
412 | pos = rcu_dereference(pos->next)) | 436 | pos = rcu_dereference(hlist_next_rcu(pos))) |
413 | 437 | ||
414 | /** | 438 | /** |
415 | * hlist_for_each_entry_rcu - iterate over rcu list of given type | 439 | * hlist_for_each_entry_rcu - iterate over rcu list of given type |
@@ -422,11 +446,11 @@ static inline void hlist_add_after_rcu(struct hlist_node *prev, | |||
422 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() | 446 | * the _rcu list-mutation primitives such as hlist_add_head_rcu() |
423 | * as long as the traversal is guarded by rcu_read_lock(). | 447 | * as long as the traversal is guarded by rcu_read_lock(). |
424 | */ | 448 | */ |
425 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ | 449 | #define hlist_for_each_entry_rcu(tpos, pos, head, member) \ |
426 | for (pos = rcu_dereference_raw((head)->first); \ | 450 | for (pos = rcu_dereference_raw(hlist_first_rcu(head)); \ |
427 | pos && ({ prefetch(pos->next); 1; }) && \ | 451 | pos && ({ prefetch(pos->next); 1; }) && \ |
428 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ | 452 | ({ tpos = hlist_entry(pos, typeof(*tpos), member); 1; }); \ |
429 | pos = rcu_dereference_raw(pos->next)) | 453 | pos = rcu_dereference_raw(hlist_next_rcu(pos))) |
430 | 454 | ||
431 | /** | 455 | /** |
432 | * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type | 456 | * hlist_for_each_entry_rcu_bh - iterate over rcu list of given type |
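Putting the list macros above in context, a minimal reader/writer sketch under the usual assumptions (a spinlock serializes writers, readers rely only on rcu_read_lock()); the item/items names are invented.

#include <linux/rculist.h>
#include <linux/spinlock.h>

struct item {
        int key;
        struct list_head link;
};

static LIST_HEAD(items);
static DEFINE_SPINLOCK(items_lock);

/* Writer: publication happens through list_add_rcu(), which performs the
 * rcu_assign_pointer() on list_next_rcu(prev) shown above. */
static void add_item(struct item *it)
{
        spin_lock(&items_lock);
        list_add_rcu(&it->link, &items);
        spin_unlock(&items_lock);
}

/* Reader: no lock beyond rcu_read_lock(); the iteration macro pairs the
 * writer's rcu_assign_pointer() with rcu_dereference_raw() inside
 * list_entry_rcu(). */
static bool item_exists(int key)
{
        struct item *it;
        bool found = false;

        rcu_read_lock();
        list_for_each_entry_rcu(it, &items, link) {
                if (it->key == key) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}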
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index b70ffe53cb9f..2ae13714828b 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h | |||
@@ -37,6 +37,12 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n) | |||
37 | } | 37 | } |
38 | } | 38 | } |
39 | 39 | ||
40 | #define hlist_nulls_first_rcu(head) \ | ||
41 | (*((struct hlist_nulls_node __rcu __force **)&(head)->first)) | ||
42 | |||
43 | #define hlist_nulls_next_rcu(node) \ | ||
44 | (*((struct hlist_nulls_node __rcu __force **)&(node)->next)) | ||
45 | |||
40 | /** | 46 | /** |
41 | * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization | 47 | * hlist_nulls_del_rcu - deletes entry from hash list without re-initialization |
42 | * @n: the element to delete from the hash list. | 48 | * @n: the element to delete from the hash list. |
@@ -88,7 +94,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, | |||
88 | 94 | ||
89 | n->next = first; | 95 | n->next = first; |
90 | n->pprev = &h->first; | 96 | n->pprev = &h->first; |
91 | rcu_assign_pointer(h->first, n); | 97 | rcu_assign_pointer(hlist_nulls_first_rcu(h), n); |
92 | if (!is_a_nulls(first)) | 98 | if (!is_a_nulls(first)) |
93 | first->pprev = &n->next; | 99 | first->pprev = &n->next; |
94 | } | 100 | } |
@@ -100,11 +106,11 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, | |||
100 | * @member: the name of the hlist_nulls_node within the struct. | 106 | * @member: the name of the hlist_nulls_node within the struct. |
101 | * | 107 | * |
102 | */ | 108 | */ |
103 | #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ | 109 | #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ |
104 | for (pos = rcu_dereference_raw((head)->first); \ | 110 | for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ |
105 | (!is_a_nulls(pos)) && \ | 111 | (!is_a_nulls(pos)) && \ |
106 | ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ | 112 | ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ |
107 | pos = rcu_dereference_raw(pos->next)) | 113 | pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) |
108 | 114 | ||
109 | #endif | 115 | #endif |
110 | #endif | 116 | #endif |
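For context, a hedged sketch of how the nulls variants are typically used (the hash-table names are invented). The per-chain nulls value set by INIT_HLIST_NULLS_HEAD() lets a lock-free reader notice that it has wandered onto another chain, which matters for SLAB_DESTROY_BY_RCU caches.

#include <linux/list_nulls.h>
#include <linux/rculist_nulls.h>

#define CONN_HASH_BITS  4
#define CONN_HASH_SIZE  (1 << CONN_HASH_BITS)

struct conn {
        unsigned int id;
        struct hlist_nulls_node node;
};

static struct hlist_nulls_head conn_hash[CONN_HASH_SIZE];

static void conn_hash_init(void)
{
        int i;

        /* Encode the chain number as the nulls value of each list. */
        for (i = 0; i < CONN_HASH_SIZE; i++)
                INIT_HLIST_NULLS_HEAD(&conn_hash[i], i);
}

static bool conn_exists(unsigned int id)
{
        struct hlist_nulls_node *pos;
        struct conn *c;
        bool found = false;

        rcu_read_lock();
        hlist_nulls_for_each_entry_rcu(c, pos, &conn_hash[id & (CONN_HASH_SIZE - 1)], node) {
                if (c->id == id) {
                        found = true;
                        break;
                }
        }
        rcu_read_unlock();
        return found;
}

A real SLAB_DESTROY_BY_RCU lookup would additionally take a reference, re-check the key, and restart if get_nulls_value(pos) does not match the chain it started on; the sketch only shows the traversal.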
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 9fbc54a2585d..03cda7bed985 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -41,11 +41,15 @@ | |||
41 | #include <linux/lockdep.h> | 41 | #include <linux/lockdep.h> |
42 | #include <linux/completion.h> | 42 | #include <linux/completion.h> |
43 | #include <linux/debugobjects.h> | 43 | #include <linux/debugobjects.h> |
44 | #include <linux/compiler.h> | ||
44 | 45 | ||
45 | #ifdef CONFIG_RCU_TORTURE_TEST | 46 | #ifdef CONFIG_RCU_TORTURE_TEST |
46 | extern int rcutorture_runnable; /* for sysctl */ | 47 | extern int rcutorture_runnable; /* for sysctl */ |
47 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 48 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
48 | 49 | ||
50 | #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b)) | ||
51 | #define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b)) | ||
52 | |||
49 | /** | 53 | /** |
50 | * struct rcu_head - callback structure for use with RCU | 54 | * struct rcu_head - callback structure for use with RCU |
51 | * @next: next update requests in a list | 55 | * @next: next update requests in a list |
@@ -57,29 +61,94 @@ struct rcu_head { | |||
57 | }; | 61 | }; |
58 | 62 | ||
59 | /* Exported common interfaces */ | 63 | /* Exported common interfaces */ |
60 | extern void rcu_barrier(void); | 64 | extern void call_rcu_sched(struct rcu_head *head, |
65 | void (*func)(struct rcu_head *rcu)); | ||
66 | extern void synchronize_sched(void); | ||
61 | extern void rcu_barrier_bh(void); | 67 | extern void rcu_barrier_bh(void); |
62 | extern void rcu_barrier_sched(void); | 68 | extern void rcu_barrier_sched(void); |
63 | extern void synchronize_sched_expedited(void); | 69 | extern void synchronize_sched_expedited(void); |
64 | extern int sched_expedited_torture_stats(char *page); | 70 | extern int sched_expedited_torture_stats(char *page); |
65 | 71 | ||
72 | static inline void __rcu_read_lock_bh(void) | ||
73 | { | ||
74 | local_bh_disable(); | ||
75 | } | ||
76 | |||
77 | static inline void __rcu_read_unlock_bh(void) | ||
78 | { | ||
79 | local_bh_enable(); | ||
80 | } | ||
81 | |||
82 | #ifdef CONFIG_PREEMPT_RCU | ||
83 | |||
84 | extern void __rcu_read_lock(void); | ||
85 | extern void __rcu_read_unlock(void); | ||
86 | void synchronize_rcu(void); | ||
87 | |||
88 | /* | ||
89 | * Defined as a macro as it is a very low level header included from | ||
90 | * areas that don't even know about current. This gives the rcu_read_lock() | ||
91 | * nesting depth, but makes sense only if CONFIG_PREEMPT_RCU -- in other | ||
92 | * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. | ||
93 | */ | ||
94 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
95 | |||
96 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
97 | |||
98 | static inline void __rcu_read_lock(void) | ||
99 | { | ||
100 | preempt_disable(); | ||
101 | } | ||
102 | |||
103 | static inline void __rcu_read_unlock(void) | ||
104 | { | ||
105 | preempt_enable(); | ||
106 | } | ||
107 | |||
108 | static inline void synchronize_rcu(void) | ||
109 | { | ||
110 | synchronize_sched(); | ||
111 | } | ||
112 | |||
113 | static inline int rcu_preempt_depth(void) | ||
114 | { | ||
115 | return 0; | ||
116 | } | ||
117 | |||
118 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
119 | |||
66 | /* Internal to kernel */ | 120 | /* Internal to kernel */ |
67 | extern void rcu_init(void); | 121 | extern void rcu_init(void); |
122 | extern void rcu_sched_qs(int cpu); | ||
123 | extern void rcu_bh_qs(int cpu); | ||
124 | extern void rcu_check_callbacks(int cpu, int user); | ||
125 | struct notifier_block; | ||
126 | |||
127 | #ifdef CONFIG_NO_HZ | ||
128 | |||
129 | extern void rcu_enter_nohz(void); | ||
130 | extern void rcu_exit_nohz(void); | ||
131 | |||
132 | #else /* #ifdef CONFIG_NO_HZ */ | ||
133 | |||
134 | static inline void rcu_enter_nohz(void) | ||
135 | { | ||
136 | } | ||
137 | |||
138 | static inline void rcu_exit_nohz(void) | ||
139 | { | ||
140 | } | ||
141 | |||
142 | #endif /* #else #ifdef CONFIG_NO_HZ */ | ||
68 | 143 | ||
69 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 144 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
70 | #include <linux/rcutree.h> | 145 | #include <linux/rcutree.h> |
71 | #elif defined(CONFIG_TINY_RCU) | 146 | #elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU) |
72 | #include <linux/rcutiny.h> | 147 | #include <linux/rcutiny.h> |
73 | #else | 148 | #else |
74 | #error "Unknown RCU implementation specified to kernel configuration" | 149 | #error "Unknown RCU implementation specified to kernel configuration" |
75 | #endif | 150 | #endif |
76 | 151 | ||
77 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } | ||
78 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT | ||
79 | #define INIT_RCU_HEAD(ptr) do { \ | ||
80 | (ptr)->next = NULL; (ptr)->func = NULL; \ | ||
81 | } while (0) | ||
82 | |||
83 | /* | 152 | /* |
84 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic | 153 | * init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic |
85 | * initialization and destruction of rcu_head on the stack. rcu_head structures | 154 | * initialization and destruction of rcu_head on the stack. rcu_head structures |
@@ -120,14 +189,15 @@ extern struct lockdep_map rcu_sched_lock_map; | |||
120 | extern int debug_lockdep_rcu_enabled(void); | 189 | extern int debug_lockdep_rcu_enabled(void); |
121 | 190 | ||
122 | /** | 191 | /** |
123 | * rcu_read_lock_held - might we be in RCU read-side critical section? | 192 | * rcu_read_lock_held() - might we be in RCU read-side critical section? |
124 | * | 193 | * |
125 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU | 194 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an RCU |
126 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, | 195 | * read-side critical section. In absence of CONFIG_DEBUG_LOCK_ALLOC, |
127 | * this assumes we are in an RCU read-side critical section unless it can | 196 | * this assumes we are in an RCU read-side critical section unless it can |
128 | * prove otherwise. | 197 | * prove otherwise. This is useful for debug checks in functions that |
198 | * require that they be called within an RCU read-side critical section. | ||
129 | * | 199 | * |
130 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 200 | * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot |
131 | * and while lockdep is disabled. | 201 | * and while lockdep is disabled. |
132 | */ | 202 | */ |
133 | static inline int rcu_read_lock_held(void) | 203 | static inline int rcu_read_lock_held(void) |
@@ -144,14 +214,16 @@ static inline int rcu_read_lock_held(void) | |||
144 | extern int rcu_read_lock_bh_held(void); | 214 | extern int rcu_read_lock_bh_held(void); |
145 | 215 | ||
146 | /** | 216 | /** |
147 | * rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section? | 217 | * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? |
148 | * | 218 | * |
149 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an | 219 | * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an |
150 | * RCU-sched read-side critical section. In absence of | 220 | * RCU-sched read-side critical section. In absence of |
151 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side | 221 | * CONFIG_DEBUG_LOCK_ALLOC, this assumes we are in an RCU-sched read-side |
152 | * critical section unless it can prove otherwise. Note that disabling | 222 | * critical section unless it can prove otherwise. Note that disabling |
153 | * of preemption (including disabling irqs) counts as an RCU-sched | 223 | * of preemption (including disabling irqs) counts as an RCU-sched |
154 | * read-side critical section. | 224 | * read-side critical section. This is useful for debug checks in functions |
225 | * that require that they be called within an RCU-sched read-side | ||
226 | * critical section. | ||
155 | * | 227 | * |
156 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot | 228 | * Check debug_lockdep_rcu_enabled() to prevent false positives during boot |
157 | * and while lockdep is disabled. | 229 | * and while lockdep is disabled. |
@@ -211,7 +283,11 @@ static inline int rcu_read_lock_sched_held(void) | |||
211 | 283 | ||
212 | extern int rcu_my_thread_group_empty(void); | 284 | extern int rcu_my_thread_group_empty(void); |
213 | 285 | ||
214 | #define __do_rcu_dereference_check(c) \ | 286 | /** |
287 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met | ||
288 | * @c: condition to check | ||
289 | */ | ||
290 | #define rcu_lockdep_assert(c) \ | ||
215 | do { \ | 291 | do { \ |
216 | static bool __warned; \ | 292 | static bool __warned; \ |
217 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ | 293 | if (debug_lockdep_rcu_enabled() && !__warned && !(c)) { \ |
@@ -220,41 +296,163 @@ extern int rcu_my_thread_group_empty(void); | |||
220 | } \ | 296 | } \ |
221 | } while (0) | 297 | } while (0) |
222 | 298 | ||
299 | #else /* #ifdef CONFIG_PROVE_RCU */ | ||
300 | |||
301 | #define rcu_lockdep_assert(c) do { } while (0) | ||
302 | |||
303 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | ||
304 | |||
305 | /* | ||
306 | * Helper functions for rcu_dereference_check(), rcu_dereference_protected() | ||
307 | * and rcu_assign_pointer(). Some of these could be folded into their | ||
308 | * callers, but they are left separate in order to ease introduction of | ||
309 | * multiple flavors of pointers to match the multiple flavors of RCU | ||
310 | * (e.g., __rcu_bh, __rcu_sched, and __srcu), should this make sense in | ||
311 | * the future. | ||
312 | */ | ||
313 | |||
314 | #ifdef __CHECKER__ | ||
315 | #define rcu_dereference_sparse(p, space) \ | ||
316 | ((void)(((typeof(*p) space *)p) == p)) | ||
317 | #else /* #ifdef __CHECKER__ */ | ||
318 | #define rcu_dereference_sparse(p, space) | ||
319 | #endif /* #else #ifdef __CHECKER__ */ | ||
320 | |||
321 | #define __rcu_access_pointer(p, space) \ | ||
322 | ({ \ | ||
323 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ | ||
324 | rcu_dereference_sparse(p, space); \ | ||
325 | ((typeof(*p) __force __kernel *)(_________p1)); \ | ||
326 | }) | ||
327 | #define __rcu_dereference_check(p, c, space) \ | ||
328 | ({ \ | ||
329 | typeof(*p) *_________p1 = (typeof(*p)*__force )ACCESS_ONCE(p); \ | ||
330 | rcu_lockdep_assert(c); \ | ||
331 | rcu_dereference_sparse(p, space); \ | ||
332 | smp_read_barrier_depends(); \ | ||
333 | ((typeof(*p) __force __kernel *)(_________p1)); \ | ||
334 | }) | ||
335 | #define __rcu_dereference_protected(p, c, space) \ | ||
336 | ({ \ | ||
337 | rcu_lockdep_assert(c); \ | ||
338 | rcu_dereference_sparse(p, space); \ | ||
339 | ((typeof(*p) __force __kernel *)(p)); \ | ||
340 | }) | ||
341 | |||
342 | #define __rcu_dereference_index_check(p, c) \ | ||
343 | ({ \ | ||
344 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
345 | rcu_lockdep_assert(c); \ | ||
346 | smp_read_barrier_depends(); \ | ||
347 | (_________p1); \ | ||
348 | }) | ||
349 | #define __rcu_assign_pointer(p, v, space) \ | ||
350 | ({ \ | ||
351 | if (!__builtin_constant_p(v) || \ | ||
352 | ((v) != NULL)) \ | ||
353 | smp_wmb(); \ | ||
354 | (p) = (typeof(*v) __force space *)(v); \ | ||
355 | }) | ||
356 | |||
357 | |||
358 | /** | ||
359 | * rcu_access_pointer() - fetch RCU pointer with no dereferencing | ||
360 | * @p: The pointer to read | ||
361 | * | ||
362 | * Return the value of the specified RCU-protected pointer, but omit the | ||
363 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | ||
364 | * when the value of this pointer is accessed, but the pointer is not | ||
365 | * dereferenced, for example, when testing an RCU-protected pointer against | ||
366 | * NULL. Although rcu_access_pointer() may also be used in cases where | ||
367 | * update-side locks prevent the value of the pointer from changing, you | ||
368 | * should instead use rcu_dereference_protected() for this use case. | ||
369 | */ | ||
370 | #define rcu_access_pointer(p) __rcu_access_pointer((p), __rcu) | ||
371 | |||
223 | /** | 372 | /** |
224 | * rcu_dereference_check - rcu_dereference with debug checking | 373 | * rcu_dereference_check() - rcu_dereference with debug checking |
225 | * @p: The pointer to read, prior to dereferencing | 374 | * @p: The pointer to read, prior to dereferencing |
226 | * @c: The conditions under which the dereference will take place | 375 | * @c: The conditions under which the dereference will take place |
227 | * | 376 | * |
228 | * Do an rcu_dereference(), but check that the conditions under which the | 377 | * Do an rcu_dereference(), but check that the conditions under which the |
229 | * dereference will take place are correct. Typically the conditions indicate | 378 | * dereference will take place are correct. Typically the conditions |
230 | * the various locking conditions that should be held at that point. The check | 379 | * indicate the various locking conditions that should be held at that |
231 | * should return true if the conditions are satisfied. | 380 | * point. The check should return true if the conditions are satisfied. |
381 | * An implicit check for being in an RCU read-side critical section | ||
382 | * (rcu_read_lock()) is included. | ||
232 | * | 383 | * |
233 | * For example: | 384 | * For example: |
234 | * | 385 | * |
235 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | 386 | * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock)); |
236 | * lockdep_is_held(&foo->lock)); | ||
237 | * | 387 | * |
238 | * could be used to indicate to lockdep that foo->bar may only be dereferenced | 388 | * could be used to indicate to lockdep that foo->bar may only be dereferenced |
239 | * if either the RCU read lock is held, or that the lock required to replace | 389 | * if either rcu_read_lock() is held, or that the lock required to replace |
240 | * the bar struct at foo->bar is held. | 390 | * the bar struct at foo->bar is held. |
241 | * | 391 | * |
242 | * Note that the list of conditions may also include indications of when a lock | 392 | * Note that the list of conditions may also include indications of when a lock |
243 | * need not be held, for example during initialisation or destruction of the | 393 | * need not be held, for example during initialisation or destruction of the |
244 | * target struct: | 394 | * target struct: |
245 | * | 395 | * |
246 | * bar = rcu_dereference_check(foo->bar, rcu_read_lock_held() || | 396 | * bar = rcu_dereference_check(foo->bar, lockdep_is_held(&foo->lock) || |
247 | * lockdep_is_held(&foo->lock) || | ||
248 | * atomic_read(&foo->usage) == 0); | 397 | * atomic_read(&foo->usage) == 0); |
398 | * | ||
399 | * Inserts memory barriers on architectures that require them | ||
400 | * (currently only the Alpha), prevents the compiler from refetching | ||
401 | * (and from merging fetches), and, more importantly, documents exactly | ||
402 | * which pointers are protected by RCU and checks that the pointer is | ||
403 | * annotated as __rcu. | ||
249 | */ | 404 | */ |
250 | #define rcu_dereference_check(p, c) \ | 405 | #define rcu_dereference_check(p, c) \ |
251 | ({ \ | 406 | __rcu_dereference_check((p), rcu_read_lock_held() || (c), __rcu) |
252 | __do_rcu_dereference_check(c); \ | 407 | |
253 | rcu_dereference_raw(p); \ | 408 | /** |
254 | }) | 409 | * rcu_dereference_bh_check() - rcu_dereference_bh with debug checking |
410 | * @p: The pointer to read, prior to dereferencing | ||
411 | * @c: The conditions under which the dereference will take place | ||
412 | * | ||
413 | * This is the RCU-bh counterpart to rcu_dereference_check(). | ||
414 | */ | ||
415 | #define rcu_dereference_bh_check(p, c) \ | ||
416 | __rcu_dereference_check((p), rcu_read_lock_bh_held() || (c), __rcu) | ||
255 | 417 | ||
256 | /** | 418 | /** |
257 | * rcu_dereference_protected - fetch RCU pointer when updates prevented | 419 | * rcu_dereference_sched_check() - rcu_dereference_sched with debug checking |
420 | * @p: The pointer to read, prior to dereferencing | ||
421 | * @c: The conditions under which the dereference will take place | ||
422 | * | ||
423 | * This is the RCU-sched counterpart to rcu_dereference_check(). | ||
424 | */ | ||
425 | #define rcu_dereference_sched_check(p, c) \ | ||
426 | __rcu_dereference_check((p), rcu_read_lock_sched_held() || (c), \ | ||
427 | __rcu) | ||
428 | |||
429 | #define rcu_dereference_raw(p) rcu_dereference_check(p, 1) /*@@@ needed? @@@*/ | ||
430 | |||
431 | /** | ||
432 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
433 | * @p: The pointer to read, prior to dereferencing | ||
434 | * @c: The conditions under which the dereference will take place | ||
435 | * | ||
436 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
437 | * This allows rcu_dereference_index_check() to be used on integers, | ||
438 | * which can then be used as array indices. Attempting to use | ||
439 | * rcu_dereference_check() on an integer will give compiler warnings | ||
440 | * because the sparse address-space mechanism relies on dereferencing | ||
441 | * the RCU-protected pointer. Dereferencing integers is not something | ||
442 | * that even gcc will put up with. | ||
443 | * | ||
444 | * Note that this function does not implicitly check for RCU read-side | ||
445 | * critical sections. If this function gains lots of uses, it might | ||
446 | * make sense to provide versions for each flavor of RCU, but it does | ||
447 | * not make sense as of early 2010. | ||
448 | */ | ||
449 | #define rcu_dereference_index_check(p, c) \ | ||
450 | __rcu_dereference_index_check((p), (c)) | ||
451 | |||
452 | /** | ||
453 | * rcu_dereference_protected() - fetch RCU pointer when updates prevented | ||
454 | * @p: The pointer to read, prior to dereferencing | ||
455 | * @c: The conditions under which the dereference will take place | ||
258 | * | 456 | * |
259 | * Return the value of the specified RCU-protected pointer, but omit | 457 | * Return the value of the specified RCU-protected pointer, but omit |
260 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This | 458 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(). This |
@@ -263,35 +461,61 @@ extern int rcu_my_thread_group_empty(void); | |||
263 | * prevent the compiler from repeating this reference or combining it | 461 | * prevent the compiler from repeating this reference or combining it |
264 | * with other references, so it should not be used without protection | 462 | * with other references, so it should not be used without protection |
265 | * of appropriate locks. | 463 | * of appropriate locks. |
464 | * | ||
465 | * This function is only for update-side use. Using this function | ||
466 | * when protected only by rcu_read_lock() will result in infrequent | ||
467 | * but very ugly failures. | ||
266 | */ | 468 | */ |
267 | #define rcu_dereference_protected(p, c) \ | 469 | #define rcu_dereference_protected(p, c) \ |
268 | ({ \ | 470 | __rcu_dereference_protected((p), (c), __rcu) |
269 | __do_rcu_dereference_check(c); \ | ||
270 | (p); \ | ||
271 | }) | ||
272 | 471 | ||
273 | #else /* #ifdef CONFIG_PROVE_RCU */ | 472 | /** |
473 | * rcu_dereference_bh_protected() - fetch RCU-bh pointer when updates prevented | ||
474 | * @p: The pointer to read, prior to dereferencing | ||
475 | * @c: The conditions under which the dereference will take place | ||
476 | * | ||
477 | * This is the RCU-bh counterpart to rcu_dereference_protected(). | ||
478 | */ | ||
479 | #define rcu_dereference_bh_protected(p, c) \ | ||
480 | __rcu_dereference_protected((p), (c), __rcu) | ||
274 | 481 | ||
275 | #define rcu_dereference_check(p, c) rcu_dereference_raw(p) | 482 | /** |
276 | #define rcu_dereference_protected(p, c) (p) | 483 | * rcu_dereference_sched_protected() - fetch RCU-sched pointer when updates prevented |
484 | * @p: The pointer to read, prior to dereferencing | ||
485 | * @c: The conditions under which the dereference will take place | ||
486 | * | ||
487 | * This is the RCU-sched counterpart to rcu_dereference_protected(). | ||
488 | */ | ||
489 | #define rcu_dereference_sched_protected(p, c) \ | ||
490 | __rcu_dereference_protected((p), (c), __rcu) | ||
277 | 491 | ||
278 | #endif /* #else #ifdef CONFIG_PROVE_RCU */ | ||
279 | 492 | ||
280 | /** | 493 | /** |
281 | * rcu_access_pointer - fetch RCU pointer with no dereferencing | 494 | * rcu_dereference() - fetch RCU-protected pointer for dereferencing |
495 | * @p: The pointer to read, prior to dereferencing | ||
282 | * | 496 | * |
283 | * Return the value of the specified RCU-protected pointer, but omit the | 497 | * This is a simple wrapper around rcu_dereference_check(). |
284 | * smp_read_barrier_depends() and keep the ACCESS_ONCE(). This is useful | 498 | */ |
285 | * when the value of this pointer is accessed, but the pointer is not | 499 | #define rcu_dereference(p) rcu_dereference_check(p, 0) |
286 | * dereferenced, for example, when testing an RCU-protected pointer against | 500 | |
287 | * NULL. This may also be used in cases where update-side locks prevent | 501 | /** |
288 | * the value of the pointer from changing, but rcu_dereference_protected() | 502 | * rcu_dereference_bh() - fetch an RCU-bh-protected pointer for dereferencing |
289 | * is a lighter-weight primitive for this use case. | 503 | * @p: The pointer to read, prior to dereferencing |
504 | * | ||
505 | * Makes rcu_dereference_check() do the dirty work. | ||
506 | */ | ||
507 | #define rcu_dereference_bh(p) rcu_dereference_bh_check(p, 0) | ||
508 | |||
509 | /** | ||
510 | * rcu_dereference_sched() - fetch RCU-sched-protected pointer for dereferencing | ||
511 | * @p: The pointer to read, prior to dereferencing | ||
512 | * | ||
513 | * Makes rcu_dereference_check() do the dirty work. | ||
290 | */ | 514 | */ |
291 | #define rcu_access_pointer(p) ACCESS_ONCE(p) | 515 | #define rcu_dereference_sched(p) rcu_dereference_sched_check(p, 0) |
292 | 516 | ||
293 | /** | 517 | /** |
294 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 518 | * rcu_read_lock() - mark the beginning of an RCU read-side critical section |
295 | * | 519 | * |
296 | * When synchronize_rcu() is invoked on one CPU while other CPUs | 520 | * When synchronize_rcu() is invoked on one CPU while other CPUs |
297 | * are within RCU read-side critical sections, then the | 521 | * are within RCU read-side critical sections, then the |
@@ -302,7 +526,7 @@ extern int rcu_my_thread_group_empty(void); | |||
302 | * until after all the other CPUs exit their critical sections. | 526 | * until after all the other CPUs exit their critical sections.
303 | * | 527 | * |
304 | * Note, however, that RCU callbacks are permitted to run concurrently | 528 | * Note, however, that RCU callbacks are permitted to run concurrently |
305 | * with RCU read-side critical sections. One way that this can happen | 529 | * with new RCU read-side critical sections. One way that this can happen |
306 | * is via the following sequence of events: (1) CPU 0 enters an RCU | 530 | * is via the following sequence of events: (1) CPU 0 enters an RCU |
307 | * read-side critical section, (2) CPU 1 invokes call_rcu() to register | 531 | * read-side critical section, (2) CPU 1 invokes call_rcu() to register |
308 | * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, | 532 | * an RCU callback, (3) CPU 0 exits the RCU read-side critical section, |
@@ -317,7 +541,20 @@ extern int rcu_my_thread_group_empty(void); | |||
317 | * will be deferred until the outermost RCU read-side critical section | 541 | * will be deferred until the outermost RCU read-side critical section |
318 | * completes. | 542 | * completes. |
319 | * | 543 | * |
320 | * It is illegal to block while in an RCU read-side critical section. | 544 | * You can avoid reading and understanding the next paragraph by |
545 | * following this rule: don't put anything in an rcu_read_lock() RCU | ||
546 | * read-side critical section that would block in a !PREEMPT kernel. | ||
547 | * But if you want the full story, read on! | ||
548 | * | ||
549 | * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU), it | ||
550 | * is illegal to block while in an RCU read-side critical section. In | ||
551 | * preemptible RCU implementations (TREE_PREEMPT_RCU and TINY_PREEMPT_RCU) | ||
552 | * in CONFIG_PREEMPT kernel builds, RCU read-side critical sections may | ||
553 | * be preempted, but explicit blocking is illegal. Finally, in preemptible | ||
554 | * RCU implementations in real-time (CONFIG_PREEMPT_RT) kernel builds, | ||
555 | * RCU read-side critical sections may be preempted and they may also | ||
556 | * block, but only when acquiring spinlocks that are subject to priority | ||
557 | * inheritance. | ||
321 | */ | 558 | */ |
322 | static inline void rcu_read_lock(void) | 559 | static inline void rcu_read_lock(void) |
323 | { | 560 | { |
@@ -337,7 +574,7 @@ static inline void rcu_read_lock(void) | |||
337 | */ | 574 | */ |
338 | 575 | ||
339 | /** | 576 | /** |
340 | * rcu_read_unlock - marks the end of an RCU read-side critical section. | 577 | * rcu_read_unlock() - marks the end of an RCU read-side critical section. |
341 | * | 578 | * |
342 | * See rcu_read_lock() for more information. | 579 | * See rcu_read_lock() for more information. |
343 | */ | 580 | */ |
@@ -349,15 +586,16 @@ static inline void rcu_read_unlock(void) | |||
349 | } | 586 | } |
350 | 587 | ||
351 | /** | 588 | /** |
352 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section | 589 | * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section |
353 | * | 590 | * |
354 | * This is the equivalent of rcu_read_lock(), but to be used when updates | 591 | * This is the equivalent of rcu_read_lock(), but to be used when updates
355 | * are being done using call_rcu_bh(). Since call_rcu_bh() callbacks | 592 | * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since |
356 | * consider completion of a softirq handler to be a quiescent state, | 593 | * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a |
357 | * a process in RCU read-side critical section must be protected by | 594 | * softirq handler to be a quiescent state, a process in RCU read-side |
358 | * disabling softirqs. Read-side critical sections in interrupt context | 595 | * critical section must be protected by disabling softirqs. Read-side |
359 | * can use just rcu_read_lock(). | 596 | * critical sections in interrupt context can use just rcu_read_lock(), |
360 | * | 597 | * though this should at least be commented to avoid confusing people |
598 | * reading the code. | ||
361 | */ | 599 | */ |
362 | static inline void rcu_read_lock_bh(void) | 600 | static inline void rcu_read_lock_bh(void) |
363 | { | 601 | { |
@@ -379,13 +617,12 @@ static inline void rcu_read_unlock_bh(void) | |||
379 | } | 617 | } |
380 | 618 | ||
381 | /** | 619 | /** |
382 | * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section | 620 | * rcu_read_lock_sched() - mark the beginning of a RCU-sched critical section |
383 | * | 621 | * |
384 | * Should be used with either | 622 | * This is the equivalent of rcu_read_lock(), but to be used when updates
385 | * - synchronize_sched() | 623 | * are being done using call_rcu_sched() or synchronize_rcu_sched(). |
386 | * or | 624 | * Read-side critical sections can also be introduced by anything that |
387 | * - call_rcu_sched() and rcu_barrier_sched() | 625 | * disables preemption, including local_irq_disable() and friends. |
388 | * on the write-side to insure proper synchronization. | ||
389 | */ | 626 | */ |
390 | static inline void rcu_read_lock_sched(void) | 627 | static inline void rcu_read_lock_sched(void) |
391 | { | 628 | { |
@@ -420,54 +657,14 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
420 | preempt_enable_notrace(); | 657 | preempt_enable_notrace(); |
421 | } | 658 | } |
422 | 659 | ||
423 | |||
424 | /** | 660 | /** |
425 | * rcu_dereference_raw - fetch an RCU-protected pointer | 661 | * rcu_assign_pointer() - assign to RCU-protected pointer |
662 | * @p: pointer to assign to | ||
663 | * @v: value to assign (publish) | ||
426 | * | 664 | * |
427 | * The caller must be within some flavor of RCU read-side critical | 665 | * Assigns the specified value to the specified RCU-protected |
428 | * section, or must be otherwise preventing the pointer from changing, | 666 | * pointer, ensuring that any concurrent RCU readers will see |
429 | * for example, by holding an appropriate lock. This pointer may later | 667 | * any prior initialization. Returns the value assigned. |
430 | * be safely dereferenced. It is the caller's responsibility to have | ||
431 | * done the right thing, as this primitive does no checking of any kind. | ||
432 | * | ||
433 | * Inserts memory barriers on architectures that require them | ||
434 | * (currently only the Alpha), and, more importantly, documents | ||
435 | * exactly which pointers are protected by RCU. | ||
436 | */ | ||
437 | #define rcu_dereference_raw(p) ({ \ | ||
438 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
439 | smp_read_barrier_depends(); \ | ||
440 | (_________p1); \ | ||
441 | }) | ||
442 | |||
443 | /** | ||
444 | * rcu_dereference - fetch an RCU-protected pointer, checking for RCU | ||
445 | * | ||
446 | * Makes rcu_dereference_check() do the dirty work. | ||
447 | */ | ||
448 | #define rcu_dereference(p) \ | ||
449 | rcu_dereference_check(p, rcu_read_lock_held()) | ||
450 | |||
451 | /** | ||
452 | * rcu_dereference_bh - fetch an RCU-protected pointer, checking for RCU-bh | ||
453 | * | ||
454 | * Makes rcu_dereference_check() do the dirty work. | ||
455 | */ | ||
456 | #define rcu_dereference_bh(p) \ | ||
457 | rcu_dereference_check(p, rcu_read_lock_bh_held()) | ||
458 | |||
459 | /** | ||
460 | * rcu_dereference_sched - fetch RCU-protected pointer, checking for RCU-sched | ||
461 | * | ||
462 | * Makes rcu_dereference_check() do the dirty work. | ||
463 | */ | ||
464 | #define rcu_dereference_sched(p) \ | ||
465 | rcu_dereference_check(p, rcu_read_lock_sched_held()) | ||
466 | |||
467 | /** | ||
468 | * rcu_assign_pointer - assign (publicize) a pointer to a newly | ||
469 | * initialized structure that will be dereferenced by RCU read-side | ||
470 | * critical sections. Returns the value assigned. | ||
471 | * | 668 | * |
472 | * Inserts memory barriers on architectures that require them | 669 | * Inserts memory barriers on architectures that require them |
473 | * (pretty much all of them other than x86), and also prevents | 670 | * (pretty much all of them other than x86), and also prevents |
@@ -476,14 +673,17 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
476 | * call documents which pointers will be dereferenced by RCU read-side | 673 | * call documents which pointers will be dereferenced by RCU read-side |
477 | * code. | 674 | * code. |
478 | */ | 675 | */ |
479 | |||
480 | #define rcu_assign_pointer(p, v) \ | 676 | #define rcu_assign_pointer(p, v) \ |
481 | ({ \ | 677 | __rcu_assign_pointer((p), (v), __rcu) |
482 | if (!__builtin_constant_p(v) || \ | 678 | |
483 | ((v) != NULL)) \ | 679 | /** |
484 | smp_wmb(); \ | 680 | * RCU_INIT_POINTER() - initialize an RCU protected pointer |
485 | (p) = (v); \ | 681 | * |
486 | }) | 682 | * Initialize an RCU-protected pointer in such a way as to avoid RCU-lockdep
683 | * splats. | ||
684 | */ | ||
685 | #define RCU_INIT_POINTER(p, v) \ | ||
686 | p = (typeof(*v) __force __rcu *)(v) | ||
487 | 687 | ||
488 | /* Infrastructure to implement the synchronize_() primitives. */ | 688 | /* Infrastructure to implement the synchronize_() primitives. */ |
489 | 689 | ||
@@ -494,26 +694,37 @@ struct rcu_synchronize { | |||
494 | 694 | ||
495 | extern void wakeme_after_rcu(struct rcu_head *head); | 695 | extern void wakeme_after_rcu(struct rcu_head *head); |
496 | 696 | ||
697 | #ifdef CONFIG_PREEMPT_RCU | ||
698 | |||
497 | /** | 699 | /** |
498 | * call_rcu - Queue an RCU callback for invocation after a grace period. | 700 | * call_rcu() - Queue an RCU callback for invocation after a grace period. |
499 | * @head: structure to be used for queueing the RCU updates. | 701 | * @head: structure to be used for queueing the RCU updates. |
500 | * @func: actual update function to be invoked after the grace period | 702 | * @func: actual callback function to be invoked after the grace period |
501 | * | 703 | * |
502 | * The update function will be invoked some time after a full grace | 704 | * The callback function will be invoked some time after a full grace |
503 | * period elapses, in other words after all currently executing RCU | 705 | * period elapses, in other words after all pre-existing RCU read-side |
504 | * read-side critical sections have completed. RCU read-side critical | 706 | * critical sections have completed. However, the callback function |
707 | * might well execute concurrently with RCU read-side critical sections | ||
708 | * that started after call_rcu() was invoked. RCU read-side critical | ||
505 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | 709 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), |
506 | * and may be nested. | 710 | * and may be nested. |
507 | */ | 711 | */ |
508 | extern void call_rcu(struct rcu_head *head, | 712 | extern void call_rcu(struct rcu_head *head, |
509 | void (*func)(struct rcu_head *head)); | 713 | void (*func)(struct rcu_head *head)); |
510 | 714 | ||
715 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | ||
716 | |||
717 | /* In classic RCU, call_rcu() is just call_rcu_sched(). */ | ||
718 | #define call_rcu call_rcu_sched | ||
719 | |||
720 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | ||
721 | |||
511 | /** | 722 | /** |
512 | * call_rcu_bh - Queue an RCU for invocation after a quicker grace period. | 723 | * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. |
513 | * @head: structure to be used for queueing the RCU updates. | 724 | * @head: structure to be used for queueing the RCU updates. |
514 | * @func: actual update function to be invoked after the grace period | 725 | * @func: actual callback function to be invoked after the grace period |
515 | * | 726 | * |
516 | * The update function will be invoked some time after a full grace | 727 | * The callback function will be invoked some time after a full grace |
517 | * period elapses, in other words after all currently executing RCU | 728 | * period elapses, in other words after all currently executing RCU |
518 | * read-side critical sections have completed. call_rcu_bh() assumes | 729 | * read-side critical sections have completed. call_rcu_bh() assumes |
519 | * that the read-side critical sections end on completion of a softirq | 730 | * that the read-side critical sections end on completion of a softirq |
@@ -566,37 +777,4 @@ static inline void debug_rcu_head_unqueue(struct rcu_head *head) | |||
566 | } | 777 | } |
567 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 778 | #endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
568 | 779 | ||
569 | #ifndef CONFIG_PROVE_RCU | ||
570 | #define __do_rcu_dereference_check(c) do { } while (0) | ||
571 | #endif /* #ifdef CONFIG_PROVE_RCU */ | ||
572 | |||
573 | #define __rcu_dereference_index_check(p, c) \ | ||
574 | ({ \ | ||
575 | typeof(p) _________p1 = ACCESS_ONCE(p); \ | ||
576 | __do_rcu_dereference_check(c); \ | ||
577 | smp_read_barrier_depends(); \ | ||
578 | (_________p1); \ | ||
579 | }) | ||
580 | |||
581 | /** | ||
582 | * rcu_dereference_index_check() - rcu_dereference for indices with debug checking | ||
583 | * @p: The pointer to read, prior to dereferencing | ||
584 | * @c: The conditions under which the dereference will take place | ||
585 | * | ||
586 | * Similar to rcu_dereference_check(), but omits the sparse checking. | ||
587 | * This allows rcu_dereference_index_check() to be used on integers, | ||
588 | * which can then be used as array indices. Attempting to use | ||
589 | * rcu_dereference_check() on an integer will give compiler warnings | ||
590 | * because the sparse address-space mechanism relies on dereferencing | ||
591 | * the RCU-protected pointer. Dereferencing integers is not something | ||
592 | * that even gcc will put up with. | ||
593 | * | ||
594 | * Note that this function does not implicitly check for RCU read-side | ||
595 | * critical sections. If this function gains lots of uses, it might | ||
596 | * make sense to provide versions for each flavor of RCU, but it does | ||
597 | * not make sense as of early 2010. | ||
598 | */ | ||
599 | #define rcu_dereference_index_check(p, c) \ | ||
600 | __rcu_dereference_index_check((p), (c)) | ||
601 | |||
602 | #endif /* __LINUX_RCUPDATE_H */ | 780 | #endif /* __LINUX_RCUPDATE_H */ |
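To tie the documented primitives together, a minimal end-to-end sketch under invented names (struct config, config_lock): readers use rcu_read_lock()/rcu_dereference(), the updater publishes with rcu_assign_pointer() while holding its own lock, and the old copy is reclaimed with call_rcu() after a grace period.

#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct config {
        int threshold;
        struct rcu_head rcu;
};

static struct config __rcu *cur_config; /* RCU_INIT_POINTER(cur_config, NULL) at init time */
static DEFINE_SPINLOCK(config_lock);

/* Reader: safe against concurrent updates and never blocks the updater. */
static int read_threshold(void)
{
        struct config *c;
        int t = 0;

        rcu_read_lock();
        c = rcu_dereference(cur_config);
        if (c)
                t = c->threshold;
        rcu_read_unlock();
        return t;
}

static void free_config_rcu(struct rcu_head *head)
{
        kfree(container_of(head, struct config, rcu));
}

/* Updater: holding config_lock is what makes rcu_dereference_protected()
 * legitimate here, exactly as in the kerneldoc above. */
static int update_threshold(int value)
{
        struct config *newc, *oldc;

        newc = kmalloc(sizeof(*newc), GFP_KERNEL);
        if (!newc)
                return -ENOMEM;
        newc->threshold = value;

        spin_lock(&config_lock);
        oldc = rcu_dereference_protected(cur_config,
                                         lockdep_is_held(&config_lock));
        rcu_assign_pointer(cur_config, newc);
        spin_unlock(&config_lock);

        if (oldc)
                call_rcu(&oldc->rcu, free_config_rcu);  /* freed after a grace period */
        return 0;
}

An updater that may block could instead call synchronize_rcu() and kfree() the old copy directly.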
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index e2e893144a84..13877cb93a60 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -27,103 +27,101 @@ | |||
27 | 27 | ||
28 | #include <linux/cache.h> | 28 | #include <linux/cache.h> |
29 | 29 | ||
30 | void rcu_sched_qs(int cpu); | 30 | #define rcu_init_sched() do { } while (0) |
31 | void rcu_bh_qs(int cpu); | ||
32 | static inline void rcu_note_context_switch(int cpu) | ||
33 | { | ||
34 | rcu_sched_qs(cpu); | ||
35 | } | ||
36 | 31 | ||
37 | #define __rcu_read_lock() preempt_disable() | 32 | #ifdef CONFIG_TINY_RCU |
38 | #define __rcu_read_unlock() preempt_enable() | ||
39 | #define __rcu_read_lock_bh() local_bh_disable() | ||
40 | #define __rcu_read_unlock_bh() local_bh_enable() | ||
41 | #define call_rcu_sched call_rcu | ||
42 | 33 | ||
43 | #define rcu_init_sched() do { } while (0) | 34 | static inline void synchronize_rcu_expedited(void) |
44 | extern void rcu_check_callbacks(int cpu, int user); | 35 | { |
36 | synchronize_sched(); /* Only one CPU, so pretty fast anyway!!! */ | ||
37 | } | ||
45 | 38 | ||
46 | static inline int rcu_needs_cpu(int cpu) | 39 | static inline void rcu_barrier(void) |
47 | { | 40 | { |
48 | return 0; | 41 | rcu_barrier_sched(); /* Only one CPU, so only one list of callbacks! */ |
49 | } | 42 | } |
50 | 43 | ||
51 | /* | 44 | #else /* #ifdef CONFIG_TINY_RCU */ |
52 | * Return the number of grace periods. | 45 | |
53 | */ | 46 | void rcu_barrier(void); |
54 | static inline long rcu_batches_completed(void) | 47 | void synchronize_rcu_expedited(void); |
48 | |||
49 | #endif /* #else #ifdef CONFIG_TINY_RCU */ | ||
50 | |||
51 | static inline void synchronize_rcu_bh(void) | ||
55 | { | 52 | { |
56 | return 0; | 53 | synchronize_sched(); |
57 | } | 54 | } |
58 | 55 | ||
59 | /* | 56 | static inline void synchronize_rcu_bh_expedited(void) |
60 | * Return the number of bottom-half grace periods. | ||
61 | */ | ||
62 | static inline long rcu_batches_completed_bh(void) | ||
63 | { | 57 | { |
64 | return 0; | 58 | synchronize_sched(); |
65 | } | 59 | } |
66 | 60 | ||
67 | static inline void rcu_force_quiescent_state(void) | 61 | #ifdef CONFIG_TINY_RCU |
62 | |||
63 | static inline void rcu_preempt_note_context_switch(void) | ||
68 | { | 64 | { |
69 | } | 65 | } |
70 | 66 | ||
71 | static inline void rcu_bh_force_quiescent_state(void) | 67 | static inline void exit_rcu(void) |
72 | { | 68 | { |
73 | } | 69 | } |
74 | 70 | ||
75 | static inline void rcu_sched_force_quiescent_state(void) | 71 | static inline int rcu_needs_cpu(int cpu) |
76 | { | 72 | { |
73 | return 0; | ||
77 | } | 74 | } |
78 | 75 | ||
79 | extern void synchronize_sched(void); | 76 | #else /* #ifdef CONFIG_TINY_RCU */ |
77 | |||
78 | void rcu_preempt_note_context_switch(void); | ||
79 | extern void exit_rcu(void); | ||
80 | int rcu_preempt_needs_cpu(void); | ||
80 | 81 | ||
81 | static inline void synchronize_rcu(void) | 82 | static inline int rcu_needs_cpu(int cpu) |
82 | { | 83 | { |
83 | synchronize_sched(); | 84 | return rcu_preempt_needs_cpu(); |
84 | } | 85 | } |
85 | 86 | ||
86 | static inline void synchronize_rcu_bh(void) | 87 | #endif /* #else #ifdef CONFIG_TINY_RCU */ |
88 | |||
89 | static inline void rcu_note_context_switch(int cpu) | ||
87 | { | 90 | { |
88 | synchronize_sched(); | 91 | rcu_sched_qs(cpu); |
92 | rcu_preempt_note_context_switch(); | ||
89 | } | 93 | } |
90 | 94 | ||
91 | static inline void synchronize_rcu_expedited(void) | 95 | /* |
96 | * Return the number of grace periods. | ||
97 | */ | ||
98 | static inline long rcu_batches_completed(void) | ||
92 | { | 99 | { |
93 | synchronize_sched(); | 100 | return 0; |
94 | } | 101 | } |
95 | 102 | ||
96 | static inline void synchronize_rcu_bh_expedited(void) | 103 | /* |
104 | * Return the number of bottom-half grace periods. | ||
105 | */ | ||
106 | static inline long rcu_batches_completed_bh(void) | ||
97 | { | 107 | { |
98 | synchronize_sched(); | 108 | return 0; |
99 | } | 109 | } |
100 | 110 | ||
101 | struct notifier_block; | 111 | static inline void rcu_force_quiescent_state(void) |
102 | |||
103 | #ifdef CONFIG_NO_HZ | ||
104 | |||
105 | extern void rcu_enter_nohz(void); | ||
106 | extern void rcu_exit_nohz(void); | ||
107 | |||
108 | #else /* #ifdef CONFIG_NO_HZ */ | ||
109 | |||
110 | static inline void rcu_enter_nohz(void) | ||
111 | { | 112 | { |
112 | } | 113 | } |
113 | 114 | ||
114 | static inline void rcu_exit_nohz(void) | 115 | static inline void rcu_bh_force_quiescent_state(void) |
115 | { | 116 | { |
116 | } | 117 | } |
117 | 118 | ||
118 | #endif /* #else #ifdef CONFIG_NO_HZ */ | 119 | static inline void rcu_sched_force_quiescent_state(void) |
119 | |||
120 | static inline void exit_rcu(void) | ||
121 | { | 120 | { |
122 | } | 121 | } |
123 | 122 | ||
124 | static inline int rcu_preempt_depth(void) | 123 | static inline void rcu_cpu_stall_reset(void) |
125 | { | 124 | { |
126 | return 0; | ||
127 | } | 125 | } |
128 | 126 | ||
129 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 127 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
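One practical consequence of rcu_barrier() being available on TINY_RCU and TREE_RCU alike (inline above, extern in rcutree.h): a module that queues call_rcu() callbacks must flush them before its code is unloaded. A hedged sketch with invented mymod_* names:

#include <linux/module.h>
#include <linux/rcupdate.h>

static void __exit mymod_exit(void)
{
        mymod_remove_all_objects();     /* hypothetical; queues call_rcu() callbacks */
        rcu_barrier();                  /* wait until every queued callback has run */
        /* only now is it safe for this module's callback code to go away */
}
module_exit(mymod_exit);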
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index c0ed1c056f29..95518e628794 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -30,64 +30,23 @@ | |||
30 | #ifndef __LINUX_RCUTREE_H | 30 | #ifndef __LINUX_RCUTREE_H |
31 | #define __LINUX_RCUTREE_H | 31 | #define __LINUX_RCUTREE_H |
32 | 32 | ||
33 | struct notifier_block; | ||
34 | |||
35 | extern void rcu_sched_qs(int cpu); | ||
36 | extern void rcu_bh_qs(int cpu); | ||
37 | extern void rcu_note_context_switch(int cpu); | 33 | extern void rcu_note_context_switch(int cpu); |
38 | extern int rcu_needs_cpu(int cpu); | 34 | extern int rcu_needs_cpu(int cpu); |
35 | extern void rcu_cpu_stall_reset(void); | ||
39 | 36 | ||
40 | #ifdef CONFIG_TREE_PREEMPT_RCU | 37 | #ifdef CONFIG_TREE_PREEMPT_RCU |
41 | 38 | ||
42 | extern void __rcu_read_lock(void); | ||
43 | extern void __rcu_read_unlock(void); | ||
44 | extern void synchronize_rcu(void); | ||
45 | extern void exit_rcu(void); | 39 | extern void exit_rcu(void); |
46 | 40 | ||
47 | /* | ||
48 | * Defined as macro as it is a very low level header | ||
49 | * included from areas that don't even know about current | ||
50 | */ | ||
51 | #define rcu_preempt_depth() (current->rcu_read_lock_nesting) | ||
52 | |||
53 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 41 | #else /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
54 | 42 | ||
55 | static inline void __rcu_read_lock(void) | ||
56 | { | ||
57 | preempt_disable(); | ||
58 | } | ||
59 | |||
60 | static inline void __rcu_read_unlock(void) | ||
61 | { | ||
62 | preempt_enable(); | ||
63 | } | ||
64 | |||
65 | #define synchronize_rcu synchronize_sched | ||
66 | |||
67 | static inline void exit_rcu(void) | 43 | static inline void exit_rcu(void) |
68 | { | 44 | { |
69 | } | 45 | } |
70 | 46 | ||
71 | static inline int rcu_preempt_depth(void) | ||
72 | { | ||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ | 47 | #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */ |
77 | 48 | ||
78 | static inline void __rcu_read_lock_bh(void) | ||
79 | { | ||
80 | local_bh_disable(); | ||
81 | } | ||
82 | static inline void __rcu_read_unlock_bh(void) | ||
83 | { | ||
84 | local_bh_enable(); | ||
85 | } | ||
86 | |||
87 | extern void call_rcu_sched(struct rcu_head *head, | ||
88 | void (*func)(struct rcu_head *rcu)); | ||
89 | extern void synchronize_rcu_bh(void); | 49 | extern void synchronize_rcu_bh(void); |
90 | extern void synchronize_sched(void); | ||
91 | extern void synchronize_rcu_expedited(void); | 50 | extern void synchronize_rcu_expedited(void); |
92 | 51 | ||
93 | static inline void synchronize_rcu_bh_expedited(void) | 52 | static inline void synchronize_rcu_bh_expedited(void) |
@@ -95,7 +54,7 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
95 | synchronize_sched_expedited(); | 54 | synchronize_sched_expedited(); |
96 | } | 55 | } |
97 | 56 | ||
98 | extern void rcu_check_callbacks(int cpu, int user); | 57 | extern void rcu_barrier(void); |
99 | 58 | ||
100 | extern long rcu_batches_completed(void); | 59 | extern long rcu_batches_completed(void); |
101 | extern long rcu_batches_completed_bh(void); | 60 | extern long rcu_batches_completed_bh(void); |
@@ -104,18 +63,6 @@ extern void rcu_force_quiescent_state(void); | |||
104 | extern void rcu_bh_force_quiescent_state(void); | 63 | extern void rcu_bh_force_quiescent_state(void); |
105 | extern void rcu_sched_force_quiescent_state(void); | 64 | extern void rcu_sched_force_quiescent_state(void); |
106 | 65 | ||
107 | #ifdef CONFIG_NO_HZ | ||
108 | void rcu_enter_nohz(void); | ||
109 | void rcu_exit_nohz(void); | ||
110 | #else /* CONFIG_NO_HZ */ | ||
111 | static inline void rcu_enter_nohz(void) | ||
112 | { | ||
113 | } | ||
114 | static inline void rcu_exit_nohz(void) | ||
115 | { | ||
116 | } | ||
117 | #endif /* CONFIG_NO_HZ */ | ||
118 | |||
119 | /* A context switch is a grace period for RCU-sched and RCU-bh. */ | 66 | /* A context switch is a grace period for RCU-sched and RCU-bh. */ |
120 | static inline int rcu_blocking_is_gp(void) | 67 | static inline int rcu_blocking_is_gp(void) |
121 | { | 68 | { |
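The rcutiny.h and rcutree.h hunks above only move declarations between the two implementations; the read-side API that callers use is identical under CONFIG_TINY_RCU and CONFIG_TREE_PREEMPT_RCU. A minimal caller sketch, assuming the usual rcupdate.h interface and a made-up struct foo that is not part of this patch:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
        int val;
};

static struct foo *global_foo;          /* hypothetical RCU-protected pointer */

static int read_foo(void)
{
        struct foo *p;
        int v;

        rcu_read_lock();                /* same call under TINY and TREE RCU */
        p = rcu_dereference(global_foo);
        v = p ? p->val : 0;
        rcu_read_unlock();
        return v;
}

static void replace_foo(struct foo *newp)
{
        struct foo *oldp = global_foo;  /* update side serialized elsewhere */

        rcu_assign_pointer(global_foo, newp);
        synchronize_rcu();              /* one full grace period on either variant */
        kfree(oldp);
}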
diff --git a/include/linux/rds.h b/include/linux/rds.h index 24bce3ded9ea..91950950aa59 100644 --- a/include/linux/rds.h +++ b/include/linux/rds.h | |||
@@ -36,15 +36,6 @@ | |||
36 | 36 | ||
37 | #include <linux/types.h> | 37 | #include <linux/types.h> |
38 | 38 | ||
39 | /* These sparse annotated types shouldn't be in any user | ||
40 | * visible header file. We should clean this up rather | ||
41 | * than kludging around them. */ | ||
42 | #ifndef __KERNEL__ | ||
43 | #define __be16 u_int16_t | ||
44 | #define __be32 u_int32_t | ||
45 | #define __be64 u_int64_t | ||
46 | #endif | ||
47 | |||
48 | #define RDS_IB_ABI_VERSION 0x301 | 39 | #define RDS_IB_ABI_VERSION 0x301 |
49 | 40 | ||
50 | /* | 41 | /* |
@@ -82,6 +73,10 @@ | |||
82 | #define RDS_CMSG_RDMA_MAP 3 | 73 | #define RDS_CMSG_RDMA_MAP 3 |
83 | #define RDS_CMSG_RDMA_STATUS 4 | 74 | #define RDS_CMSG_RDMA_STATUS 4 |
84 | #define RDS_CMSG_CONG_UPDATE 5 | 75 | #define RDS_CMSG_CONG_UPDATE 5 |
76 | #define RDS_CMSG_ATOMIC_FADD 6 | ||
77 | #define RDS_CMSG_ATOMIC_CSWP 7 | ||
78 | #define RDS_CMSG_MASKED_ATOMIC_FADD 8 | ||
79 | #define RDS_CMSG_MASKED_ATOMIC_CSWP 9 | ||
85 | 80 | ||
86 | #define RDS_INFO_FIRST 10000 | 81 | #define RDS_INFO_FIRST 10000 |
87 | #define RDS_INFO_COUNTERS 10000 | 82 | #define RDS_INFO_COUNTERS 10000 |
@@ -98,9 +93,9 @@ | |||
98 | #define RDS_INFO_LAST 10010 | 93 | #define RDS_INFO_LAST 10010 |
99 | 94 | ||
100 | struct rds_info_counter { | 95 | struct rds_info_counter { |
101 | u_int8_t name[32]; | 96 | uint8_t name[32]; |
102 | u_int64_t value; | 97 | uint64_t value; |
103 | } __packed; | 98 | } __attribute__((packed)); |
104 | 99 | ||
105 | #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 | 100 | #define RDS_INFO_CONNECTION_FLAG_SENDING 0x01 |
106 | #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 | 101 | #define RDS_INFO_CONNECTION_FLAG_CONNECTING 0x02 |
@@ -109,56 +104,48 @@ struct rds_info_counter { | |||
109 | #define TRANSNAMSIZ 16 | 104 | #define TRANSNAMSIZ 16 |
110 | 105 | ||
111 | struct rds_info_connection { | 106 | struct rds_info_connection { |
112 | u_int64_t next_tx_seq; | 107 | uint64_t next_tx_seq; |
113 | u_int64_t next_rx_seq; | 108 | uint64_t next_rx_seq; |
114 | __be32 laddr; | ||
115 | __be32 faddr; | ||
116 | u_int8_t transport[TRANSNAMSIZ]; /* null term ascii */ | ||
117 | u_int8_t flags; | ||
118 | } __packed; | ||
119 | |||
120 | struct rds_info_flow { | ||
121 | __be32 laddr; | 109 | __be32 laddr; |
122 | __be32 faddr; | 110 | __be32 faddr; |
123 | u_int32_t bytes; | 111 | uint8_t transport[TRANSNAMSIZ]; /* null term ascii */ |
124 | __be16 lport; | 112 | uint8_t flags; |
125 | __be16 fport; | 113 | } __attribute__((packed)); |
126 | } __packed; | ||
127 | 114 | ||
128 | #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 | 115 | #define RDS_INFO_MESSAGE_FLAG_ACK 0x01 |
129 | #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 | 116 | #define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02 |
130 | 117 | ||
131 | struct rds_info_message { | 118 | struct rds_info_message { |
132 | u_int64_t seq; | 119 | uint64_t seq; |
133 | u_int32_t len; | 120 | uint32_t len; |
134 | __be32 laddr; | 121 | __be32 laddr; |
135 | __be32 faddr; | 122 | __be32 faddr; |
136 | __be16 lport; | 123 | __be16 lport; |
137 | __be16 fport; | 124 | __be16 fport; |
138 | u_int8_t flags; | 125 | uint8_t flags; |
139 | } __packed; | 126 | } __attribute__((packed)); |
140 | 127 | ||
141 | struct rds_info_socket { | 128 | struct rds_info_socket { |
142 | u_int32_t sndbuf; | 129 | uint32_t sndbuf; |
143 | __be32 bound_addr; | 130 | __be32 bound_addr; |
144 | __be32 connected_addr; | 131 | __be32 connected_addr; |
145 | __be16 bound_port; | 132 | __be16 bound_port; |
146 | __be16 connected_port; | 133 | __be16 connected_port; |
147 | u_int32_t rcvbuf; | 134 | uint32_t rcvbuf; |
148 | u_int64_t inum; | 135 | uint64_t inum; |
149 | } __packed; | 136 | } __attribute__((packed)); |
150 | 137 | ||
151 | struct rds_info_tcp_socket { | 138 | struct rds_info_tcp_socket { |
152 | __be32 local_addr; | 139 | __be32 local_addr; |
153 | __be16 local_port; | 140 | __be16 local_port; |
154 | __be32 peer_addr; | 141 | __be32 peer_addr; |
155 | __be16 peer_port; | 142 | __be16 peer_port; |
156 | u_int64_t hdr_rem; | 143 | uint64_t hdr_rem; |
157 | u_int64_t data_rem; | 144 | uint64_t data_rem; |
158 | u_int32_t last_sent_nxt; | 145 | uint32_t last_sent_nxt; |
159 | u_int32_t last_expected_una; | 146 | uint32_t last_expected_una; |
160 | u_int32_t last_seen_una; | 147 | uint32_t last_seen_una; |
161 | } __packed; | 148 | } __attribute__((packed)); |
162 | 149 | ||
163 | #define RDS_IB_GID_LEN 16 | 150 | #define RDS_IB_GID_LEN 16 |
164 | struct rds_info_rdma_connection { | 151 | struct rds_info_rdma_connection { |
@@ -212,42 +199,69 @@ struct rds_info_rdma_connection { | |||
212 | * (so that the application does not have to worry about | 199 | * (so that the application does not have to worry about |
213 | * alignment). | 200 | * alignment). |
214 | */ | 201 | */ |
215 | typedef u_int64_t rds_rdma_cookie_t; | 202 | typedef uint64_t rds_rdma_cookie_t; |
216 | 203 | ||
217 | struct rds_iovec { | 204 | struct rds_iovec { |
218 | u_int64_t addr; | 205 | uint64_t addr; |
219 | u_int64_t bytes; | 206 | uint64_t bytes; |
220 | }; | 207 | }; |
221 | 208 | ||
222 | struct rds_get_mr_args { | 209 | struct rds_get_mr_args { |
223 | struct rds_iovec vec; | 210 | struct rds_iovec vec; |
224 | u_int64_t cookie_addr; | 211 | uint64_t cookie_addr; |
225 | uint64_t flags; | 212 | uint64_t flags; |
226 | }; | 213 | }; |
227 | 214 | ||
228 | struct rds_get_mr_for_dest_args { | 215 | struct rds_get_mr_for_dest_args { |
229 | struct sockaddr_storage dest_addr; | 216 | struct sockaddr_storage dest_addr; |
230 | struct rds_iovec vec; | 217 | struct rds_iovec vec; |
231 | u_int64_t cookie_addr; | 218 | uint64_t cookie_addr; |
232 | uint64_t flags; | 219 | uint64_t flags; |
233 | }; | 220 | }; |
234 | 221 | ||
235 | struct rds_free_mr_args { | 222 | struct rds_free_mr_args { |
236 | rds_rdma_cookie_t cookie; | 223 | rds_rdma_cookie_t cookie; |
237 | u_int64_t flags; | 224 | uint64_t flags; |
238 | }; | 225 | }; |
239 | 226 | ||
240 | struct rds_rdma_args { | 227 | struct rds_rdma_args { |
241 | rds_rdma_cookie_t cookie; | 228 | rds_rdma_cookie_t cookie; |
242 | struct rds_iovec remote_vec; | 229 | struct rds_iovec remote_vec; |
243 | u_int64_t local_vec_addr; | 230 | uint64_t local_vec_addr; |
244 | u_int64_t nr_local; | 231 | uint64_t nr_local; |
245 | u_int64_t flags; | 232 | uint64_t flags; |
246 | u_int64_t user_token; | 233 | uint64_t user_token; |
234 | }; | ||
235 | |||
236 | struct rds_atomic_args { | ||
237 | rds_rdma_cookie_t cookie; | ||
238 | uint64_t local_addr; | ||
239 | uint64_t remote_addr; | ||
240 | union { | ||
241 | struct { | ||
242 | uint64_t compare; | ||
243 | uint64_t swap; | ||
244 | } cswp; | ||
245 | struct { | ||
246 | uint64_t add; | ||
247 | } fadd; | ||
248 | struct { | ||
249 | uint64_t compare; | ||
250 | uint64_t swap; | ||
251 | uint64_t compare_mask; | ||
252 | uint64_t swap_mask; | ||
253 | } m_cswp; | ||
254 | struct { | ||
255 | uint64_t add; | ||
256 | uint64_t nocarry_mask; | ||
257 | } m_fadd; | ||
258 | }; | ||
259 | uint64_t flags; | ||
260 | uint64_t user_token; | ||
247 | }; | 261 | }; |
248 | 262 | ||
249 | struct rds_rdma_notify { | 263 | struct rds_rdma_notify { |
250 | u_int64_t user_token; | 264 | uint64_t user_token; |
251 | int32_t status; | 265 | int32_t status; |
252 | }; | 266 | }; |
253 | 267 | ||
@@ -266,5 +280,6 @@ struct rds_rdma_notify { | |||
266 | #define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ | 280 | #define RDS_RDMA_USE_ONCE 0x0008 /* free MR after use */ |
267 | #define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ | 281 | #define RDS_RDMA_DONTWAIT 0x0010 /* Don't wait in SET_BARRIER */ |
268 | #define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ | 282 | #define RDS_RDMA_NOTIFY_ME 0x0020 /* Notify when operation completes */ |
283 | #define RDS_RDMA_SILENT 0x0040 /* Do not interrupt remote */ | ||
269 | 284 | ||
270 | #endif /* IB_RDS_H */ | 285 | #endif /* IB_RDS_H */ |
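The new RDS_CMSG_ATOMIC_* control-message types and struct rds_atomic_args above expose RDS RDMA atomics to userspace through sendmsg() ancillary data. A hedged userspace sketch of a fetch-and-add request follows; the SOL_RDS level, the meaning of local_addr (assumed to be where the previous remote value is written back) and the surrounding socket setup are assumptions, and error handling is omitted:

#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/rds.h>

/* Ask RDS to atomically add 1 to a 64-bit word in a registered remote MR. */
static int rds_fadd_one(int fd, struct sockaddr_in *peer,
                        rds_rdma_cookie_t cookie, uint64_t remote_addr,
                        uint64_t local_result_addr)
{
        struct rds_atomic_args args;
        char cbuf[CMSG_SPACE(sizeof(args))];
        struct msghdr msg;
        struct cmsghdr *cmsg;

        memset(&args, 0, sizeof(args));
        args.cookie = cookie;                   /* MR cookie from a prior GET_MR */
        args.remote_addr = remote_addr;         /* where the atomic is applied */
        args.local_addr = local_result_addr;    /* assumed: old value lands here */
        args.fadd.add = 1;
        args.flags = RDS_RDMA_NOTIFY_ME;
        args.user_token = 42;                   /* echoed back in rds_rdma_notify */

        memset(cbuf, 0, sizeof(cbuf));
        memset(&msg, 0, sizeof(msg));
        msg.msg_name = peer;
        msg.msg_namelen = sizeof(*peer);
        msg.msg_control = cbuf;
        msg.msg_controllen = sizeof(cbuf);

        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = SOL_RDS;             /* assumed RDS cmsg level */
        cmsg->cmsg_type = RDS_CMSG_ATOMIC_FADD;
        cmsg->cmsg_len = CMSG_LEN(sizeof(args));
        memcpy(CMSG_DATA(cmsg), &args, sizeof(args));

        return sendmsg(fd, &msg, 0);
}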
diff --git a/include/linux/resume-trace.h b/include/linux/resume-trace.h index bc8c3881c729..f31db2368782 100644 --- a/include/linux/resume-trace.h +++ b/include/linux/resume-trace.h | |||
@@ -3,6 +3,7 @@ | |||
3 | 3 | ||
4 | #ifdef CONFIG_PM_TRACE | 4 | #ifdef CONFIG_PM_TRACE |
5 | #include <asm/resume-trace.h> | 5 | #include <asm/resume-trace.h> |
6 | #include <linux/types.h> | ||
6 | 7 | ||
7 | extern int pm_trace_enabled; | 8 | extern int pm_trace_enabled; |
8 | 9 | ||
@@ -14,6 +15,7 @@ static inline int pm_trace_is_enabled(void) | |||
14 | struct device; | 15 | struct device; |
15 | extern void set_trace_device(struct device *); | 16 | extern void set_trace_device(struct device *); |
16 | extern void generate_resume_trace(const void *tracedata, unsigned int user); | 17 | extern void generate_resume_trace(const void *tracedata, unsigned int user); |
18 | extern int show_trace_dev_match(char *buf, size_t size); | ||
17 | 19 | ||
18 | #define TRACE_DEVICE(dev) do { \ | 20 | #define TRACE_DEVICE(dev) do { \ |
19 | if (pm_trace_enabled) \ | 21 | if (pm_trace_enabled) \ |
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h index 4f82326eb294..08c32e4f261a 100644 --- a/include/linux/rfkill.h +++ b/include/linux/rfkill.h | |||
@@ -81,7 +81,7 @@ struct rfkill_event { | |||
81 | __u8 type; | 81 | __u8 type; |
82 | __u8 op; | 82 | __u8 op; |
83 | __u8 soft, hard; | 83 | __u8 soft, hard; |
84 | } __packed; | 84 | } __attribute__((packed)); |
85 | 85 | ||
86 | /* | 86 | /* |
87 | * We are planning to be backward and forward compatible with changes | 87 | * We are planning to be backward and forward compatible with changes |
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 58d44491880f..d42f274418b8 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -6,6 +6,7 @@ | |||
6 | #include <linux/if_link.h> | 6 | #include <linux/if_link.h> |
7 | #include <linux/if_addr.h> | 7 | #include <linux/if_addr.h> |
8 | #include <linux/neighbour.h> | 8 | #include <linux/neighbour.h> |
9 | #include <linux/netdevice.h> | ||
9 | 10 | ||
10 | /* rtnetlink families. Values up to 127 are reserved for real address | 11 | /* rtnetlink families. Values up to 127 are reserved for real address |
11 | * families, values above 128 may be used arbitrarily. | 12 | * families, values above 128 may be used arbitrarily. |
@@ -749,6 +750,35 @@ extern int rtnl_is_locked(void); | |||
749 | extern int lockdep_rtnl_is_held(void); | 750 | extern int lockdep_rtnl_is_held(void); |
750 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ | 751 | #endif /* #ifdef CONFIG_PROVE_LOCKING */ |
751 | 752 | ||
753 | /** | ||
754 | * rcu_dereference_rtnl - rcu_dereference with debug checking | ||
755 | * @p: The pointer to read, prior to dereferencing | ||
756 | * | ||
757 | * Do an rcu_dereference(p), but check that the caller either holds | ||
758 | * rcu_read_lock() or RTNL. Note: please prefer rtnl_dereference() or rcu_dereference(). | ||
759 | */ | ||
760 | #define rcu_dereference_rtnl(p) \ | ||
761 | rcu_dereference_check(p, rcu_read_lock_held() || \ | ||
762 | lockdep_rtnl_is_held()) | ||
763 | |||
764 | /** | ||
765 | * rtnl_dereference - fetch RCU pointer when updates are prevented by RTNL | ||
766 | * @p: The pointer to read, prior to dereferencing | ||
767 | * | ||
768 | * Return the value of the specified RCU-protected pointer, but omit | ||
769 | * both the smp_read_barrier_depends() and the ACCESS_ONCE(), because | ||
770 | * caller holds RTNL. | ||
771 | */ | ||
772 | #define rtnl_dereference(p) \ | ||
773 | rcu_dereference_protected(p, lockdep_rtnl_is_held()) | ||
774 | |||
775 | static inline struct netdev_queue *dev_ingress_queue(struct net_device *dev) | ||
776 | { | ||
777 | return rtnl_dereference(dev->ingress_queue); | ||
778 | } | ||
779 | |||
780 | extern struct netdev_queue *dev_ingress_queue_create(struct net_device *dev); | ||
781 | |||
752 | extern void rtnetlink_init(void); | 782 | extern void rtnetlink_init(void); |
753 | extern void __rtnl_unlock(void); | 783 | extern void __rtnl_unlock(void); |
754 | 784 | ||
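rcu_dereference_rtnl() and rtnl_dereference() added above encode which lock justifies the access; a minimal sketch of the two intended call sites, reusing the dev->ingress_queue pointer and dev_ingress_queue_create() from the hunk (the caller itself is hypothetical):

static void ingress_queue_example(struct net_device *dev)
{
        struct netdev_queue *q;

        /* Updater path: RTNL excludes concurrent updates, no RCU section needed. */
        rtnl_lock();
        q = rtnl_dereference(dev->ingress_queue);
        if (!q)
                q = dev_ingress_queue_create(dev);
        rtnl_unlock();

        /* Fast-path reader: rcu_read_lock() (or RTNL) satisfies the lockdep check. */
        rcu_read_lock();
        q = rcu_dereference_rtnl(dev->ingress_queue);
        /* ... read-only use of q ... */
        rcu_read_unlock();
}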
diff --git a/include/linux/sched.h b/include/linux/sched.h index ce160d68f5e7..56154bbb8da9 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -336,6 +336,9 @@ extern unsigned long sysctl_hung_task_warnings; | |||
336 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, | 336 | extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write, |
337 | void __user *buffer, | 337 | void __user *buffer, |
338 | size_t *lenp, loff_t *ppos); | 338 | size_t *lenp, loff_t *ppos); |
339 | #else | ||
340 | /* Avoid need for ifdefs elsewhere in the code */ | ||
341 | enum { sysctl_hung_task_timeout_secs = 0 }; | ||
339 | #endif | 342 | #endif |
340 | 343 | ||
341 | /* Attach to any functions which should be ignored in wchan output. */ | 344 | /* Attach to any functions which should be ignored in wchan output. */ |
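The "#else ... enum { sysctl_hung_task_timeout_secs = 0 };" branch added above exists so callers can test the knob without wrapping the test in #ifdefs; a hedged sketch of the pattern it enables (the caller and the surrounding config symbol are assumptions):

/* Builds whether or not the hung-task detector is configured in: with the
 * feature out, the enum pins the symbol to 0 and the branch folds away. */
static void maybe_warn_about_hang(unsigned long waited_secs)
{
        if (sysctl_hung_task_timeout_secs &&
            waited_secs > sysctl_hung_task_timeout_secs)
                printk(KERN_WARNING "task blocked for more than %lu seconds\n",
                       waited_secs);
}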
@@ -875,6 +878,7 @@ enum sched_domain_level { | |||
875 | SD_LV_NONE = 0, | 878 | SD_LV_NONE = 0, |
876 | SD_LV_SIBLING, | 879 | SD_LV_SIBLING, |
877 | SD_LV_MC, | 880 | SD_LV_MC, |
881 | SD_LV_BOOK, | ||
878 | SD_LV_CPU, | 882 | SD_LV_CPU, |
879 | SD_LV_NODE, | 883 | SD_LV_NODE, |
880 | SD_LV_ALLNODES, | 884 | SD_LV_ALLNODES, |
@@ -1160,6 +1164,13 @@ struct sched_rt_entity { | |||
1160 | 1164 | ||
1161 | struct rcu_node; | 1165 | struct rcu_node; |
1162 | 1166 | ||
1167 | enum perf_event_task_context { | ||
1168 | perf_invalid_context = -1, | ||
1169 | perf_hw_context = 0, | ||
1170 | perf_sw_context, | ||
1171 | perf_nr_task_contexts, | ||
1172 | }; | ||
1173 | |||
1163 | struct task_struct { | 1174 | struct task_struct { |
1164 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | 1175 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ |
1165 | void *stack; | 1176 | void *stack; |
@@ -1202,11 +1213,13 @@ struct task_struct { | |||
1202 | unsigned int policy; | 1213 | unsigned int policy; |
1203 | cpumask_t cpus_allowed; | 1214 | cpumask_t cpus_allowed; |
1204 | 1215 | ||
1205 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1216 | #ifdef CONFIG_PREEMPT_RCU |
1206 | int rcu_read_lock_nesting; | 1217 | int rcu_read_lock_nesting; |
1207 | char rcu_read_unlock_special; | 1218 | char rcu_read_unlock_special; |
1208 | struct rcu_node *rcu_blocked_node; | ||
1209 | struct list_head rcu_node_entry; | 1219 | struct list_head rcu_node_entry; |
1220 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
1221 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1222 | struct rcu_node *rcu_blocked_node; | ||
1210 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ | 1223 | #endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */ |
1211 | 1224 | ||
1212 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1225 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
@@ -1288,9 +1301,9 @@ struct task_struct { | |||
1288 | struct list_head cpu_timers[3]; | 1301 | struct list_head cpu_timers[3]; |
1289 | 1302 | ||
1290 | /* process credentials */ | 1303 | /* process credentials */ |
1291 | const struct cred *real_cred; /* objective and real subjective task | 1304 | const struct cred __rcu *real_cred; /* objective and real subjective task |
1292 | * credentials (COW) */ | 1305 | * credentials (COW) */ |
1293 | const struct cred *cred; /* effective (overridable) subjective task | 1306 | const struct cred __rcu *cred; /* effective (overridable) subjective task |
1294 | * credentials (COW) */ | 1307 | * credentials (COW) */ |
1295 | struct mutex cred_guard_mutex; /* guard against foreign influences on | 1308 | struct mutex cred_guard_mutex; /* guard against foreign influences on |
1296 | * credential calculations | 1309 | * credential calculations |
@@ -1418,7 +1431,7 @@ struct task_struct { | |||
1418 | #endif | 1431 | #endif |
1419 | #ifdef CONFIG_CGROUPS | 1432 | #ifdef CONFIG_CGROUPS |
1420 | /* Control Group info protected by css_set_lock */ | 1433 | /* Control Group info protected by css_set_lock */ |
1421 | struct css_set *cgroups; | 1434 | struct css_set __rcu *cgroups; |
1422 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ | 1435 | /* cg_list protected by css_set_lock and tsk->alloc_lock */ |
1423 | struct list_head cg_list; | 1436 | struct list_head cg_list; |
1424 | #endif | 1437 | #endif |
@@ -1431,7 +1444,7 @@ struct task_struct { | |||
1431 | struct futex_pi_state *pi_state_cache; | 1444 | struct futex_pi_state *pi_state_cache; |
1432 | #endif | 1445 | #endif |
1433 | #ifdef CONFIG_PERF_EVENTS | 1446 | #ifdef CONFIG_PERF_EVENTS |
1434 | struct perf_event_context *perf_event_ctxp; | 1447 | struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts]; |
1435 | struct mutex perf_event_mutex; | 1448 | struct mutex perf_event_mutex; |
1436 | struct list_head perf_event_list; | 1449 | struct list_head perf_event_list; |
1437 | #endif | 1450 | #endif |
@@ -1681,8 +1694,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1681 | /* | 1694 | /* |
1682 | * Per process flags | 1695 | * Per process flags |
1683 | */ | 1696 | */ |
1684 | #define PF_ALIGNWARN 0x00000001 /* Print alignment warning msgs */ | 1697 | #define PF_KSOFTIRQD 0x00000001 /* I am ksoftirqd */ |
1685 | /* Not implemented yet, only for 486*/ | ||
1686 | #define PF_STARTING 0x00000002 /* being created */ | 1698 | #define PF_STARTING 0x00000002 /* being created */ |
1687 | #define PF_EXITING 0x00000004 /* getting shut down */ | 1699 | #define PF_EXITING 0x00000004 /* getting shut down */ |
1688 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ | 1700 | #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ |
@@ -1740,7 +1752,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * | |||
1740 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) | 1752 | #define tsk_used_math(p) ((p)->flags & PF_USED_MATH) |
1741 | #define used_math() tsk_used_math(current) | 1753 | #define used_math() tsk_used_math(current) |
1742 | 1754 | ||
1743 | #ifdef CONFIG_TREE_PREEMPT_RCU | 1755 | #ifdef CONFIG_PREEMPT_RCU |
1744 | 1756 | ||
1745 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ | 1757 | #define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */ |
1746 | #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ | 1758 | #define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */ |
@@ -1749,7 +1761,9 @@ static inline void rcu_copy_process(struct task_struct *p) | |||
1749 | { | 1761 | { |
1750 | p->rcu_read_lock_nesting = 0; | 1762 | p->rcu_read_lock_nesting = 0; |
1751 | p->rcu_read_unlock_special = 0; | 1763 | p->rcu_read_unlock_special = 0; |
1764 | #ifdef CONFIG_TREE_PREEMPT_RCU | ||
1752 | p->rcu_blocked_node = NULL; | 1765 | p->rcu_blocked_node = NULL; |
1766 | #endif | ||
1753 | INIT_LIST_HEAD(&p->rcu_node_entry); | 1767 | INIT_LIST_HEAD(&p->rcu_node_entry); |
1754 | } | 1768 | } |
1755 | 1769 | ||
@@ -1826,6 +1840,19 @@ extern void sched_clock_idle_sleep_event(void); | |||
1826 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); | 1840 | extern void sched_clock_idle_wakeup_event(u64 delta_ns); |
1827 | #endif | 1841 | #endif |
1828 | 1842 | ||
1843 | #ifdef CONFIG_IRQ_TIME_ACCOUNTING | ||
1844 | /* | ||
1845 | * An i/f to runtime opt-in for irq time accounting based off of sched_clock. | ||
1846 | * The reason for this explicit opt-in is not to have perf penalty with | ||
1847 | * slow sched_clocks. | ||
1848 | */ | ||
1849 | extern void enable_sched_clock_irqtime(void); | ||
1850 | extern void disable_sched_clock_irqtime(void); | ||
1851 | #else | ||
1852 | static inline void enable_sched_clock_irqtime(void) {} | ||
1853 | static inline void disable_sched_clock_irqtime(void) {} | ||
1854 | #endif | ||
1855 | |||
1829 | extern unsigned long long | 1856 | extern unsigned long long |
1830 | task_sched_runtime(struct task_struct *task); | 1857 | task_sched_runtime(struct task_struct *task); |
1831 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); | 1858 | extern unsigned long long thread_group_sched_runtime(struct task_struct *task); |
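enable_sched_clock_irqtime()/disable_sched_clock_irqtime() above are a runtime opt-in, so IRQ time accounting is only enabled where sched_clock is cheap; a speculative sketch of an arch-side caller (the fast-clock check is a made-up parameter):

/* Called from architecture clock setup once the sched_clock source is known. */
static void setup_irqtime_accounting(int sched_clock_is_fast)
{
        if (sched_clock_is_fast)
                enable_sched_clock_irqtime();
        else
                disable_sched_clock_irqtime();
}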
@@ -2109,7 +2136,9 @@ extern void daemonize(const char *, ...); | |||
2109 | extern int allow_signal(int); | 2136 | extern int allow_signal(int); |
2110 | extern int disallow_signal(int); | 2137 | extern int disallow_signal(int); |
2111 | 2138 | ||
2112 | extern int do_execve(char *, char __user * __user *, char __user * __user *, struct pt_regs *); | 2139 | extern int do_execve(const char *, |
2140 | const char __user * const __user *, | ||
2141 | const char __user * const __user *, struct pt_regs *); | ||
2113 | extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); | 2142 | extern long do_fork(unsigned long, unsigned long, struct pt_regs *, unsigned long, int __user *, int __user *); |
2114 | struct task_struct *fork_idle(int); | 2143 | struct task_struct *fork_idle(int); |
2115 | 2144 | ||
@@ -2365,9 +2394,9 @@ extern int __cond_resched_lock(spinlock_t *lock); | |||
2365 | 2394 | ||
2366 | extern int __cond_resched_softirq(void); | 2395 | extern int __cond_resched_softirq(void); |
2367 | 2396 | ||
2368 | #define cond_resched_softirq() ({ \ | 2397 | #define cond_resched_softirq() ({ \ |
2369 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_OFFSET); \ | 2398 | __might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ |
2370 | __cond_resched_softirq(); \ | 2399 | __cond_resched_softirq(); \ |
2371 | }) | 2400 | }) |
2372 | 2401 | ||
2373 | /* | 2402 | /* |
diff --git a/include/linux/security.h b/include/linux/security.h index a22219afff09..b8246a8df7d2 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
@@ -74,7 +74,7 @@ extern int cap_file_mmap(struct file *file, unsigned long reqprot, | |||
74 | extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); | 74 | extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); |
75 | extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, | 75 | extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, |
76 | unsigned long arg4, unsigned long arg5); | 76 | unsigned long arg4, unsigned long arg5); |
77 | extern int cap_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp); | 77 | extern int cap_task_setscheduler(struct task_struct *p); |
78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); | 78 | extern int cap_task_setioprio(struct task_struct *p, int ioprio); |
79 | extern int cap_task_setnice(struct task_struct *p, int nice); | 79 | extern int cap_task_setnice(struct task_struct *p, int nice); |
80 | extern int cap_syslog(int type, bool from_file); | 80 | extern int cap_syslog(int type, bool from_file); |
@@ -959,6 +959,12 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
959 | * Sets the new child socket's sid to the openreq sid. | 959 | * Sets the new child socket's sid to the openreq sid. |
960 | * @inet_conn_established: | 960 | * @inet_conn_established: |
961 | * Sets the connection's peersid to the secmark on skb. | 961 | * Sets the connection's peersid to the secmark on skb. |
962 | * @secmark_relabel_packet: | ||
963 | * check if the process should be allowed to relabel packets to the given secid | ||
964 | * @security_secmark_refcount_inc | ||
965 | * tells the LSM to increment the number of secmark labeling rules loaded | ||
966 | * @security_secmark_refcount_dec | ||
967 | * tells the LSM to decrement the number of secmark labeling rules loaded | ||
962 | * @req_classify_flow: | 968 | * @req_classify_flow: |
963 | * Sets the flow's sid to the openreq sid. | 969 | * Sets the flow's sid to the openreq sid. |
964 | * @tun_dev_create: | 970 | * @tun_dev_create: |
@@ -1279,9 +1285,13 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts) | |||
1279 | * Return 0 if permission is granted. | 1285 | * Return 0 if permission is granted. |
1280 | * | 1286 | * |
1281 | * @secid_to_secctx: | 1287 | * @secid_to_secctx: |
1282 | * Convert secid to security context. | 1288 | * Convert secid to security context. If secdata is NULL the length of |
1289 | * the result will be returned in seclen, but no secdata will be returned. | ||
1290 | * This does mean that the length could change between calls to check the | ||
1291 | * length and the next call which actually allocates and returns the secdata. | ||
1283 | * @secid contains the security ID. | 1292 | * @secid contains the security ID. |
1284 | * @secdata contains the pointer that stores the converted security context. | 1293 | * @secdata contains the pointer that stores the converted security context. |
1294 | * @seclen pointer which contains the length of the data | ||
1285 | * @secctx_to_secid: | 1295 | * @secctx_to_secid: |
1286 | * Convert security context to secid. | 1296 | * Convert security context to secid. |
1287 | * @secid contains the pointer to the generated security ID. | 1297 | * @secid contains the pointer to the generated security ID. |
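The expanded @secid_to_secctx text above describes a query-then-fetch convention: a NULL secdata pointer returns only the length, which may differ by the time the real call is made. A sketch of a caller using the security_secid_to_secctx()/security_release_secctx() wrappers under that assumption:

static int log_secctx(u32 secid)
{
        char *ctx;
        u32 len;
        int err;

        /* First call with NULL secdata only reports the needed length. */
        err = security_secid_to_secctx(secid, NULL, &len);
        if (err)
                return err;

        /* The length is a hint only; the second call allocates and returns
         * the context, which may meanwhile have changed size. */
        err = security_secid_to_secctx(secid, &ctx, &len);
        if (err)
                return err;

        printk(KERN_INFO "secctx=%.*s\n", (int)len, ctx);
        security_release_secctx(ctx, len);
        return 0;
}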
@@ -1501,8 +1511,7 @@ struct security_operations { | |||
1501 | int (*task_getioprio) (struct task_struct *p); | 1511 | int (*task_getioprio) (struct task_struct *p); |
1502 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, | 1512 | int (*task_setrlimit) (struct task_struct *p, unsigned int resource, |
1503 | struct rlimit *new_rlim); | 1513 | struct rlimit *new_rlim); |
1504 | int (*task_setscheduler) (struct task_struct *p, int policy, | 1514 | int (*task_setscheduler) (struct task_struct *p); |
1505 | struct sched_param *lp); | ||
1506 | int (*task_getscheduler) (struct task_struct *p); | 1515 | int (*task_getscheduler) (struct task_struct *p); |
1507 | int (*task_movememory) (struct task_struct *p); | 1516 | int (*task_movememory) (struct task_struct *p); |
1508 | int (*task_kill) (struct task_struct *p, | 1517 | int (*task_kill) (struct task_struct *p, |
@@ -1594,6 +1603,9 @@ struct security_operations { | |||
1594 | struct request_sock *req); | 1603 | struct request_sock *req); |
1595 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); | 1604 | void (*inet_csk_clone) (struct sock *newsk, const struct request_sock *req); |
1596 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); | 1605 | void (*inet_conn_established) (struct sock *sk, struct sk_buff *skb); |
1606 | int (*secmark_relabel_packet) (u32 secid); | ||
1607 | void (*secmark_refcount_inc) (void); | ||
1608 | void (*secmark_refcount_dec) (void); | ||
1597 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); | 1609 | void (*req_classify_flow) (const struct request_sock *req, struct flowi *fl); |
1598 | int (*tun_dev_create)(void); | 1610 | int (*tun_dev_create)(void); |
1599 | void (*tun_dev_post_create)(struct sock *sk); | 1611 | void (*tun_dev_post_create)(struct sock *sk); |
@@ -1752,8 +1764,7 @@ int security_task_setioprio(struct task_struct *p, int ioprio); | |||
1752 | int security_task_getioprio(struct task_struct *p); | 1764 | int security_task_getioprio(struct task_struct *p); |
1753 | int security_task_setrlimit(struct task_struct *p, unsigned int resource, | 1765 | int security_task_setrlimit(struct task_struct *p, unsigned int resource, |
1754 | struct rlimit *new_rlim); | 1766 | struct rlimit *new_rlim); |
1755 | int security_task_setscheduler(struct task_struct *p, | 1767 | int security_task_setscheduler(struct task_struct *p); |
1756 | int policy, struct sched_param *lp); | ||
1757 | int security_task_getscheduler(struct task_struct *p); | 1768 | int security_task_getscheduler(struct task_struct *p); |
1758 | int security_task_movememory(struct task_struct *p); | 1769 | int security_task_movememory(struct task_struct *p); |
1759 | int security_task_kill(struct task_struct *p, struct siginfo *info, | 1770 | int security_task_kill(struct task_struct *p, struct siginfo *info, |
@@ -2320,11 +2331,9 @@ static inline int security_task_setrlimit(struct task_struct *p, | |||
2320 | return 0; | 2331 | return 0; |
2321 | } | 2332 | } |
2322 | 2333 | ||
2323 | static inline int security_task_setscheduler(struct task_struct *p, | 2334 | static inline int security_task_setscheduler(struct task_struct *p) |
2324 | int policy, | ||
2325 | struct sched_param *lp) | ||
2326 | { | 2335 | { |
2327 | return cap_task_setscheduler(p, policy, lp); | 2336 | return cap_task_setscheduler(p); |
2328 | } | 2337 | } |
2329 | 2338 | ||
2330 | static inline int security_task_getscheduler(struct task_struct *p) | 2339 | static inline int security_task_getscheduler(struct task_struct *p) |
@@ -2551,6 +2560,9 @@ void security_inet_csk_clone(struct sock *newsk, | |||
2551 | const struct request_sock *req); | 2560 | const struct request_sock *req); |
2552 | void security_inet_conn_established(struct sock *sk, | 2561 | void security_inet_conn_established(struct sock *sk, |
2553 | struct sk_buff *skb); | 2562 | struct sk_buff *skb); |
2563 | int security_secmark_relabel_packet(u32 secid); | ||
2564 | void security_secmark_refcount_inc(void); | ||
2565 | void security_secmark_refcount_dec(void); | ||
2554 | int security_tun_dev_create(void); | 2566 | int security_tun_dev_create(void); |
2555 | void security_tun_dev_post_create(struct sock *sk); | 2567 | void security_tun_dev_post_create(struct sock *sk); |
2556 | int security_tun_dev_attach(struct sock *sk); | 2568 | int security_tun_dev_attach(struct sock *sk); |
@@ -2705,6 +2717,19 @@ static inline void security_inet_conn_established(struct sock *sk, | |||
2705 | { | 2717 | { |
2706 | } | 2718 | } |
2707 | 2719 | ||
2720 | static inline int security_secmark_relabel_packet(u32 secid) | ||
2721 | { | ||
2722 | return 0; | ||
2723 | } | ||
2724 | |||
2725 | static inline void security_secmark_refcount_inc(void) | ||
2726 | { | ||
2727 | } | ||
2728 | |||
2729 | static inline void security_secmark_refcount_dec(void) | ||
2730 | { | ||
2731 | } | ||
2732 | |||
2708 | static inline int security_tun_dev_create(void) | 2733 | static inline int security_tun_dev_create(void) |
2709 | { | 2734 | { |
2710 | return 0; | 2735 | return 0; |
diff --git a/include/linux/selection.h b/include/linux/selection.h index 8cdaa1151d2e..85193aa8c1e3 100644 --- a/include/linux/selection.h +++ b/include/linux/selection.h | |||
@@ -39,5 +39,6 @@ extern void putconsxy(struct vc_data *vc, unsigned char *p); | |||
39 | 39 | ||
40 | extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); | 40 | extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org); |
41 | extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); | 41 | extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org); |
42 | extern void vcs_scr_updated(struct vc_data *vc); | ||
42 | 43 | ||
43 | #endif | 44 | #endif |
diff --git a/include/linux/selinux.h b/include/linux/selinux.h index 82e0f26a1299..44f459612690 100644 --- a/include/linux/selinux.h +++ b/include/linux/selinux.h | |||
@@ -21,74 +21,11 @@ struct kern_ipc_perm; | |||
21 | #ifdef CONFIG_SECURITY_SELINUX | 21 | #ifdef CONFIG_SECURITY_SELINUX |
22 | 22 | ||
23 | /** | 23 | /** |
24 | * selinux_string_to_sid - map a security context string to a security ID | ||
25 | * @str: the security context string to be mapped | ||
26 | * @sid: ID value returned via this. | ||
27 | * | ||
28 | * Returns 0 if successful, with the SID stored in sid. A value | ||
29 | * of zero for sid indicates no SID could be determined (but no error | ||
30 | * occurred). | ||
31 | */ | ||
32 | int selinux_string_to_sid(char *str, u32 *sid); | ||
33 | |||
34 | /** | ||
35 | * selinux_secmark_relabel_packet_permission - secmark permission check | ||
36 | * @sid: SECMARK ID value to be applied to network packet | ||
37 | * | ||
38 | * Returns 0 if the current task is allowed to set the SECMARK label of | ||
39 | * packets with the supplied security ID. Note that it is implicit that | ||
40 | * the packet is always being relabeled from the default unlabeled value, | ||
41 | * and that the access control decision is made in the AVC. | ||
42 | */ | ||
43 | int selinux_secmark_relabel_packet_permission(u32 sid); | ||
44 | |||
45 | /** | ||
46 | * selinux_secmark_refcount_inc - increments the secmark use counter | ||
47 | * | ||
48 | * SELinux keeps track of the current SECMARK targets in use so it knows | ||
49 | * when to apply SECMARK label access checks to network packets. This | ||
50 | * function incements this reference count to indicate that a new SECMARK | ||
51 | * target has been configured. | ||
52 | */ | ||
53 | void selinux_secmark_refcount_inc(void); | ||
54 | |||
55 | /** | ||
56 | * selinux_secmark_refcount_dec - decrements the secmark use counter | ||
57 | * | ||
58 | * SELinux keeps track of the current SECMARK targets in use so it knows | ||
59 | * when to apply SECMARK label access checks to network packets. This | ||
60 | * function decements this reference count to indicate that one of the | ||
61 | * existing SECMARK targets has been removed/flushed. | ||
62 | */ | ||
63 | void selinux_secmark_refcount_dec(void); | ||
64 | |||
65 | /** | ||
66 | * selinux_is_enabled - is SELinux enabled? | 24 | * selinux_is_enabled - is SELinux enabled? |
67 | */ | 25 | */ |
68 | bool selinux_is_enabled(void); | 26 | bool selinux_is_enabled(void); |
69 | #else | 27 | #else |
70 | 28 | ||
71 | static inline int selinux_string_to_sid(const char *str, u32 *sid) | ||
72 | { | ||
73 | *sid = 0; | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static inline int selinux_secmark_relabel_packet_permission(u32 sid) | ||
78 | { | ||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static inline void selinux_secmark_refcount_inc(void) | ||
83 | { | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | static inline void selinux_secmark_refcount_dec(void) | ||
88 | { | ||
89 | return; | ||
90 | } | ||
91 | |||
92 | static inline bool selinux_is_enabled(void) | 29 | static inline bool selinux_is_enabled(void) |
93 | { | 30 | { |
94 | return false; | 31 | return false; |
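The SELinux-only selinux_secmark_* helpers removed above are superseded by the LSM-agnostic security_secmark_relabel_packet() and security_secmark_refcount_inc()/dec() hooks added in the security.h hunks; a sketch of how a SECMARK-style user would switch over (the caller names are hypothetical):

static int secmark_target_check(u32 secid)
{
        int err;

        /* was: selinux_secmark_relabel_packet_permission(secid) */
        err = security_secmark_relabel_packet(secid);
        if (err)
                return err;

        /* was: selinux_secmark_refcount_inc() */
        security_secmark_refcount_inc();
        return 0;
}

static void secmark_target_destroy(void)
{
        /* was: selinux_secmark_refcount_dec() */
        security_secmark_refcount_dec();
}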
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h index 7415839ac890..5310d27abd2a 100644 --- a/include/linux/semaphore.h +++ b/include/linux/semaphore.h | |||
@@ -26,6 +26,9 @@ struct semaphore { | |||
26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ | 26 | .wait_list = LIST_HEAD_INIT((name).wait_list), \ |
27 | } | 27 | } |
28 | 28 | ||
29 | #define DEFINE_SEMAPHORE(name) \ | ||
30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) | ||
31 | |||
29 | #define DECLARE_MUTEX(name) \ | 32 | #define DECLARE_MUTEX(name) \ |
30 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) | 33 | struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1) |
31 | 34 | ||
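DEFINE_SEMAPHORE(name) above declares a semaphore initialised to a count of 1, mirroring the old DECLARE_MUTEX; a minimal usage sketch with the existing down_interruptible()/up() calls:

static DEFINE_SEMAPHORE(my_sem);        /* count starts at 1 */

static int do_serialized_work(void)
{
        int ret = down_interruptible(&my_sem);

        if (ret)
                return ret;             /* interrupted while waiting */
        /* ... at most one task runs this section at a time ... */
        up(&my_sem);
        return 0;
}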
diff --git a/include/linux/serial.h b/include/linux/serial.h index 1ebc694a6d52..ef914061511e 100644 --- a/include/linux/serial.h +++ b/include/linux/serial.h | |||
@@ -77,8 +77,7 @@ struct serial_struct { | |||
77 | #define PORT_16654 11 | 77 | #define PORT_16654 11 |
78 | #define PORT_16850 12 | 78 | #define PORT_16850 12 |
79 | #define PORT_RSA 13 /* RSA-DV II/S card */ | 79 | #define PORT_RSA 13 /* RSA-DV II/S card */ |
80 | #define PORT_U6_16550A 14 | 80 | #define PORT_MAX 13 |
81 | #define PORT_MAX 14 | ||
82 | 81 | ||
83 | #define SERIAL_IO_PORT 0 | 82 | #define SERIAL_IO_PORT 0 |
84 | #define SERIAL_IO_HUB6 1 | 83 | #define SERIAL_IO_HUB6 1 |
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h index 7638deaaba65..97f5b45bbc07 100644 --- a/include/linux/serial_8250.h +++ b/include/linux/serial_8250.h | |||
@@ -35,6 +35,8 @@ struct plat_serial8250_port { | |||
35 | void (*set_termios)(struct uart_port *, | 35 | void (*set_termios)(struct uart_port *, |
36 | struct ktermios *new, | 36 | struct ktermios *new, |
37 | struct ktermios *old); | 37 | struct ktermios *old); |
38 | void (*pm)(struct uart_port *, unsigned int state, | ||
39 | unsigned old); | ||
38 | }; | 40 | }; |
39 | 41 | ||
40 | /* | 42 | /* |
@@ -76,5 +78,11 @@ extern int serial8250_find_port_for_earlycon(void); | |||
76 | extern int setup_early_serial8250_console(char *cmdline); | 78 | extern int setup_early_serial8250_console(char *cmdline); |
77 | extern void serial8250_do_set_termios(struct uart_port *port, | 79 | extern void serial8250_do_set_termios(struct uart_port *port, |
78 | struct ktermios *termios, struct ktermios *old); | 80 | struct ktermios *termios, struct ktermios *old); |
81 | extern void serial8250_do_pm(struct uart_port *port, unsigned int state, | ||
82 | unsigned int oldstate); | ||
83 | |||
84 | extern void serial8250_set_isa_configurator(void (*v) | ||
85 | (int port, struct uart_port *up, | ||
86 | unsigned short *capabilities)); | ||
79 | 87 | ||
80 | #endif | 88 | #endif |
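The new .pm hook on plat_serial8250_port and the exported serial8250_do_pm() above let board code wrap the default 8250 power management; a hedged sketch (the clock helpers are hypothetical board-specific functions):

static void board_uart_pm(struct uart_port *port, unsigned int state,
                          unsigned int old)
{
        if (state)
                board_uart_clock_off(port->line);       /* hypothetical helper */
        else
                board_uart_clock_on(port->line);        /* hypothetical helper */

        serial8250_do_pm(port, state, old);             /* then the stock behaviour */
}

static struct plat_serial8250_port board_uart_data[] = {
        {
                /* .mapbase, .irq, .uartclk, .flags as usual ... */
                .pm = board_uart_pm,
        },
        { },
};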
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index 64458a9a8938..99e5994e6f84 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -44,7 +44,8 @@ | |||
44 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ | 44 | #define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */ |
45 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ | 45 | #define PORT_OCTEON 17 /* Cavium OCTEON internal UART */ |
46 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ | 46 | #define PORT_AR7 18 /* Texas Instruments AR7 internal UART */ |
47 | #define PORT_MAX_8250 18 /* max port ID */ | 47 | #define PORT_U6_16550A 19 /* ST-Ericsson U6xxx internal UART */ |
48 | #define PORT_MAX_8250 19 /* max port ID */ | ||
48 | 49 | ||
49 | /* | 50 | /* |
50 | * ARM specific type numbers. These are not currently guaranteed | 51 | * ARM specific type numbers. These are not currently guaranteed |
@@ -288,6 +289,8 @@ struct uart_port { | |||
288 | void (*set_termios)(struct uart_port *, | 289 | void (*set_termios)(struct uart_port *, |
289 | struct ktermios *new, | 290 | struct ktermios *new, |
290 | struct ktermios *old); | 291 | struct ktermios *old); |
292 | void (*pm)(struct uart_port *, unsigned int state, | ||
293 | unsigned int old); | ||
291 | unsigned int irq; /* irq number */ | 294 | unsigned int irq; /* irq number */ |
292 | unsigned long irqflags; /* irq flags */ | 295 | unsigned long irqflags; /* irq flags */ |
293 | unsigned int uartclk; /* base uart clock */ | 296 | unsigned int uartclk; /* base uart clock */ |
@@ -410,6 +413,14 @@ unsigned int uart_get_baud_rate(struct uart_port *port, struct ktermios *termios | |||
410 | unsigned int max); | 413 | unsigned int max); |
411 | unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); | 414 | unsigned int uart_get_divisor(struct uart_port *port, unsigned int baud); |
412 | 415 | ||
416 | /* Base timer interval for polling */ | ||
417 | static inline int uart_poll_timeout(struct uart_port *port) | ||
418 | { | ||
419 | int timeout = port->timeout; | ||
420 | |||
421 | return timeout > 6 ? (timeout / 2 - 2) : 1; | ||
422 | } | ||
423 | |||
413 | /* | 424 | /* |
414 | * Console helpers. | 425 | * Console helpers. |
415 | */ | 426 | */ |
diff --git a/include/linux/sh_clk.h b/include/linux/sh_clk.h index 875ce50719a9..4dca992f3093 100644 --- a/include/linux/sh_clk.h +++ b/include/linux/sh_clk.h | |||
@@ -4,11 +4,20 @@ | |||
4 | #include <linux/list.h> | 4 | #include <linux/list.h> |
5 | #include <linux/seq_file.h> | 5 | #include <linux/seq_file.h> |
6 | #include <linux/cpufreq.h> | 6 | #include <linux/cpufreq.h> |
7 | #include <linux/types.h> | ||
8 | #include <linux/kref.h> | ||
7 | #include <linux/clk.h> | 9 | #include <linux/clk.h> |
8 | #include <linux/err.h> | 10 | #include <linux/err.h> |
9 | 11 | ||
10 | struct clk; | 12 | struct clk; |
11 | 13 | ||
14 | struct clk_mapping { | ||
15 | phys_addr_t phys; | ||
16 | void __iomem *base; | ||
17 | unsigned long len; | ||
18 | struct kref ref; | ||
19 | }; | ||
20 | |||
12 | struct clk_ops { | 21 | struct clk_ops { |
13 | void (*init)(struct clk *clk); | 22 | void (*init)(struct clk *clk); |
14 | int (*enable)(struct clk *clk); | 23 | int (*enable)(struct clk *clk); |
@@ -21,9 +30,6 @@ struct clk_ops { | |||
21 | 30 | ||
22 | struct clk { | 31 | struct clk { |
23 | struct list_head node; | 32 | struct list_head node; |
24 | const char *name; | ||
25 | int id; | ||
26 | |||
27 | struct clk *parent; | 33 | struct clk *parent; |
28 | struct clk **parent_table; /* list of parents to */ | 34 | struct clk **parent_table; /* list of parents to */ |
29 | unsigned short parent_num; /* choose between */ | 35 | unsigned short parent_num; /* choose between */ |
@@ -45,7 +51,9 @@ struct clk { | |||
45 | unsigned long arch_flags; | 51 | unsigned long arch_flags; |
46 | void *priv; | 52 | void *priv; |
47 | struct dentry *dentry; | 53 | struct dentry *dentry; |
54 | struct clk_mapping *mapping; | ||
48 | struct cpufreq_frequency_table *freq_table; | 55 | struct cpufreq_frequency_table *freq_table; |
56 | unsigned int nr_freqs; | ||
49 | }; | 57 | }; |
50 | 58 | ||
51 | #define CLK_ENABLE_ON_INIT (1 << 0) | 59 | #define CLK_ENABLE_ON_INIT (1 << 0) |
@@ -111,6 +119,9 @@ int clk_rate_table_find(struct clk *clk, | |||
111 | struct cpufreq_frequency_table *freq_table, | 119 | struct cpufreq_frequency_table *freq_table, |
112 | unsigned long rate); | 120 | unsigned long rate); |
113 | 121 | ||
122 | long clk_rate_div_range_round(struct clk *clk, unsigned int div_min, | ||
123 | unsigned int div_max, unsigned long rate); | ||
124 | |||
114 | #define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \ | 125 | #define SH_CLK_MSTP32(_parent, _enable_reg, _enable_bit, _flags) \ |
115 | { \ | 126 | { \ |
116 | .parent = _parent, \ | 127 | .parent = _parent, \ |
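clk_rate_div_range_round() declared above rounds a requested rate against a plain divider range; a speculative sketch of a round_rate op wrapping it (the 1..64 divider bounds, and the round_rate member on struct clk_ops, are assumptions not shown in this hunk):

static long board_div_clk_round_rate(struct clk *clk, unsigned long rate)
{
        /* best rate reachable with a 1..64 divider off the parent clock */
        return clk_rate_div_range_round(clk, 1, 64, rate);
}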
diff --git a/include/linux/sh_intc.h b/include/linux/sh_intc.h index 0d6cd38e673d..b4f183a31f13 100644 --- a/include/linux/sh_intc.h +++ b/include/linux/sh_intc.h | |||
@@ -20,6 +20,12 @@ struct intc_group { | |||
20 | 20 | ||
21 | #define INTC_GROUP(enum_id, ids...) { enum_id, { ids } } | 21 | #define INTC_GROUP(enum_id, ids...) { enum_id, { ids } } |
22 | 22 | ||
23 | struct intc_subgroup { | ||
24 | unsigned long reg, reg_width; | ||
25 | intc_enum parent_id; | ||
26 | intc_enum enum_ids[32]; | ||
27 | }; | ||
28 | |||
23 | struct intc_mask_reg { | 29 | struct intc_mask_reg { |
24 | unsigned long set_reg, clr_reg, reg_width; | 30 | unsigned long set_reg, clr_reg, reg_width; |
25 | intc_enum enum_ids[32]; | 31 | intc_enum enum_ids[32]; |
@@ -69,9 +75,12 @@ struct intc_hw_desc { | |||
69 | unsigned int nr_sense_regs; | 75 | unsigned int nr_sense_regs; |
70 | struct intc_mask_reg *ack_regs; | 76 | struct intc_mask_reg *ack_regs; |
71 | unsigned int nr_ack_regs; | 77 | unsigned int nr_ack_regs; |
78 | struct intc_subgroup *subgroups; | ||
79 | unsigned int nr_subgroups; | ||
72 | }; | 80 | }; |
73 | 81 | ||
74 | #define _INTC_ARRAY(a) a, sizeof(a)/sizeof(*a) | 82 | #define _INTC_ARRAY(a) a, a == NULL ? 0 : sizeof(a)/sizeof(*a) |
83 | |||
75 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ | 84 | #define INTC_HW_DESC(vectors, groups, mask_regs, \ |
76 | prio_regs, sense_regs, ack_regs) \ | 85 | prio_regs, sense_regs, ack_regs) \ |
77 | { \ | 86 | { \ |
@@ -105,8 +114,11 @@ struct intc_desc symbol __initdata = { \ | |||
105 | prio_regs, sense_regs, ack_regs), \ | 114 | prio_regs, sense_regs, ack_regs), \ |
106 | } | 115 | } |
107 | 116 | ||
108 | int __init register_intc_controller(struct intc_desc *desc); | 117 | int register_intc_controller(struct intc_desc *desc); |
118 | void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs); | ||
109 | int intc_set_priority(unsigned int irq, unsigned int prio); | 119 | int intc_set_priority(unsigned int irq, unsigned int prio); |
120 | int intc_irq_lookup(const char *chipname, intc_enum enum_id); | ||
121 | void intc_finalize(void); | ||
110 | 122 | ||
111 | #ifdef CONFIG_INTC_USERIMASK | 123 | #ifdef CONFIG_INTC_USERIMASK |
112 | int register_intc_userimask(unsigned long addr); | 124 | int register_intc_userimask(unsigned long addr); |
diff --git a/include/linux/sh_pfc.h b/include/linux/sh_pfc.h index 07c08af9f8f6..30cae70874f4 100644 --- a/include/linux/sh_pfc.h +++ b/include/linux/sh_pfc.h | |||
@@ -92,5 +92,6 @@ struct pinmux_info { | |||
92 | }; | 92 | }; |
93 | 93 | ||
94 | int register_pinmux(struct pinmux_info *pip); | 94 | int register_pinmux(struct pinmux_info *pip); |
95 | int unregister_pinmux(struct pinmux_info *pip); | ||
95 | 96 | ||
96 | #endif /* __SH_PFC_H */ | 97 | #endif /* __SH_PFC_H */ |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 77eb60d2b496..e6ba898de61c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -129,8 +129,13 @@ typedef struct skb_frag_struct skb_frag_t; | |||
129 | 129 | ||
130 | struct skb_frag_struct { | 130 | struct skb_frag_struct { |
131 | struct page *page; | 131 | struct page *page; |
132 | #if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) | ||
132 | __u32 page_offset; | 133 | __u32 page_offset; |
133 | __u32 size; | 134 | __u32 size; |
135 | #else | ||
136 | __u16 page_offset; | ||
137 | __u16 size; | ||
138 | #endif | ||
134 | }; | 139 | }; |
135 | 140 | ||
136 | #define HAVE_HW_TIME_STAMP | 141 | #define HAVE_HW_TIME_STAMP |
@@ -163,26 +168,19 @@ struct skb_shared_hwtstamps { | |||
163 | ktime_t syststamp; | 168 | ktime_t syststamp; |
164 | }; | 169 | }; |
165 | 170 | ||
166 | /** | 171 | /* Definitions for tx_flags in struct skb_shared_info */ |
167 | * struct skb_shared_tx - instructions for time stamping of outgoing packets | 172 | enum { |
168 | * @hardware: generate hardware time stamp | 173 | /* generate hardware time stamp */ |
169 | * @software: generate software time stamp | 174 | SKBTX_HW_TSTAMP = 1 << 0, |
170 | * @in_progress: device driver is going to provide | 175 | |
171 | * hardware time stamp | 176 | /* generate software time stamp */ |
172 | * @prevent_sk_orphan: make sk reference available on driver level | 177 | SKBTX_SW_TSTAMP = 1 << 1, |
173 | * @flags: all shared_tx flags | 178 | |
174 | * | 179 | /* device driver is going to provide hardware time stamp */ |
175 | * These flags are attached to packets as part of the | 180 | SKBTX_IN_PROGRESS = 1 << 2, |
176 | * &skb_shared_info. Use skb_tx() to get a pointer. | 181 | |
177 | */ | 182 | /* ensure the originating sk reference is available on driver level */ |
178 | union skb_shared_tx { | 183 | SKBTX_DRV_NEEDS_SK_REF = 1 << 3, |
179 | struct { | ||
180 | __u8 hardware:1, | ||
181 | software:1, | ||
182 | in_progress:1, | ||
183 | prevent_sk_orphan:1; | ||
184 | }; | ||
185 | __u8 flags; | ||
186 | }; | 184 | }; |
187 | 185 | ||
188 | /* This data is invariant across clones and lives at | 186 | /* This data is invariant across clones and lives at |
@@ -195,7 +193,7 @@ struct skb_shared_info { | |||
195 | unsigned short gso_segs; | 193 | unsigned short gso_segs; |
196 | unsigned short gso_type; | 194 | unsigned short gso_type; |
197 | __be32 ip6_frag_id; | 195 | __be32 ip6_frag_id; |
198 | union skb_shared_tx tx_flags; | 196 | __u8 tx_flags; |
199 | struct sk_buff *frag_list; | 197 | struct sk_buff *frag_list; |
200 | struct skb_shared_hwtstamps hwtstamps; | 198 | struct skb_shared_hwtstamps hwtstamps; |
201 | 199 | ||
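With the union skb_shared_tx bitfield replaced by the SKBTX_* flags and a plain __u8 tx_flags in skb_shared_info above, drivers test bits directly on skb_shinfo(skb); a sketch of the before/after in a transmit path (the driver context is hypothetical):

static void xmit_maybe_timestamp(struct sk_buff *skb)
{
        /* old style (removed):
         *      union skb_shared_tx *shtx = skb_tx(skb);
         *      if (shtx->hardware) { shtx->in_progress = 1; ... }
         */
        if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                /* ... arm the NIC's TX timestamp for this descriptor ... */
        }
}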
@@ -462,19 +460,7 @@ static inline void skb_dst_set(struct sk_buff *skb, struct dst_entry *dst) | |||
462 | skb->_skb_refdst = (unsigned long)dst; | 460 | skb->_skb_refdst = (unsigned long)dst; |
463 | } | 461 | } |
464 | 462 | ||
465 | /** | 463 | extern void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst); |
466 | * skb_dst_set_noref - sets skb dst, without a reference | ||
467 | * @skb: buffer | ||
468 | * @dst: dst entry | ||
469 | * | ||
470 | * Sets skb dst, assuming a reference was not taken on dst | ||
471 | * skb_dst_drop() should not dst_release() this dst | ||
472 | */ | ||
473 | static inline void skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst) | ||
474 | { | ||
475 | WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held()); | ||
476 | skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF; | ||
477 | } | ||
478 | 464 | ||
479 | /** | 465 | /** |
480 | * skb_dst_is_noref - Test if skb dst isnt refcounted | 466 | * skb_dst_is_noref - Test if skb dst isnt refcounted |
@@ -498,13 +484,13 @@ extern struct sk_buff *__alloc_skb(unsigned int size, | |||
498 | static inline struct sk_buff *alloc_skb(unsigned int size, | 484 | static inline struct sk_buff *alloc_skb(unsigned int size, |
499 | gfp_t priority) | 485 | gfp_t priority) |
500 | { | 486 | { |
501 | return __alloc_skb(size, priority, 0, -1); | 487 | return __alloc_skb(size, priority, 0, NUMA_NO_NODE); |
502 | } | 488 | } |
503 | 489 | ||
504 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, | 490 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, |
505 | gfp_t priority) | 491 | gfp_t priority) |
506 | { | 492 | { |
507 | return __alloc_skb(size, priority, 1, -1); | 493 | return __alloc_skb(size, priority, 1, NUMA_NO_NODE); |
508 | } | 494 | } |
509 | 495 | ||
510 | extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); | 496 | extern bool skb_recycle_check(struct sk_buff *skb, int skb_size); |
@@ -558,6 +544,15 @@ extern unsigned int skb_find_text(struct sk_buff *skb, unsigned int from, | |||
558 | unsigned int to, struct ts_config *config, | 544 | unsigned int to, struct ts_config *config, |
559 | struct ts_state *state); | 545 | struct ts_state *state); |
560 | 546 | ||
547 | extern __u32 __skb_get_rxhash(struct sk_buff *skb); | ||
548 | static inline __u32 skb_get_rxhash(struct sk_buff *skb) | ||
549 | { | ||
550 | if (!skb->rxhash) | ||
551 | skb->rxhash = __skb_get_rxhash(skb); | ||
552 | |||
553 | return skb->rxhash; | ||
554 | } | ||
555 | |||
561 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 556 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
562 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) | 557 | static inline unsigned char *skb_end_pointer(const struct sk_buff *skb) |
563 | { | 558 | { |
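skb_get_rxhash() above computes the flow hash lazily and caches it in skb->rxhash, so repeated callers pay for __skb_get_rxhash() at most once; an illustrative one-liner (the bucketing use is a made-up example, and nr_buckets must be non-zero):

static u16 pick_bucket(struct sk_buff *skb, u16 nr_buckets)
{
        return skb_get_rxhash(skb) % nr_buckets;        /* computed once, then cached */
}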
@@ -578,11 +573,6 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb) | |||
578 | return &skb_shinfo(skb)->hwtstamps; | 573 | return &skb_shinfo(skb)->hwtstamps; |
579 | } | 574 | } |
580 | 575 | ||
581 | static inline union skb_shared_tx *skb_tx(struct sk_buff *skb) | ||
582 | { | ||
583 | return &skb_shinfo(skb)->tx_flags; | ||
584 | } | ||
585 | |||
586 | /** | 576 | /** |
587 | * skb_queue_empty - check if a queue is empty | 577 | * skb_queue_empty - check if a queue is empty |
588 | * @list: queue head | 578 | * @list: queue head |
@@ -604,7 +594,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list) | |||
604 | static inline bool skb_queue_is_last(const struct sk_buff_head *list, | 594 | static inline bool skb_queue_is_last(const struct sk_buff_head *list, |
605 | const struct sk_buff *skb) | 595 | const struct sk_buff *skb) |
606 | { | 596 | { |
607 | return (skb->next == (struct sk_buff *) list); | 597 | return skb->next == (struct sk_buff *)list; |
608 | } | 598 | } |
609 | 599 | ||
610 | /** | 600 | /** |
@@ -617,7 +607,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list, | |||
617 | static inline bool skb_queue_is_first(const struct sk_buff_head *list, | 607 | static inline bool skb_queue_is_first(const struct sk_buff_head *list, |
618 | const struct sk_buff *skb) | 608 | const struct sk_buff *skb) |
619 | { | 609 | { |
620 | return (skb->prev == (struct sk_buff *) list); | 610 | return skb->prev == (struct sk_buff *)list; |
621 | } | 611 | } |
622 | 612 | ||
623 | /** | 613 | /** |
@@ -1123,7 +1113,7 @@ extern void skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, | |||
1123 | int off, int size); | 1113 | int off, int size); |
1124 | 1114 | ||
1125 | #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) | 1115 | #define SKB_PAGE_ASSERT(skb) BUG_ON(skb_shinfo(skb)->nr_frags) |
1126 | #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frags(skb)) | 1116 | #define SKB_FRAG_ASSERT(skb) BUG_ON(skb_has_frag_list(skb)) |
1127 | #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) | 1117 | #define SKB_LINEAR_ASSERT(skb) BUG_ON(skb_is_nonlinear(skb)) |
1128 | 1118 | ||
1129 | #ifdef NET_SKBUFF_DATA_USES_OFFSET | 1119 | #ifdef NET_SKBUFF_DATA_USES_OFFSET |
@@ -1561,13 +1551,25 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev, | |||
1561 | return skb; | 1551 | return skb; |
1562 | } | 1552 | } |
1563 | 1553 | ||
1564 | extern struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask); | 1554 | /** |
1555 | * __netdev_alloc_page - allocate a page for ps-rx on a specific device | ||
1556 | * @dev: network device to receive on | ||
1557 | * @gfp_mask: alloc_pages_node mask | ||
1558 | * | ||
1559 | * Allocate a new page. dev currently unused. | ||
1560 | * | ||
1561 | * %NULL is returned if there is no free memory. | ||
1562 | */ | ||
1563 | static inline struct page *__netdev_alloc_page(struct net_device *dev, gfp_t gfp_mask) | ||
1564 | { | ||
1565 | return alloc_pages_node(NUMA_NO_NODE, gfp_mask, 0); | ||
1566 | } | ||
1565 | 1567 | ||
1566 | /** | 1568 | /** |
1567 | * netdev_alloc_page - allocate a page for ps-rx on a specific device | 1569 | * netdev_alloc_page - allocate a page for ps-rx on a specific device |
1568 | * @dev: network device to receive on | 1570 | * @dev: network device to receive on |
1569 | * | 1571 | * |
1570 | * Allocate a new page node local to the specified device. | 1572 | * Allocate a new page. dev currently unused. |
1571 | * | 1573 | * |
1572 | * %NULL is returned if there is no free memory. | 1574 | * %NULL is returned if there is no free memory. |
1573 | */ | 1575 | */ |
@@ -1787,7 +1789,7 @@ static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | |||
1787 | skb = skb->prev) | 1789 | skb = skb->prev) |
1788 | 1790 | ||
1789 | 1791 | ||
1790 | static inline bool skb_has_frags(const struct sk_buff *skb) | 1792 | static inline bool skb_has_frag_list(const struct sk_buff *skb) |
1791 | { | 1793 | { |
1792 | return skb_shinfo(skb)->frag_list != NULL; | 1794 | return skb_shinfo(skb)->frag_list != NULL; |
1793 | } | 1795 | } |
@@ -1987,8 +1989,8 @@ extern void skb_tstamp_tx(struct sk_buff *orig_skb, | |||
1987 | 1989 | ||
1988 | static inline void sw_tx_timestamp(struct sk_buff *skb) | 1990 | static inline void sw_tx_timestamp(struct sk_buff *skb) |
1989 | { | 1991 | { |
1990 | union skb_shared_tx *shtx = skb_tx(skb); | 1992 | if (skb_shinfo(skb)->tx_flags & SKBTX_SW_TSTAMP && |
1991 | if (shtx->software && !shtx->in_progress) | 1993 | !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) |
1992 | skb_tstamp_tx(skb, NULL); | 1994 | skb_tstamp_tx(skb, NULL); |
1993 | } | 1995 | } |
1994 | 1996 | ||
@@ -2159,7 +2161,7 @@ static inline u16 skb_get_rx_queue(const struct sk_buff *skb) | |||
2159 | 2161 | ||
2160 | static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) | 2162 | static inline bool skb_rx_queue_recorded(const struct sk_buff *skb) |
2161 | { | 2163 | { |
2162 | return (skb->queue_mapping != 0); | 2164 | return skb->queue_mapping != 0; |
2163 | } | 2165 | } |
2164 | 2166 | ||
2165 | extern u16 skb_tx_hash(const struct net_device *dev, | 2167 | extern u16 skb_tx_hash(const struct net_device *dev, |
@@ -2209,6 +2211,21 @@ static inline void skb_forward_csum(struct sk_buff *skb) | |||
2209 | skb->ip_summed = CHECKSUM_NONE; | 2211 | skb->ip_summed = CHECKSUM_NONE; |
2210 | } | 2212 | } |
2211 | 2213 | ||
2214 | /** | ||
2215 | * skb_checksum_none_assert - make sure skb ip_summed is CHECKSUM_NONE | ||
2216 | * @skb: skb to check | ||
2217 | * | ||
2218 | * Fresh skbs have their ip_summed set to CHECKSUM_NONE. | ||
2219 | * Instead of forcing ip_summed to CHECKSUM_NONE, we can | ||
2220 | * use this helper to document places where we make this assertion. | ||
2221 | */ | ||
2222 | static inline void skb_checksum_none_assert(struct sk_buff *skb) | ||
2223 | { | ||
2224 | #ifdef DEBUG | ||
2225 | BUG_ON(skb->ip_summed != CHECKSUM_NONE); | ||
2226 | #endif | ||
2227 | } | ||
2228 | |||
2212 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); | 2229 | bool skb_partial_csum_set(struct sk_buff *skb, u16 start, u16 off); |
2213 | #endif /* __KERNEL__ */ | 2230 | #endif /* __KERNEL__ */ |
2214 | #endif /* _LINUX_SKBUFF_H */ | 2231 | #endif /* _LINUX_SKBUFF_H */ |
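
The skbuff.h changes above fold the old skb_shared_tx union into SKBTX_* bits in tx_flags, rename skb_has_frags() to skb_has_frag_list(), and add skb_checksum_none_assert(). A minimal sketch of how a receive path could pick up the new helpers; foo_rx(), foo_priv and its fields are illustrative, not part of the patch:

    /* Hypothetical driver RX path; only the skb helpers come from the header above. */
    static void foo_rx(struct foo_priv *priv, struct sk_buff *skb)
    {
            /* Document (and, when DEBUG is set, enforce) that ip_summed is CHECKSUM_NONE. */
            skb_checksum_none_assert(skb);

            if (priv->hw_csum_ok)
                    skb->ip_summed = CHECKSUM_UNNECESSARY;

            /* Former skb_has_frags() callers now use the renamed helper. */
            if (skb_has_frag_list(skb))
                    priv->stats.frag_list_rx++;

            netif_receive_skb(skb);
    }
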
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 6d14409c4d9a..e4f5ed180b9b 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -68,7 +68,7 @@ struct kmem_cache_order_objects { | |||
68 | * Slab cache management. | 68 | * Slab cache management. |
69 | */ | 69 | */ |
70 | struct kmem_cache { | 70 | struct kmem_cache { |
71 | struct kmem_cache_cpu *cpu_slab; | 71 | struct kmem_cache_cpu __percpu *cpu_slab; |
72 | /* Used for retrieving partial slabs etc */ | 72 | /* Used for retrieving partial slabs etc */ |
73 | unsigned long flags; | 73 | unsigned long flags; |
74 | int size; /* The size of an object including meta data */ | 74 | int size; /* The size of an object including meta data */ |
@@ -87,7 +87,7 @@ struct kmem_cache { | |||
87 | unsigned long min_partial; | 87 | unsigned long min_partial; |
88 | const char *name; /* Name (only for display!) */ | 88 | const char *name; /* Name (only for display!) */ |
89 | struct list_head list; /* List of slab caches */ | 89 | struct list_head list; /* List of slab caches */ |
90 | #ifdef CONFIG_SLUB_DEBUG | 90 | #ifdef CONFIG_SYSFS |
91 | struct kobject kobj; /* For sysfs */ | 91 | struct kobject kobj; /* For sysfs */ |
92 | #endif | 92 | #endif |
93 | 93 | ||
@@ -96,11 +96,8 @@ struct kmem_cache { | |||
96 | * Defragmentation by allocating from a remote node. | 96 | * Defragmentation by allocating from a remote node. |
97 | */ | 97 | */ |
98 | int remote_node_defrag_ratio; | 98 | int remote_node_defrag_ratio; |
99 | struct kmem_cache_node *node[MAX_NUMNODES]; | ||
100 | #else | ||
101 | /* Avoid an extra cache line for UP */ | ||
102 | struct kmem_cache_node local_node; | ||
103 | #endif | 99 | #endif |
100 | struct kmem_cache_node *node[MAX_NUMNODES]; | ||
104 | }; | 101 | }; |
105 | 102 | ||
106 | /* | 103 | /* |
@@ -139,19 +136,16 @@ struct kmem_cache { | |||
139 | 136 | ||
140 | #ifdef CONFIG_ZONE_DMA | 137 | #ifdef CONFIG_ZONE_DMA |
141 | #define SLUB_DMA __GFP_DMA | 138 | #define SLUB_DMA __GFP_DMA |
142 | /* Reserve extra caches for potential DMA use */ | ||
143 | #define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT) | ||
144 | #else | 139 | #else |
145 | /* Disable DMA functionality */ | 140 | /* Disable DMA functionality */ |
146 | #define SLUB_DMA (__force gfp_t)0 | 141 | #define SLUB_DMA (__force gfp_t)0 |
147 | #define KMALLOC_CACHES SLUB_PAGE_SHIFT | ||
148 | #endif | 142 | #endif |
149 | 143 | ||
150 | /* | 144 | /* |
151 | * We keep the general caches in an array of slab caches that are used for | 145 | * We keep the general caches in an array of slab caches that are used for |
152 | * 2^x bytes of allocations. | 146 | * 2^x bytes of allocations. |
153 | */ | 147 | */ |
154 | extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES]; | 148 | extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT]; |
155 | 149 | ||
156 | /* | 150 | /* |
157 | * Sorry that the following has to be that ugly but some versions of GCC | 151 | * Sorry that the following has to be that ugly but some versions of GCC |
@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size) | |||
216 | if (index == 0) | 210 | if (index == 0) |
217 | return NULL; | 211 | return NULL; |
218 | 212 | ||
219 | return &kmalloc_caches[index]; | 213 | return kmalloc_caches[index]; |
220 | } | 214 | } |
221 | 215 | ||
222 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); | 216 | void *kmem_cache_alloc(struct kmem_cache *, gfp_t); |
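
With the changes above, kmalloc_caches becomes an array of cache pointers and cpu_slab carries the __percpu annotation, so allocator-internal code must reach it through the percpu accessors (this_cpu_ptr() and friends). A sketch of a caller resolving a general cache through kmalloc_slab(); foo_kmalloc() is illustrative:

    static void *foo_kmalloc(size_t size, gfp_t flags)
    {
            /* kmalloc_slab() now hands back a pointer out of kmalloc_caches[] */
            struct kmem_cache *s = kmalloc_slab(size);

            if (!s)
                    return NULL;    /* no general cache for this size */
            return kmem_cache_alloc(s, flags);
    }
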
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 2ea1dd1ba21c..291f721144c2 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h | |||
@@ -54,12 +54,15 @@ static inline void cycle_kernel_lock(void) | |||
54 | 54 | ||
55 | #else | 55 | #else |
56 | 56 | ||
57 | #ifdef CONFIG_BKL /* provoke build bug if not set */ | ||
57 | #define lock_kernel() | 58 | #define lock_kernel() |
58 | #define unlock_kernel() | 59 | #define unlock_kernel() |
59 | #define release_kernel_lock(task) do { } while(0) | ||
60 | #define cycle_kernel_lock() do { } while(0) | 60 | #define cycle_kernel_lock() do { } while(0) |
61 | #define reacquire_kernel_lock(task) 0 | ||
62 | #define kernel_locked() 1 | 61 | #define kernel_locked() 1 |
62 | #endif /* CONFIG_BKL */ | ||
63 | |||
64 | #define release_kernel_lock(task) do { } while(0) | ||
65 | #define reacquire_kernel_lock(task) 0 | ||
63 | 66 | ||
64 | #endif /* CONFIG_LOCK_KERNEL */ | 67 | #endif /* CONFIG_LOCK_KERNEL */ |
65 | #endif /* __LINUX_SMPLOCK_H */ | 68 | #endif /* __LINUX_SMPLOCK_H */ |
diff --git a/include/linux/socket.h b/include/linux/socket.h index a2fada9becb6..5146b50202ce 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
@@ -322,11 +322,10 @@ extern int csum_partial_copy_fromiovecend(unsigned char *kdata, | |||
322 | int offset, | 322 | int offset, |
323 | unsigned int len, __wsum *csump); | 323 | unsigned int len, __wsum *csump); |
324 | 324 | ||
325 | extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); | 325 | extern long verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr *address, int mode); |
326 | extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); | 326 | extern int memcpy_toiovec(struct iovec *v, unsigned char *kdata, int len); |
327 | extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, | 327 | extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata, |
328 | int offset, int len); | 328 | int offset, int len); |
329 | extern int move_addr_to_user(struct sockaddr *kaddr, int klen, void __user *uaddr, int __user *ulen); | ||
330 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr); | 329 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr *kaddr); |
331 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); | 330 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); |
332 | 331 | ||
diff --git a/include/linux/spi/dw_spi.h b/include/linux/spi/dw_spi.h index cc813f95a2f2..c91302f3a257 100644 --- a/include/linux/spi/dw_spi.h +++ b/include/linux/spi/dw_spi.h | |||
@@ -14,7 +14,9 @@ | |||
14 | #define SPI_MODE_OFFSET 6 | 14 | #define SPI_MODE_OFFSET 6 |
15 | #define SPI_SCPH_OFFSET 6 | 15 | #define SPI_SCPH_OFFSET 6 |
16 | #define SPI_SCOL_OFFSET 7 | 16 | #define SPI_SCOL_OFFSET 7 |
17 | |||
17 | #define SPI_TMOD_OFFSET 8 | 18 | #define SPI_TMOD_OFFSET 8 |
19 | #define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET) | ||
18 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ | 20 | #define SPI_TMOD_TR 0x0 /* xmit & recv */ |
19 | #define SPI_TMOD_TO 0x1 /* xmit only */ | 21 | #define SPI_TMOD_TO 0x1 /* xmit only */ |
20 | #define SPI_TMOD_RO 0x2 /* recv only */ | 22 | #define SPI_TMOD_RO 0x2 /* recv only */ |
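
The new SPI_TMOD_MASK lets the transfer-mode field be rewritten without disturbing neighbouring control-register bits. A small illustrative helper (not part of the header); cr0 stands for a shadow of the controller's first control register:

    static inline u32 foo_set_tmod(u32 cr0, u32 tmod)
    {
            cr0 &= ~SPI_TMOD_MASK;                  /* clear the TMOD field */
            cr0 |= tmod << SPI_TMOD_OFFSET;         /* e.g. SPI_TMOD_RO for receive-only */
            return cr0;
    }
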
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index ae0a5286f558..92e52a1e6af3 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -213,6 +213,9 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
213 | * @dma_alignment: SPI controller constraint on DMA buffers alignment. | 213 | * @dma_alignment: SPI controller constraint on DMA buffers alignment. |
214 | * @mode_bits: flags understood by this controller driver | 214 | * @mode_bits: flags understood by this controller driver |
215 | * @flags: other constraints relevant to this driver | 215 | * @flags: other constraints relevant to this driver |
216 | * @bus_lock_spinlock: spinlock for SPI bus locking | ||
217 | * @bus_lock_mutex: mutex for SPI bus locking | ||
218 | * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use | ||
216 | * @setup: updates the device mode and clocking records used by a | 219 | * @setup: updates the device mode and clocking records used by a |
217 | * device's SPI controller; protocol code may call this. This | 220 | * device's SPI controller; protocol code may call this. This |
218 | * must fail if an unrecognized or unsupported mode is requested. | 221 | * must fail if an unrecognized or unsupported mode is requested. |
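
The documented bus_lock_spinlock, bus_lock_mutex and bus_lock_flag fields back the SPI bus-locking interface. Assuming the spi_bus_lock()/spi_sync_locked()/spi_bus_unlock() calls introduced alongside them, a client that must issue several messages with no other traffic in between might look like this (the messages themselves are hypothetical):

    /* Sketch of exclusive-bus usage; error handling trimmed for brevity. */
    static void foo_atomic_sequence(struct spi_device *spi,
                                    struct spi_message *setup_msg,
                                    struct spi_message *data_msg)
    {
            spi_bus_lock(spi->master);      /* other clients now block in spi_sync() */

            spi_sync_locked(spi, setup_msg);
            spi_sync_locked(spi, data_msg);

            spi_bus_unlock(spi->master);
    }
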
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index f8854655860e..80e535897de6 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -50,6 +50,7 @@ | |||
50 | #include <linux/preempt.h> | 50 | #include <linux/preempt.h> |
51 | #include <linux/linkage.h> | 51 | #include <linux/linkage.h> |
52 | #include <linux/compiler.h> | 52 | #include <linux/compiler.h> |
53 | #include <linux/irqflags.h> | ||
53 | #include <linux/thread_info.h> | 54 | #include <linux/thread_info.h> |
54 | #include <linux/kernel.h> | 55 | #include <linux/kernel.h> |
55 | #include <linux/stringify.h> | 56 | #include <linux/stringify.h> |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 4d5d2f546dbf..58971e891f48 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -108,19 +108,43 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) | |||
108 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 108 | #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
109 | 109 | ||
110 | /** | 110 | /** |
111 | * srcu_dereference - fetch SRCU-protected pointer with checking | 111 | * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing |
112 | * @p: the pointer to fetch and protect for later dereferencing | ||
113 | * @sp: pointer to the srcu_struct, which is used to check that we | ||
114 | * really are in an SRCU read-side critical section. | ||
115 | * @c: condition to check for update-side use | ||
112 | * | 116 | * |
113 | * Makes rcu_dereference_check() do the dirty work. | 117 | * If PROVE_RCU is enabled, invoking this outside of an RCU read-side |
118 | * critical section will result in an RCU-lockdep splat, unless @c evaluates | ||
119 | * to 1. The @c argument will normally be a logical expression containing | ||
120 | * lockdep_is_held() calls. | ||
114 | */ | 121 | */ |
115 | #define srcu_dereference(p, sp) \ | 122 | #define srcu_dereference_check(p, sp, c) \ |
116 | rcu_dereference_check(p, srcu_read_lock_held(sp)) | 123 | __rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu) |
124 | |||
125 | /** | ||
126 | * srcu_dereference - fetch SRCU-protected pointer for later dereferencing | ||
127 | * @p: the pointer to fetch and protect for later dereferencing | ||
128 | * @sp: pointer to the srcu_struct, which is used to check that we | ||
129 | * really are in an SRCU read-side critical section. | ||
130 | * | ||
131 | * Makes rcu_dereference_check() do the dirty work. If PROVE_RCU | ||
132 | * is enabled, invoking this outside of an RCU read-side critical | ||
133 | * section will result in an RCU-lockdep splat. | ||
134 | */ | ||
135 | #define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0) | ||
117 | 136 | ||
118 | /** | 137 | /** |
119 | * srcu_read_lock - register a new reader for an SRCU-protected structure. | 138 | * srcu_read_lock - register a new reader for an SRCU-protected structure. |
120 | * @sp: srcu_struct in which to register the new reader. | 139 | * @sp: srcu_struct in which to register the new reader. |
121 | * | 140 | * |
122 | * Enter an SRCU read-side critical section. Note that SRCU read-side | 141 | * Enter an SRCU read-side critical section. Note that SRCU read-side |
123 | * critical sections may be nested. | 142 | * critical sections may be nested. However, it is illegal to |
143 | * call anything that waits on an SRCU grace period for the same | ||
144 | * srcu_struct, whether directly or indirectly. Please note that | ||
145 | * one way to indirectly wait on an SRCU grace period is to acquire | ||
146 | * a mutex that is held elsewhere while calling synchronize_srcu() or | ||
147 | * synchronize_srcu_expedited(). | ||
124 | */ | 148 | */ |
125 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) | 149 | static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp) |
126 | { | 150 | { |
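
A short sketch of how srcu_dereference_check() is meant to be used: readers inside srcu_read_lock() keep using srcu_dereference(), while update-side code that holds a lock instead passes that fact as the extra condition so lockdep stays quiet. All names below are illustrative:

    struct foo;
    static struct srcu_struct foo_srcu;             /* assumed initialised elsewhere */
    static DEFINE_MUTEX(foo_mutex);
    static struct foo __rcu *foo_ptr;

    static struct foo *foo_get_reader(void)         /* caller holds srcu_read_lock(&foo_srcu) */
    {
            return srcu_dereference(foo_ptr, &foo_srcu);
    }

    static struct foo *foo_get_updater(void)        /* caller holds foo_mutex instead */
    {
            return srcu_dereference_check(foo_ptr, &foo_srcu,
                                          lockdep_is_held(&foo_mutex));
    }
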
diff --git a/include/linux/ssb/ssb_regs.h b/include/linux/ssb/ssb_regs.h index a6d5225b9275..11daf9c140e7 100644 --- a/include/linux/ssb/ssb_regs.h +++ b/include/linux/ssb/ssb_regs.h | |||
@@ -97,6 +97,7 @@ | |||
97 | #define SSB_TMSLOW_RESET 0x00000001 /* Reset */ | 97 | #define SSB_TMSLOW_RESET 0x00000001 /* Reset */ |
98 | #define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ | 98 | #define SSB_TMSLOW_REJECT_22 0x00000002 /* Reject (Backplane rev 2.2) */ |
99 | #define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ | 99 | #define SSB_TMSLOW_REJECT_23 0x00000004 /* Reject (Backplane rev 2.3) */ |
100 | #define SSB_TMSLOW_PHYCLK 0x00000010 /* MAC PHY Clock Control Enable */ | ||
100 | #define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ | 101 | #define SSB_TMSLOW_CLOCK 0x00010000 /* Clock Enable */ |
101 | #define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ | 102 | #define SSB_TMSLOW_FGC 0x00020000 /* Force Gated Clocks On */ |
102 | #define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ | 103 | #define SSB_TMSLOW_PE 0x40000000 /* Power Management Enable */ |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 632ff7c03280..d66c61774d95 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
@@ -32,10 +32,14 @@ | |||
32 | struct plat_stmmacenet_data { | 32 | struct plat_stmmacenet_data { |
33 | int bus_id; | 33 | int bus_id; |
34 | int pbl; | 34 | int pbl; |
35 | int clk_csr; | ||
35 | int has_gmac; | 36 | int has_gmac; |
36 | int enh_desc; | 37 | int enh_desc; |
38 | int tx_coe; | ||
39 | int bugged_jumbo; | ||
40 | int pmt; | ||
37 | void (*fix_mac_speed)(void *priv, unsigned int speed); | 41 | void (*fix_mac_speed)(void *priv, unsigned int speed); |
38 | void (*bus_setup)(unsigned long ioaddr); | 42 | void (*bus_setup)(void __iomem *ioaddr); |
39 | #ifdef CONFIG_STM_DRIVERS | 43 | #ifdef CONFIG_STM_DRIVERS |
40 | struct stm_pad_config *pad_config; | 44 | struct stm_pad_config *pad_config; |
41 | #endif | 45 | #endif |
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h index 6b524a0d02e4..1808960c5059 100644 --- a/include/linux/stop_machine.h +++ b/include/linux/stop_machine.h | |||
@@ -126,8 +126,8 @@ int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus); | |||
126 | 126 | ||
127 | #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 127 | #else /* CONFIG_STOP_MACHINE && CONFIG_SMP */ |
128 | 128 | ||
129 | static inline int stop_machine(int (*fn)(void *), void *data, | 129 | static inline int __stop_machine(int (*fn)(void *), void *data, |
130 | const struct cpumask *cpus) | 130 | const struct cpumask *cpus) |
131 | { | 131 | { |
132 | int ret; | 132 | int ret; |
133 | local_irq_disable(); | 133 | local_irq_disable(); |
@@ -136,5 +136,11 @@ static inline int stop_machine(int (*fn)(void *), void *data, | |||
136 | return ret; | 136 | return ret; |
137 | } | 137 | } |
138 | 138 | ||
139 | static inline int stop_machine(int (*fn)(void *), void *data, | ||
140 | const struct cpumask *cpus) | ||
141 | { | ||
142 | return __stop_machine(fn, data, cpus); | ||
143 | } | ||
144 | |||
139 | #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ | 145 | #endif /* CONFIG_STOP_MACHINE && CONFIG_SMP */ |
140 | #endif /* _LINUX_STOP_MACHINE */ | 146 | #endif /* _LINUX_STOP_MACHINE */ |
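
Callers are unchanged by the UP fallback above: on SMP the callback runs while every other CPU spins with interrupts off, on UP it simply runs with local interrupts disabled. A minimal illustrative caller:

    /* Work that must not race with code on any other CPU. */
    static int foo_sync_state(void *data)
    {
            /* ... flip some global state atomically with respect to all CPUs ... */
            return 0;
    }

    static int foo_sync_all(void)
    {
            /* NULL cpumask: fn runs on one CPU while the rest spin with IRQs off */
            return stop_machine(foo_sync_state, NULL, NULL);
    }
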
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h index 671538d25bc1..8eee9dbbfe7a 100644 --- a/include/linux/sunrpc/auth_gss.h +++ b/include/linux/sunrpc/auth_gss.h | |||
@@ -69,7 +69,7 @@ struct gss_cl_ctx { | |||
69 | enum rpc_gss_proc gc_proc; | 69 | enum rpc_gss_proc gc_proc; |
70 | u32 gc_seq; | 70 | u32 gc_seq; |
71 | spinlock_t gc_seq_lock; | 71 | spinlock_t gc_seq_lock; |
72 | struct gss_ctx *gc_gss_ctx; | 72 | struct gss_ctx __rcu *gc_gss_ctx; |
73 | struct xdr_netobj gc_wire_ctx; | 73 | struct xdr_netobj gc_wire_ctx; |
74 | u32 gc_win; | 74 | u32 gc_win; |
75 | unsigned long gc_expiry; | 75 | unsigned long gc_expiry; |
@@ -80,7 +80,7 @@ struct gss_upcall_msg; | |||
80 | struct gss_cred { | 80 | struct gss_cred { |
81 | struct rpc_cred gc_base; | 81 | struct rpc_cred gc_base; |
82 | enum rpc_gss_svc gc_service; | 82 | enum rpc_gss_svc gc_service; |
83 | struct gss_cl_ctx *gc_ctx; | 83 | struct gss_cl_ctx __rcu *gc_ctx; |
84 | struct gss_upcall_msg *gc_upcall; | 84 | struct gss_upcall_msg *gc_upcall; |
85 | unsigned long gc_upcall_timestamp; | 85 | unsigned long gc_upcall_timestamp; |
86 | unsigned char gc_machine_cred : 1; | 86 | unsigned char gc_machine_cred : 1; |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 569dc722a600..85f38a63f098 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -30,7 +30,7 @@ struct rpc_inode; | |||
30 | * The high-level client handle | 30 | * The high-level client handle |
31 | */ | 31 | */ |
32 | struct rpc_clnt { | 32 | struct rpc_clnt { |
33 | struct kref cl_kref; /* Number of references */ | 33 | atomic_t cl_count; /* Number of references */ |
34 | struct list_head cl_clients; /* Global list of clients */ | 34 | struct list_head cl_clients; /* Global list of clients */ |
35 | struct list_head cl_tasks; /* List of tasks */ | 35 | struct list_head cl_tasks; /* List of tasks */ |
36 | spinlock_t cl_lock; /* spinlock */ | 36 | spinlock_t cl_lock; /* spinlock */ |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 4af270ec2204..26697514c5ec 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -293,8 +293,8 @@ extern int unregister_pm_notifier(struct notifier_block *nb); | |||
293 | extern bool events_check_enabled; | 293 | extern bool events_check_enabled; |
294 | 294 | ||
295 | extern bool pm_check_wakeup_events(void); | 295 | extern bool pm_check_wakeup_events(void); |
296 | extern bool pm_get_wakeup_count(unsigned long *count); | 296 | extern bool pm_get_wakeup_count(unsigned int *count); |
297 | extern bool pm_save_wakeup_count(unsigned long count); | 297 | extern bool pm_save_wakeup_count(unsigned int count); |
298 | #else /* !CONFIG_PM_SLEEP */ | 298 | #else /* !CONFIG_PM_SLEEP */ |
299 | 299 | ||
300 | static inline int register_pm_notifier(struct notifier_block *nb) | 300 | static inline int register_pm_notifier(struct notifier_block *nb) |
@@ -308,6 +308,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb) | |||
308 | } | 308 | } |
309 | 309 | ||
310 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) | 310 | #define pm_notifier(fn, pri) do { (void)(fn); } while (0) |
311 | |||
312 | static inline bool pm_check_wakeup_events(void) { return true; } | ||
311 | #endif /* !CONFIG_PM_SLEEP */ | 313 | #endif /* !CONFIG_PM_SLEEP */ |
312 | 314 | ||
313 | extern struct mutex pm_mutex; | 315 | extern struct mutex pm_mutex; |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 2fee51a11b73..7cdd63366f88 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -19,6 +19,7 @@ struct bio; | |||
19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ | 19 | #define SWAP_FLAG_PREFER 0x8000 /* set if swap priority specified */ |
20 | #define SWAP_FLAG_PRIO_MASK 0x7fff | 20 | #define SWAP_FLAG_PRIO_MASK 0x7fff |
21 | #define SWAP_FLAG_PRIO_SHIFT 0 | 21 | #define SWAP_FLAG_PRIO_SHIFT 0 |
22 | #define SWAP_FLAG_DISCARD 0x10000 /* discard swap cluster after use */ | ||
22 | 23 | ||
23 | static inline int current_is_kswapd(void) | 24 | static inline int current_is_kswapd(void) |
24 | { | 25 | { |
@@ -142,7 +143,7 @@ struct swap_extent { | |||
142 | enum { | 143 | enum { |
143 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ | 144 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ |
144 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ | 145 | SWP_WRITEOK = (1 << 1), /* ok to write to this swap? */ |
145 | SWP_DISCARDABLE = (1 << 2), /* blkdev supports discard */ | 146 | SWP_DISCARDABLE = (1 << 2), /* swapon+blkdev support discard */ |
146 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ | 147 | SWP_DISCARDING = (1 << 3), /* now discarding a free cluster */ |
147 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ | 148 | SWP_SOLIDSTATE = (1 << 4), /* blkdev seeks are cheap */ |
148 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ | 149 | SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */ |
@@ -315,6 +316,7 @@ extern long nr_swap_pages; | |||
315 | extern long total_swap_pages; | 316 | extern long total_swap_pages; |
316 | extern void si_swapinfo(struct sysinfo *); | 317 | extern void si_swapinfo(struct sysinfo *); |
317 | extern swp_entry_t get_swap_page(void); | 318 | extern swp_entry_t get_swap_page(void); |
319 | extern swp_entry_t get_swap_page_of_type(int); | ||
318 | extern int valid_swaphandles(swp_entry_t, unsigned long *); | 320 | extern int valid_swaphandles(swp_entry_t, unsigned long *); |
319 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); | 321 | extern int add_swap_count_continuation(swp_entry_t, gfp_t); |
320 | extern void swap_shmem_alloc(swp_entry_t); | 322 | extern void swap_shmem_alloc(swp_entry_t); |
@@ -331,13 +333,6 @@ extern int reuse_swap_page(struct page *); | |||
331 | extern int try_to_free_swap(struct page *); | 333 | extern int try_to_free_swap(struct page *); |
332 | struct backing_dev_info; | 334 | struct backing_dev_info; |
333 | 335 | ||
334 | #ifdef CONFIG_HIBERNATION | ||
335 | void hibernation_freeze_swap(void); | ||
336 | void hibernation_thaw_swap(void); | ||
337 | swp_entry_t get_swap_for_hibernation(int type); | ||
338 | void swap_free_for_hibernation(swp_entry_t val); | ||
339 | #endif | ||
340 | |||
341 | /* linux/mm/thrash.c */ | 336 | /* linux/mm/thrash.c */ |
342 | extern struct mm_struct *swap_token_mm; | 337 | extern struct mm_struct *swap_token_mm; |
343 | extern void grab_swap_token(struct mm_struct *); | 338 | extern void grab_swap_token(struct mm_struct *); |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 6e5d19788634..e6319d18a55d 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
@@ -820,7 +820,7 @@ asmlinkage long sys_fanotify_mark(int fanotify_fd, unsigned int flags, | |||
820 | u64 mask, int fd, | 820 | u64 mask, int fd, |
821 | const char __user *pathname); | 821 | const char __user *pathname); |
822 | 822 | ||
823 | int kernel_execve(const char *filename, char *const argv[], char *const envp[]); | 823 | int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]); |
824 | 824 | ||
825 | 825 | ||
826 | asmlinkage long sys_perf_event_open( | 826 | asmlinkage long sys_perf_event_open( |
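
The kernel_execve() prototype now takes const char *const arrays, so in-kernel callers declare their argument and environment vectors fully const; an illustrative fragment in the style of the init bootstrap code:

    static const char *const foo_argv[] = { "/sbin/init", NULL };
    static const char *const foo_envp[] = { "HOME=/", "TERM=linux", NULL };

    static int foo_run_init(void)
    {
            return kernel_execve("/sbin/init", foo_argv, foo_envp);
    }
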
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 3c92121ba9af..30b881555fa5 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/list.h> | 17 | #include <linux/list.h> |
18 | #include <linux/lockdep.h> | 18 | #include <linux/lockdep.h> |
19 | #include <linux/kobject_ns.h> | ||
19 | #include <asm/atomic.h> | 20 | #include <asm/atomic.h> |
20 | 21 | ||
21 | struct kobject; | 22 | struct kobject; |
@@ -163,6 +164,10 @@ int sysfs_add_file_to_group(struct kobject *kobj, | |||
163 | const struct attribute *attr, const char *group); | 164 | const struct attribute *attr, const char *group); |
164 | void sysfs_remove_file_from_group(struct kobject *kobj, | 165 | void sysfs_remove_file_from_group(struct kobject *kobj, |
165 | const struct attribute *attr, const char *group); | 166 | const struct attribute *attr, const char *group); |
167 | int sysfs_merge_group(struct kobject *kobj, | ||
168 | const struct attribute_group *grp); | ||
169 | void sysfs_unmerge_group(struct kobject *kobj, | ||
170 | const struct attribute_group *grp); | ||
166 | 171 | ||
167 | void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); | 172 | void sysfs_notify(struct kobject *kobj, const char *dir, const char *attr); |
168 | void sysfs_notify_dirent(struct sysfs_dirent *sd); | 173 | void sysfs_notify_dirent(struct sysfs_dirent *sd); |
@@ -301,6 +306,17 @@ static inline void sysfs_remove_file_from_group(struct kobject *kobj, | |||
301 | { | 306 | { |
302 | } | 307 | } |
303 | 308 | ||
309 | static inline int sysfs_merge_group(struct kobject *kobj, | ||
310 | const struct attribute_group *grp) | ||
311 | { | ||
312 | return 0; | ||
313 | } | ||
314 | |||
315 | static inline void sysfs_unmerge_group(struct kobject *kobj, | ||
316 | const struct attribute_group *grp) | ||
317 | { | ||
318 | } | ||
319 | |||
304 | static inline void sysfs_notify(struct kobject *kobj, const char *dir, | 320 | static inline void sysfs_notify(struct kobject *kobj, const char *dir, |
305 | const char *attr) | 321 | const char *attr) |
306 | { | 322 | { |
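
sysfs_merge_group() and sysfs_unmerge_group() add (and later remove) the files of an attribute_group directly in an existing kobject directory rather than creating a subdirectory. A hypothetical driver exposing optional attributes might use them as below; dev_attr_foo and the group are illustrative:

    static struct attribute *foo_extra_attrs[] = {
            &dev_attr_foo.attr,
            NULL,
    };

    static const struct attribute_group foo_extra_group = {
            .attrs = foo_extra_attrs,
    };

    static int foo_add_extra_attrs(struct device *dev)
    {
            return sysfs_merge_group(&dev->kobj, &foo_extra_group);
    }

    static void foo_remove_extra_attrs(struct device *dev)
    {
            sysfs_unmerge_group(&dev->kobj, &foo_extra_group);
    }
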
diff --git a/include/linux/tc_act/Kbuild b/include/linux/tc_act/Kbuild index 76990937f4c9..67b501c302b2 100644 --- a/include/linux/tc_act/Kbuild +++ b/include/linux/tc_act/Kbuild | |||
@@ -4,3 +4,4 @@ header-y += tc_mirred.h | |||
4 | header-y += tc_pedit.h | 4 | header-y += tc_pedit.h |
5 | header-y += tc_nat.h | 5 | header-y += tc_nat.h |
6 | header-y += tc_skbedit.h | 6 | header-y += tc_skbedit.h |
7 | header-y += tc_csum.h | ||
diff --git a/include/linux/tc_act/tc_csum.h b/include/linux/tc_act/tc_csum.h new file mode 100644 index 000000000000..a047c49a3153 --- /dev/null +++ b/include/linux/tc_act/tc_csum.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef __LINUX_TC_CSUM_H | ||
2 | #define __LINUX_TC_CSUM_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <linux/pkt_cls.h> | ||
6 | |||
7 | #define TCA_ACT_CSUM 16 | ||
8 | |||
9 | enum { | ||
10 | TCA_CSUM_UNSPEC, | ||
11 | TCA_CSUM_PARMS, | ||
12 | TCA_CSUM_TM, | ||
13 | __TCA_CSUM_MAX | ||
14 | }; | ||
15 | #define TCA_CSUM_MAX (__TCA_CSUM_MAX - 1) | ||
16 | |||
17 | enum { | ||
18 | TCA_CSUM_UPDATE_FLAG_IPV4HDR = 1, | ||
19 | TCA_CSUM_UPDATE_FLAG_ICMP = 2, | ||
20 | TCA_CSUM_UPDATE_FLAG_IGMP = 4, | ||
21 | TCA_CSUM_UPDATE_FLAG_TCP = 8, | ||
22 | TCA_CSUM_UPDATE_FLAG_UDP = 16, | ||
23 | TCA_CSUM_UPDATE_FLAG_UDPLITE = 32 | ||
24 | }; | ||
25 | |||
26 | struct tc_csum { | ||
27 | tc_gen; | ||
28 | |||
29 | __u32 update_flags; | ||
30 | }; | ||
31 | |||
32 | #endif /* __LINUX_TC_CSUM_H */ | ||
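
Userspace configures the new csum action by packing a struct tc_csum into a TCA_CSUM_PARMS netlink attribute. A sketch of the structure a tc-like tool would fill, assuming TC_ACT_OK from linux/pkt_cls.h; the netlink plumbing itself is omitted:

    struct tc_csum parm = {
            .action       = TC_ACT_OK,              /* tc_gen field: verdict after fixing csums */
            .update_flags = TCA_CSUM_UPDATE_FLAG_IPV4HDR |
                            TCA_CSUM_UPDATE_FLAG_TCP |
                            TCA_CSUM_UPDATE_FLAG_UDP,
    };
    /* parm is then sent as the TCA_CSUM_PARMS attribute of the action. */
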
diff --git a/include/linux/tc_ematch/tc_em_meta.h b/include/linux/tc_ematch/tc_em_meta.h index 0864206ec1a3..7138962664f8 100644 --- a/include/linux/tc_ematch/tc_em_meta.h +++ b/include/linux/tc_ematch/tc_em_meta.h | |||
@@ -79,6 +79,7 @@ enum { | |||
79 | TCF_META_ID_SK_SENDMSG_OFF, | 79 | TCF_META_ID_SK_SENDMSG_OFF, |
80 | TCF_META_ID_SK_WRITE_PENDING, | 80 | TCF_META_ID_SK_WRITE_PENDING, |
81 | TCF_META_ID_VLAN_TAG, | 81 | TCF_META_ID_VLAN_TAG, |
82 | TCF_META_ID_RXHASH, | ||
82 | __TCF_META_ID_MAX | 83 | __TCF_META_ID_MAX |
83 | }; | 84 | }; |
84 | #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) | 85 | #define TCF_META_ID_MAX (__TCF_META_ID_MAX - 1) |
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index a778ee024590..e64f4c67d0ef 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -105,6 +105,7 @@ enum { | |||
105 | #define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ | 105 | #define TCP_COOKIE_TRANSACTIONS 15 /* TCP Cookie Transactions */ |
106 | #define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ | 106 | #define TCP_THIN_LINEAR_TIMEOUTS 16 /* Use linear timeouts for thin streams*/ |
107 | #define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ | 107 | #define TCP_THIN_DUPACK 17 /* Fast retrans. after 1 dupack */ |
108 | #define TCP_USER_TIMEOUT 18 /* How long for loss retry before timeout */ | ||
108 | 109 | ||
109 | /* for TCP_INFO socket option */ | 110 | /* for TCP_INFO socket option */ |
110 | #define TCPI_OPT_TIMESTAMPS 1 | 111 | #define TCPI_OPT_TIMESTAMPS 1 |
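
TCP_USER_TIMEOUT bounds how long transmitted data may remain unacknowledged before the kernel aborts the connection; the value is an unsigned int in milliseconds and is set like any other TCP-level socket option. An illustrative userspace helper:

    #include <sys/socket.h>
    #include <netinet/in.h>         /* IPPROTO_TCP */
    #include <linux/tcp.h>          /* TCP_USER_TIMEOUT, until libc headers carry it */

    static int foo_set_user_timeout(int fd, unsigned int ms)
    {
            /* 0 restores the default retransmission-based behaviour */
            return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms));
    }
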
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h index a8cc4e13434c..c90696544176 100644 --- a/include/linux/thread_info.h +++ b/include/linux/thread_info.h | |||
@@ -23,12 +23,12 @@ struct restart_block { | |||
23 | }; | 23 | }; |
24 | /* For futex_wait and futex_wait_requeue_pi */ | 24 | /* For futex_wait and futex_wait_requeue_pi */ |
25 | struct { | 25 | struct { |
26 | u32 *uaddr; | 26 | u32 __user *uaddr; |
27 | u32 val; | 27 | u32 val; |
28 | u32 flags; | 28 | u32 flags; |
29 | u32 bitset; | 29 | u32 bitset; |
30 | u64 time; | 30 | u64 time; |
31 | u32 *uaddr2; | 31 | u32 __user *uaddr2; |
32 | } futex; | 32 | } futex; |
33 | /* For nanosleep */ | 33 | /* For nanosleep */ |
34 | struct { | 34 | struct { |
diff --git a/include/linux/tipc.h b/include/linux/tipc.h index 181c8d0e6f73..d10614b29d59 100644 --- a/include/linux/tipc.h +++ b/include/linux/tipc.h | |||
@@ -127,17 +127,23 @@ static inline unsigned int tipc_node(__u32 addr) | |||
127 | * TIPC topology subscription service definitions | 127 | * TIPC topology subscription service definitions |
128 | */ | 128 | */ |
129 | 129 | ||
130 | #define TIPC_SUB_SERVICE 0x00 /* Filter for service availability */ | 130 | #define TIPC_SUB_PORTS 0x01 /* filter for port availability */ |
131 | #define TIPC_SUB_PORTS 0x01 /* Filter for port availability */ | 131 | #define TIPC_SUB_SERVICE 0x02 /* filter for service availability */ |
132 | #define TIPC_SUB_CANCEL 0x04 /* Cancel a subscription */ | 132 | #define TIPC_SUB_CANCEL 0x04 /* cancel a subscription */ |
133 | #if 0 | ||
134 | /* The following filter options are not currently implemented */ | ||
135 | #define TIPC_SUB_NO_BIND_EVTS 0x04 /* filter out "publish" events */ | ||
136 | #define TIPC_SUB_NO_UNBIND_EVTS 0x08 /* filter out "withdraw" events */ | ||
137 | #define TIPC_SUB_SINGLE_EVT 0x10 /* expire after first event */ | ||
138 | #endif | ||
133 | 139 | ||
134 | #define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */ | 140 | #define TIPC_WAIT_FOREVER ~0 /* timeout for permanent subscription */ |
135 | 141 | ||
136 | struct tipc_subscr { | 142 | struct tipc_subscr { |
137 | struct tipc_name_seq seq; /* NBO. Name sequence of interest */ | 143 | struct tipc_name_seq seq; /* name sequence of interest */ |
138 | __u32 timeout; /* NBO. Subscription duration (in ms) */ | 144 | __u32 timeout; /* subscription duration (in ms) */ |
139 | __u32 filter; /* NBO. Bitmask of filter options */ | 145 | __u32 filter; /* bitmask of filter options */ |
140 | char usr_handle[8]; /* Opaque. Available for subscriber use */ | 146 | char usr_handle[8]; /* available for subscriber use */ |
141 | }; | 147 | }; |
142 | 148 | ||
143 | #define TIPC_PUBLISHED 1 /* publication event */ | 149 | #define TIPC_PUBLISHED 1 /* publication event */ |
@@ -145,11 +151,11 @@ struct tipc_subscr { | |||
145 | #define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */ | 151 | #define TIPC_SUBSCR_TIMEOUT 3 /* subscription timeout event */ |
146 | 152 | ||
147 | struct tipc_event { | 153 | struct tipc_event { |
148 | __u32 event; /* NBO. Event type, as defined above */ | 154 | __u32 event; /* event type */ |
149 | __u32 found_lower; /* NBO. Matching name seq instances */ | 155 | __u32 found_lower; /* matching name seq instances */ |
150 | __u32 found_upper; /* " " " " " */ | 156 | __u32 found_upper; /* " " " " */ |
151 | struct tipc_portid port; /* NBO. Associated port */ | 157 | struct tipc_portid port; /* associated port */ |
152 | struct tipc_subscr s; /* Original, associated subscription */ | 158 | struct tipc_subscr s; /* associated subscription */ |
153 | }; | 159 | }; |
154 | 160 | ||
155 | /* | 161 | /* |
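
A topology subscription is expressed by filling a struct tipc_subscr and handing it to the topology service; the usr_handle bytes come back untouched in every matching tipc_event. An illustrative request for service-availability events on one name sequence (the type and range values are made up):

    struct tipc_subscr sub = {
            .seq     = { .type = 1000, .lower = 0, .upper = ~0u },
            .timeout = TIPC_WAIT_FOREVER,
            .filter  = TIPC_SUB_SERVICE,            /* or TIPC_SUB_PORTS for per-port events */
    };
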
diff --git a/include/linux/topology.h b/include/linux/topology.h index 64e084ff5e5c..b91a40e847d2 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -201,6 +201,12 @@ int arch_update_cpu_topology(void); | |||
201 | .balance_interval = 64, \ | 201 | .balance_interval = 64, \ |
202 | } | 202 | } |
203 | 203 | ||
204 | #ifdef CONFIG_SCHED_BOOK | ||
205 | #ifndef SD_BOOK_INIT | ||
206 | #error Please define an appropriate SD_BOOK_INIT in include/asm/topology.h!!! | ||
207 | #endif | ||
208 | #endif /* CONFIG_SCHED_BOOK */ | ||
209 | |||
204 | #ifdef CONFIG_NUMA | 210 | #ifdef CONFIG_NUMA |
205 | #ifndef SD_NODE_INIT | 211 | #ifndef SD_NODE_INIT |
206 | #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! | 212 | #error Please define an appropriate SD_NODE_INIT in include/asm/topology.h!!! |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index 103d1b61aacb..a4a90b6726ce 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/rcupdate.h> | 19 | #include <linux/rcupdate.h> |
20 | #include <linux/jump_label.h> | ||
20 | 21 | ||
21 | struct module; | 22 | struct module; |
22 | struct tracepoint; | 23 | struct tracepoint; |
@@ -145,7 +146,9 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin, | |||
145 | extern struct tracepoint __tracepoint_##name; \ | 146 | extern struct tracepoint __tracepoint_##name; \ |
146 | static inline void trace_##name(proto) \ | 147 | static inline void trace_##name(proto) \ |
147 | { \ | 148 | { \ |
148 | if (unlikely(__tracepoint_##name.state)) \ | 149 | JUMP_LABEL(&__tracepoint_##name.state, do_trace); \ |
150 | return; \ | ||
151 | do_trace: \ | ||
149 | __DO_TRACE(&__tracepoint_##name, \ | 152 | __DO_TRACE(&__tracepoint_##name, \ |
150 | TP_PROTO(data_proto), \ | 153 | TP_PROTO(data_proto), \ |
151 | TP_ARGS(data_args)); \ | 154 | TP_ARGS(data_args)); \ |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 1437da3ddc62..86be0cdeb11b 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -256,6 +256,7 @@ struct tty_operations; | |||
256 | struct tty_struct { | 256 | struct tty_struct { |
257 | int magic; | 257 | int magic; |
258 | struct kref kref; | 258 | struct kref kref; |
259 | struct device *dev; | ||
259 | struct tty_driver *driver; | 260 | struct tty_driver *driver; |
260 | const struct tty_operations *ops; | 261 | const struct tty_operations *ops; |
261 | int index; | 262 | int index; |
@@ -329,6 +330,13 @@ struct tty_struct { | |||
329 | struct tty_port *port; | 330 | struct tty_port *port; |
330 | }; | 331 | }; |
331 | 332 | ||
333 | /* Each of a tty's open files has private_data pointing to tty_file_private */ | ||
334 | struct tty_file_private { | ||
335 | struct tty_struct *tty; | ||
336 | struct file *file; | ||
337 | struct list_head list; | ||
338 | }; | ||
339 | |||
332 | /* tty magic number */ | 340 | /* tty magic number */ |
333 | #define TTY_MAGIC 0x5401 | 341 | #define TTY_MAGIC 0x5401 |
334 | 342 | ||
@@ -458,6 +466,7 @@ extern void proc_clear_tty(struct task_struct *p); | |||
458 | extern struct tty_struct *get_current_tty(void); | 466 | extern struct tty_struct *get_current_tty(void); |
459 | extern void tty_default_fops(struct file_operations *fops); | 467 | extern void tty_default_fops(struct file_operations *fops); |
460 | extern struct tty_struct *alloc_tty_struct(void); | 468 | extern struct tty_struct *alloc_tty_struct(void); |
469 | extern int tty_add_file(struct tty_struct *tty, struct file *file); | ||
461 | extern void free_tty_struct(struct tty_struct *tty); | 470 | extern void free_tty_struct(struct tty_struct *tty); |
462 | extern void initialize_tty_struct(struct tty_struct *tty, | 471 | extern void initialize_tty_struct(struct tty_struct *tty, |
463 | struct tty_driver *driver, int idx); | 472 | struct tty_driver *driver, int idx); |
@@ -470,6 +479,7 @@ extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty); | |||
470 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); | 479 | extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty); |
471 | 480 | ||
472 | extern struct mutex tty_mutex; | 481 | extern struct mutex tty_mutex; |
482 | extern spinlock_t tty_files_lock; | ||
473 | 483 | ||
474 | extern void tty_write_unlock(struct tty_struct *tty); | 484 | extern void tty_write_unlock(struct tty_struct *tty); |
475 | extern int tty_write_lock(struct tty_struct *tty, int ndelay); | 485 | extern int tty_write_lock(struct tty_struct *tty, int ndelay); |
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h index b08677982525..db2d227694da 100644 --- a/include/linux/tty_driver.h +++ b/include/linux/tty_driver.h | |||
@@ -224,6 +224,12 @@ | |||
224 | * unless the tty also has a valid tty->termiox pointer. | 224 | * unless the tty also has a valid tty->termiox pointer. |
225 | * | 225 | * |
226 | * Optional: Called under the termios lock | 226 | * Optional: Called under the termios lock |
227 | * | ||
228 | * int (*get_icount)(struct tty_struct *tty, struct serial_icounter *icount); | ||
229 | * | ||
230 | * Called when the device receives a TIOCGICOUNT ioctl. Passed a kernel | ||
231 | * structure to complete. This method is optional and will only be called | ||
232 | * if provided (otherwise EINVAL will be returned). | ||
227 | */ | 233 | */ |
228 | 234 | ||
229 | #include <linux/fs.h> | 235 | #include <linux/fs.h> |
@@ -232,6 +238,7 @@ | |||
232 | 238 | ||
233 | struct tty_struct; | 239 | struct tty_struct; |
234 | struct tty_driver; | 240 | struct tty_driver; |
241 | struct serial_icounter_struct; | ||
235 | 242 | ||
236 | struct tty_operations { | 243 | struct tty_operations { |
237 | struct tty_struct * (*lookup)(struct tty_driver *driver, | 244 | struct tty_struct * (*lookup)(struct tty_driver *driver, |
@@ -268,6 +275,8 @@ struct tty_operations { | |||
268 | unsigned int set, unsigned int clear); | 275 | unsigned int set, unsigned int clear); |
269 | int (*resize)(struct tty_struct *tty, struct winsize *ws); | 276 | int (*resize)(struct tty_struct *tty, struct winsize *ws); |
270 | int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); | 277 | int (*set_termiox)(struct tty_struct *tty, struct termiox *tnew); |
278 | int (*get_icount)(struct tty_struct *tty, | ||
279 | struct serial_icounter_struct *icount); | ||
271 | #ifdef CONFIG_CONSOLE_POLL | 280 | #ifdef CONFIG_CONSOLE_POLL |
272 | int (*poll_init)(struct tty_driver *driver, int line, char *options); | 281 | int (*poll_init)(struct tty_driver *driver, int line, char *options); |
273 | int (*poll_get_char)(struct tty_driver *driver, int line); | 282 | int (*poll_get_char)(struct tty_driver *driver, int line); |
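
A driver that already tracks its own interrupt counters can satisfy TIOCGICOUNT by supplying the optional get_icount() method instead of decoding the ioctl itself. A hypothetical implementation (foo_port and its icount member are illustrative):

    static int foo_get_icount(struct tty_struct *tty,
                              struct serial_icounter_struct *icount)
    {
            struct foo_port *port = tty->driver_data;

            icount->rx      = port->icount.rx;
            icount->tx      = port->icount.tx;
            icount->frame   = port->icount.frame;
            icount->overrun = port->icount.overrun;
            return 0;
    }

    static const struct tty_operations foo_ops = {
            /* ... other operations ... */
            .get_icount = foo_get_icount,
    };
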
diff --git a/include/linux/types.h b/include/linux/types.h index 01a082f56ef4..357dbc19606f 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
@@ -121,7 +121,15 @@ typedef __u64 u_int64_t; | |||
121 | typedef __s64 int64_t; | 121 | typedef __s64 int64_t; |
122 | #endif | 122 | #endif |
123 | 123 | ||
124 | /* this is a special 64bit data type that is 8-byte aligned */ | 124 | /* |
125 | * aligned_u64 should be used in defining kernel<->userspace ABIs to avoid | ||
126 | * common 32/64-bit compat problems. | ||
127 | * 64-bit values align to 4-byte boundaries on x86_32 (and possibly other | ||
128 | * architectures) and to 8-byte boundaries on 64-bit architetures. The new | ||
129 | * aligned_64 type enforces 8-byte alignment so that structs containing | ||
130 | * aligned_64 values have the same alignment on 32-bit and 64-bit architectures. | ||
131 | * No conversions are necessary between 32-bit user-space and a 64-bit kernel. | ||
132 | */ | ||
125 | #define aligned_u64 __u64 __attribute__((aligned(8))) | 133 | #define aligned_u64 __u64 __attribute__((aligned(8))) |
126 | #define aligned_be64 __be64 __attribute__((aligned(8))) | 134 | #define aligned_be64 __be64 __attribute__((aligned(8))) |
127 | #define aligned_le64 __le64 __attribute__((aligned(8))) | 135 | #define aligned_le64 __le64 __attribute__((aligned(8))) |
@@ -178,6 +186,11 @@ typedef __u64 __bitwise __be64; | |||
178 | typedef __u16 __bitwise __sum16; | 186 | typedef __u16 __bitwise __sum16; |
179 | typedef __u32 __bitwise __wsum; | 187 | typedef __u32 __bitwise __wsum; |
180 | 188 | ||
189 | /* this is a special 64bit data type that is 8-byte aligned */ | ||
190 | #define __aligned_u64 __u64 __attribute__((aligned(8))) | ||
191 | #define __aligned_be64 __be64 __attribute__((aligned(8))) | ||
192 | #define __aligned_le64 __le64 __attribute__((aligned(8))) | ||
193 | |||
181 | #ifdef __KERNEL__ | 194 | #ifdef __KERNEL__ |
182 | typedef unsigned __bitwise__ gfp_t; | 195 | typedef unsigned __bitwise__ gfp_t; |
183 | typedef unsigned __bitwise__ fmode_t; | 196 | typedef unsigned __bitwise__ fmode_t; |
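
The exported __aligned_u64 variants let UAPI structures give 64-bit members identical offsets on 32-bit and 64-bit builds, avoiding the compat handling described in the comment above. An illustrative ioctl argument block:

    struct foo_ioctl_args {
            __u32           flags;
            __u32           reserved;       /* keep the padding explicit */
            __aligned_u64   buffer_ptr;     /* user pointer passed as a 64-bit value */
            __aligned_u64   buffer_len;
    };
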
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h index 5dcc9ff72f69..d6188e5a52df 100644 --- a/include/linux/uio_driver.h +++ b/include/linux/uio_driver.h | |||
@@ -108,7 +108,7 @@ extern void uio_event_notify(struct uio_info *info); | |||
108 | 108 | ||
109 | /* defines for uio_info->irq */ | 109 | /* defines for uio_info->irq */ |
110 | #define UIO_IRQ_CUSTOM -1 | 110 | #define UIO_IRQ_CUSTOM -1 |
111 | #define UIO_IRQ_NONE -2 | 111 | #define UIO_IRQ_NONE 0 |
112 | 112 | ||
113 | /* defines for uio_mem->memtype */ | 113 | /* defines for uio_mem->memtype */ |
114 | #define UIO_MEM_NONE 0 | 114 | #define UIO_MEM_NONE 0 |
diff --git a/include/linux/usb/cdc.h b/include/linux/usb/cdc.h index c117a68d04a7..5e86dc771da4 100644 --- a/include/linux/usb/cdc.h +++ b/include/linux/usb/cdc.h | |||
@@ -32,6 +32,8 @@ | |||
32 | 32 | ||
33 | #define USB_CDC_PROTO_EEM 7 | 33 | #define USB_CDC_PROTO_EEM 7 |
34 | 34 | ||
35 | #define USB_CDC_NCM_PROTO_NTB 1 | ||
36 | |||
35 | /*-------------------------------------------------------------------------*/ | 37 | /*-------------------------------------------------------------------------*/ |
36 | 38 | ||
37 | /* | 39 | /* |
@@ -274,13 +276,13 @@ struct usb_cdc_notification { | |||
274 | /* | 276 | /* |
275 | * Class Specific structures and constants | 277 | * Class Specific structures and constants |
276 | * | 278 | * |
277 | * CDC NCM parameter structure, CDC NCM subclass 6.2.1 | 279 | * CDC NCM NTB parameters structure, CDC NCM subclass 6.2.1 |
278 | * | 280 | * |
279 | */ | 281 | */ |
280 | 282 | ||
281 | struct usb_cdc_ncm_ntb_parameter { | 283 | struct usb_cdc_ncm_ntb_parameters { |
282 | __le16 wLength; | 284 | __le16 wLength; |
283 | __le16 bmNtbFormatSupported; | 285 | __le16 bmNtbFormatsSupported; |
284 | __le32 dwNtbInMaxSize; | 286 | __le32 dwNtbInMaxSize; |
285 | __le16 wNdpInDivisor; | 287 | __le16 wNdpInDivisor; |
286 | __le16 wNdpInPayloadRemainder; | 288 | __le16 wNdpInPayloadRemainder; |
@@ -297,8 +299,8 @@ struct usb_cdc_ncm_ntb_parameter { | |||
297 | * CDC NCM transfer headers, CDC NCM subclass 3.2 | 299 | * CDC NCM transfer headers, CDC NCM subclass 3.2 |
298 | */ | 300 | */ |
299 | 301 | ||
300 | #define NCM_NTH16_SIGN 0x484D434E /* NCMH */ | 302 | #define USB_CDC_NCM_NTH16_SIGN 0x484D434E /* NCMH */ |
301 | #define NCM_NTH32_SIGN 0x686D636E /* ncmh */ | 303 | #define USB_CDC_NCM_NTH32_SIGN 0x686D636E /* ncmh */ |
302 | 304 | ||
303 | struct usb_cdc_ncm_nth16 { | 305 | struct usb_cdc_ncm_nth16 { |
304 | __le32 dwSignature; | 306 | __le32 dwSignature; |
@@ -320,25 +322,78 @@ struct usb_cdc_ncm_nth32 { | |||
320 | * CDC NCM datagram pointers, CDC NCM subclass 3.3 | 322 | * CDC NCM datagram pointers, CDC NCM subclass 3.3 |
321 | */ | 323 | */ |
322 | 324 | ||
323 | #define NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */ | 325 | #define USB_CDC_NCM_NDP16_CRC_SIGN 0x314D434E /* NCM1 */ |
324 | #define NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */ | 326 | #define USB_CDC_NCM_NDP16_NOCRC_SIGN 0x304D434E /* NCM0 */ |
325 | #define NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */ | 327 | #define USB_CDC_NCM_NDP32_CRC_SIGN 0x316D636E /* ncm1 */ |
326 | #define NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */ | 328 | #define USB_CDC_NCM_NDP32_NOCRC_SIGN 0x306D636E /* ncm0 */ |
329 | |||
330 | /* 16-bit NCM Datagram Pointer Entry */ | ||
331 | struct usb_cdc_ncm_dpe16 { | ||
332 | __le16 wDatagramIndex; | ||
333 | __le16 wDatagramLength; | ||
334 | } __attribute__((__packed__)); | ||
327 | 335 | ||
336 | /* 16-bit NCM Datagram Pointer Table */ | ||
328 | struct usb_cdc_ncm_ndp16 { | 337 | struct usb_cdc_ncm_ndp16 { |
329 | __le32 dwSignature; | 338 | __le32 dwSignature; |
330 | __le16 wLength; | 339 | __le16 wLength; |
331 | __le16 wNextFpIndex; | 340 | __le16 wNextFpIndex; |
332 | __u8 data[0]; | 341 | struct usb_cdc_ncm_dpe16 dpe16[0]; |
333 | } __attribute__ ((packed)); | 342 | } __attribute__ ((packed)); |
334 | 343 | ||
344 | /* 32-bit NCM Datagram Pointer Entry */ | ||
345 | struct usb_cdc_ncm_dpe32 { | ||
346 | __le32 dwDatagramIndex; | ||
347 | __le32 dwDatagramLength; | ||
348 | } __attribute__((__packed__)); | ||
349 | |||
350 | /* 32-bit NCM Datagram Pointer Table */ | ||
335 | struct usb_cdc_ncm_ndp32 { | 351 | struct usb_cdc_ncm_ndp32 { |
336 | __le32 dwSignature; | 352 | __le32 dwSignature; |
337 | __le16 wLength; | 353 | __le16 wLength; |
338 | __le16 wReserved6; | 354 | __le16 wReserved6; |
339 | __le32 dwNextFpIndex; | 355 | __le32 dwNextNdpIndex; |
340 | __le32 dwReserved12; | 356 | __le32 dwReserved12; |
341 | __u8 data[0]; | 357 | struct usb_cdc_ncm_dpe32 dpe32[0]; |
342 | } __attribute__ ((packed)); | 358 | } __attribute__ ((packed)); |
343 | 359 | ||
360 | /* CDC NCM subclass 3.2.1 and 3.2.2 */ | ||
361 | #define USB_CDC_NCM_NDP16_INDEX_MIN 0x000C | ||
362 | #define USB_CDC_NCM_NDP32_INDEX_MIN 0x0010 | ||
363 | |||
364 | /* CDC NCM subclass 3.3.3 Datagram Formatting */ | ||
365 | #define USB_CDC_NCM_DATAGRAM_FORMAT_CRC 0x30 | ||
366 | #define USB_CDC_NCM_DATAGRAM_FORMAT_NOCRC 0X31 | ||
367 | |||
368 | /* CDC NCM subclass 4.2 NCM Communications Interface Protocol Code */ | ||
369 | #define USB_CDC_NCM_PROTO_CODE_NO_ENCAP_COMMANDS 0x00 | ||
370 | #define USB_CDC_NCM_PROTO_CODE_EXTERN_PROTO 0xFE | ||
371 | |||
372 | /* CDC NCM subclass 5.2.1 NCM Functional Descriptor, bmNetworkCapabilities */ | ||
373 | #define USB_CDC_NCM_NCAP_ETH_FILTER (1 << 0) | ||
374 | #define USB_CDC_NCM_NCAP_NET_ADDRESS (1 << 1) | ||
375 | #define USB_CDC_NCM_NCAP_ENCAP_COMMAND (1 << 2) | ||
376 | #define USB_CDC_NCM_NCAP_MAX_DATAGRAM_SIZE (1 << 3) | ||
377 | #define USB_CDC_NCM_NCAP_CRC_MODE (1 << 4) | ||
378 | |||
379 | /* CDC NCM subclass Table 6-3: NTB Parameter Structure */ | ||
380 | #define USB_CDC_NCM_NTB16_SUPPORTED (1 << 0) | ||
381 | #define USB_CDC_NCM_NTB32_SUPPORTED (1 << 1) | ||
382 | |||
383 | /* CDC NCM subclass Table 6-3: NTB Parameter Structure */ | ||
384 | #define USB_CDC_NCM_NDP_ALIGN_MIN_SIZE 0x04 | ||
385 | #define USB_CDC_NCM_NTB_MAX_LENGTH 0x1C | ||
386 | |||
387 | /* CDC NCM subclass 6.2.5 SetNtbFormat */ | ||
388 | #define USB_CDC_NCM_NTB16_FORMAT 0x00 | ||
389 | #define USB_CDC_NCM_NTB32_FORMAT 0x01 | ||
390 | |||
391 | /* CDC NCM subclass 6.2.7 SetNtbInputSize */ | ||
392 | #define USB_CDC_NCM_NTB_MIN_IN_SIZE 2048 | ||
393 | #define USB_CDC_NCM_NTB_MIN_OUT_SIZE 2048 | ||
394 | |||
395 | /* CDC NCM subclass 6.2.11 SetCrcMode */ | ||
396 | #define USB_CDC_NCM_CRC_NOT_APPENDED 0x00 | ||
397 | #define USB_CDC_NCM_CRC_APPENDED 0x01 | ||
398 | |||
344 | #endif /* __LINUX_USB_CDC_H */ | 399 | #endif /* __LINUX_USB_CDC_H */ |
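
With the datagram pointers expressed as typed dpe16/dpe32 arrays, walking a 16-bit NDP becomes an indexed loop over entries terminated by a null pair. A sketch that relies only on the structures above; ntb, ndp_off and foo_handle_datagram() are illustrative, and a real parser would also honour wLength:

    static int foo_walk_ndp16(u8 *ntb, unsigned int ndp_off)
    {
            struct usb_cdc_ncm_ndp16 *ndp = (struct usb_cdc_ncm_ndp16 *)(ntb + ndp_off);
            unsigned int i;

            if (le32_to_cpu(ndp->dwSignature) != USB_CDC_NCM_NDP16_NOCRC_SIGN)
                    return -EINVAL;

            for (i = 0; ; i++) {
                    u16 idx = le16_to_cpu(ndp->dpe16[i].wDatagramIndex);
                    u16 len = le16_to_cpu(ndp->dpe16[i].wDatagramLength);

                    if (idx == 0 || len == 0)       /* terminating null entry */
                            break;
                    foo_handle_datagram(ntb + idx, len);
            }
            return 0;
    }
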
diff --git a/include/linux/usb/ch9.h b/include/linux/usb/ch9.h index da2ed77d3e8d..f917bbbc8901 100644 --- a/include/linux/usb/ch9.h +++ b/include/linux/usb/ch9.h | |||
@@ -123,8 +123,23 @@ | |||
123 | #define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ | 123 | #define USB_DEVICE_A_ALT_HNP_SUPPORT 5 /* (otg) other RH port does */ |
124 | #define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ | 124 | #define USB_DEVICE_DEBUG_MODE 6 /* (special devices only) */ |
125 | 125 | ||
126 | /* | ||
127 | * New Feature Selectors as added by USB 3.0 | ||
128 | * See USB 3.0 spec Table 9-6 | ||
129 | */ | ||
130 | #define USB_DEVICE_U1_ENABLE 48 /* dev may initiate U1 transition */ | ||
131 | #define USB_DEVICE_U2_ENABLE 49 /* dev may initiate U2 transition */ | ||
132 | #define USB_DEVICE_LTM_ENABLE 50 /* dev may send LTM */ | ||
133 | #define USB_INTRF_FUNC_SUSPEND 0 /* function suspend */ | ||
134 | |||
135 | #define USB_INTR_FUNC_SUSPEND_OPT_MASK 0xFF00 | ||
136 | |||
126 | #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ | 137 | #define USB_ENDPOINT_HALT 0 /* IN/OUT will STALL */ |
127 | 138 | ||
139 | /* Bit array elements as returned by the USB_REQ_GET_STATUS request. */ | ||
140 | #define USB_DEV_STAT_U1_ENABLED 2 /* transition into U1 state */ | ||
141 | #define USB_DEV_STAT_U2_ENABLED 3 /* transition into U2 state */ | ||
142 | #define USB_DEV_STAT_LTM_ENABLED 4 /* Latency tolerance messages */ | ||
128 | 143 | ||
129 | /** | 144 | /** |
130 | * struct usb_ctrlrequest - SETUP data for a USB device control request | 145 | * struct usb_ctrlrequest - SETUP data for a USB device control request |
@@ -675,6 +690,7 @@ struct usb_bos_descriptor { | |||
675 | __u8 bNumDeviceCaps; | 690 | __u8 bNumDeviceCaps; |
676 | } __attribute__((packed)); | 691 | } __attribute__((packed)); |
677 | 692 | ||
693 | #define USB_DT_BOS_SIZE 5 | ||
678 | /*-------------------------------------------------------------------------*/ | 694 | /*-------------------------------------------------------------------------*/ |
679 | 695 | ||
680 | /* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ | 696 | /* USB_DT_DEVICE_CAPABILITY: grouped with BOS */ |
@@ -712,16 +728,56 @@ struct usb_wireless_cap_descriptor { /* Ultra Wide Band */ | |||
712 | __u8 bReserved; | 728 | __u8 bReserved; |
713 | } __attribute__((packed)); | 729 | } __attribute__((packed)); |
714 | 730 | ||
731 | /* USB 2.0 Extension descriptor */ | ||
715 | #define USB_CAP_TYPE_EXT 2 | 732 | #define USB_CAP_TYPE_EXT 2 |
716 | 733 | ||
717 | struct usb_ext_cap_descriptor { /* Link Power Management */ | 734 | struct usb_ext_cap_descriptor { /* Link Power Management */ |
718 | __u8 bLength; | 735 | __u8 bLength; |
719 | __u8 bDescriptorType; | 736 | __u8 bDescriptorType; |
720 | __u8 bDevCapabilityType; | 737 | __u8 bDevCapabilityType; |
721 | __u8 bmAttributes; | 738 | __le32 bmAttributes; |
722 | #define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ | 739 | #define USB_LPM_SUPPORT (1 << 1) /* supports LPM */ |
723 | } __attribute__((packed)); | 740 | } __attribute__((packed)); |
724 | 741 | ||
742 | #define USB_DT_USB_EXT_CAP_SIZE 7 | ||
743 | |||
744 | /* | ||
745 | * SuperSpeed USB Capability descriptor: Defines the set of SuperSpeed USB | ||
746 | * specific device level capabilities | ||
747 | */ | ||
748 | #define USB_SS_CAP_TYPE 3 | ||
749 | struct usb_ss_cap_descriptor { /* Link Power Management */ | ||
750 | __u8 bLength; | ||
751 | __u8 bDescriptorType; | ||
752 | __u8 bDevCapabilityType; | ||
753 | __u8 bmAttributes; | ||
754 | #define USB_LTM_SUPPORT (1 << 1) /* supports LTM */ | ||
755 | __le16 wSpeedSupported; | ||
756 | #define USB_LOW_SPEED_OPERATION (1) /* Low speed operation */ | ||
757 | #define USB_FULL_SPEED_OPERATION (1 << 1) /* Full speed operation */ | ||
758 | #define USB_HIGH_SPEED_OPERATION (1 << 2) /* High speed operation */ | ||
759 | #define USB_5GBPS_OPERATION (1 << 3) /* Operation at 5Gbps */ | ||
760 | __u8 bFunctionalitySupport; | ||
761 | __u8 bU1devExitLat; | ||
762 | __le16 bU2DevExitLat; | ||
763 | } __attribute__((packed)); | ||
764 | |||
765 | #define USB_DT_USB_SS_CAP_SIZE 10 | ||
766 | |||
767 | /* | ||
768 | * Container ID Capability descriptor: Defines the instance unique ID used to | ||
769 | * identify the instance across all operating modes | ||
770 | */ | ||
771 | #define CONTAINER_ID_TYPE 4 | ||
772 | struct usb_ss_container_id_descriptor { | ||
773 | __u8 bLength; | ||
774 | __u8 bDescriptorType; | ||
775 | __u8 bDevCapabilityType; | ||
776 | __u8 bReserved; | ||
777 | __u8 ContainerID[16]; /* 128-bit number */ | ||
778 | } __attribute__((packed)); | ||
779 | |||
780 | #define USB_DT_USB_SS_CONTN_ID_SIZE 20 | ||
725 | /*-------------------------------------------------------------------------*/ | 781 | /*-------------------------------------------------------------------------*/ |
726 | 782 | ||
727 | /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with | 783 | /* USB_DT_WIRELESS_ENDPOINT_COMP: companion descriptor associated with |
@@ -808,4 +864,14 @@ enum usb_device_state { | |||
808 | */ | 864 | */ |
809 | }; | 865 | }; |
810 | 866 | ||
867 | /*-------------------------------------------------------------------------*/ | ||
868 | |||
869 | /* | ||
870 | * As per USB compliance update, a device that is actively drawing | ||
871 | * more than 100mA from USB must report itself as bus-powered in | ||
872 | * the GetStatus(DEVICE) call. | ||
873 | * http://compliance.usb.org/index.asp?UpdateFile=Electrical&Format=Standard#34 | ||
874 | */ | ||
875 | #define USB_SELF_POWER_VBUS_MAX_DRAW 100 | ||
876 | |||
811 | #endif /* __LINUX_USB_CH9_H */ | 877 | #endif /* __LINUX_USB_CH9_H */ |
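
The SuperSpeed capability descriptor advertises supported speeds, LTM support and the U1/U2 exit latencies; code inspecting a BOS it has already parsed might read the new fields like this (the function itself is illustrative):

    static void foo_show_ss_cap(const struct usb_ss_cap_descriptor *ss)
    {
            u16 speeds = le16_to_cpu(ss->wSpeedSupported);

            if (speeds & USB_5GBPS_OPERATION)
                    pr_info("device can operate at 5 Gbps\n");
            if (ss->bmAttributes & USB_LTM_SUPPORT)
                    pr_info("device supports latency tolerance messaging\n");
            pr_info("U1 exit latency %u us, U2 exit latency %u us\n",
                    ss->bU1devExitLat, le16_to_cpu(ss->bU2DevExitLat));
    }
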
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h index 890bc1472190..3d29a7dcac2d 100644 --- a/include/linux/usb/composite.h +++ b/include/linux/usb/composite.h | |||
@@ -161,8 +161,6 @@ ep_choose(struct usb_gadget *g, struct usb_endpoint_descriptor *hs, | |||
161 | * and by language IDs provided in control requests. | 161 | * and by language IDs provided in control requests. |
162 | * @descriptors: Table of descriptors preceding all function descriptors. | 162 | * @descriptors: Table of descriptors preceding all function descriptors. |
163 | * Examples include OTG and vendor-specific descriptors. | 163 | * Examples include OTG and vendor-specific descriptors. |
164 | * @bind: Called from @usb_add_config() to allocate resources unique to this | ||
165 | * configuration and to call @usb_add_function() for each function used. | ||
166 | * @unbind: Reverses @bind; called as a side effect of unregistering the | 164 | * @unbind: Reverses @bind; called as a side effect of unregistering the |
167 | * driver which added this configuration. | 165 | * driver which added this configuration. |
168 | * @setup: Used to delegate control requests that aren't handled by standard | 166 | * @setup: Used to delegate control requests that aren't handled by standard |
@@ -207,8 +205,7 @@ struct usb_configuration { | |||
207 | * we can't restructure things to avoid mismatching... | 205 | * we can't restructure things to avoid mismatching... |
208 | */ | 206 | */ |
209 | 207 | ||
210 | /* configuration management: bind/unbind */ | 208 | /* configuration management: unbind/setup */ |
211 | int (*bind)(struct usb_configuration *); | ||
212 | void (*unbind)(struct usb_configuration *); | 209 | void (*unbind)(struct usb_configuration *); |
213 | int (*setup)(struct usb_configuration *, | 210 | int (*setup)(struct usb_configuration *, |
214 | const struct usb_ctrlrequest *); | 211 | const struct usb_ctrlrequest *); |
@@ -232,21 +229,26 @@ struct usb_configuration { | |||
232 | }; | 229 | }; |
233 | 230 | ||
234 | int usb_add_config(struct usb_composite_dev *, | 231 | int usb_add_config(struct usb_composite_dev *, |
235 | struct usb_configuration *); | 232 | struct usb_configuration *, |
233 | int (*)(struct usb_configuration *)); | ||
236 | 234 | ||
237 | /** | 235 | /** |
238 | * struct usb_composite_driver - groups configurations into a gadget | 236 | * struct usb_composite_driver - groups configurations into a gadget |
239 | * @name: For diagnostics, identifies the driver. | 237 | * @name: For diagnostics, identifies the driver. |
238 | * @iProduct: Used as iProduct override if @dev->iProduct is not set. | ||
239 | * If NULL, the value of @name is used. | ||
240 | * @iManufacturer: Used as iManufacturer override if @dev->iManufacturer is | ||
241 | * not set. If NULL a default "<system> <release> with <udc>" value | ||
242 | * will be used. | ||
240 | * @dev: Template descriptor for the device, including default device | 243 | * @dev: Template descriptor for the device, including default device |
241 | * identifiers. | 244 | * identifiers. |
242 | * @strings: tables of strings, keyed by identifiers assigned during bind() | 245 | * @strings: tables of strings, keyed by identifiers assigned during bind() |
243 | * and language IDs provided in control requests | 246 | * and language IDs provided in control requests |
244 | * @bind: (REQUIRED) Used to allocate resources that are shared across the | 247 | * @needs_serial: set to 1 if the gadget needs userspace to provide |
245 | * whole device, such as string IDs, and add its configurations using | 248 | * a serial number. If one is not provided, warning will be printed. |
246 | * @usb_add_config(). This may fail by returning a negative errno | 249 | * @unbind: Reverses bind; called as a side effect of unregistering |
247 | * value; it should return zero on successful initialization. | ||
248 | * @unbind: Reverses @bind(); called as a side effect of unregistering | ||
249 | * this driver. | 250 | * this driver. |
251 | * @disconnect: optional driver disconnect method | ||
250 | * @suspend: Notifies when the host stops sending USB traffic, | 252 | * @suspend: Notifies when the host stops sending USB traffic, |
251 | * after function notifications | 253 | * after function notifications |
252 | * @resume: Notifies configuration when the host restarts USB traffic, | 254 | * @resume: Notifies configuration when the host restarts USB traffic, |
@@ -255,7 +257,7 @@ int usb_add_config(struct usb_composite_dev *, | |||
255 | * Devices default to reporting self powered operation. Devices which rely | 257 | * Devices default to reporting self powered operation. Devices which rely |
256 | * on bus powered operation should report this in their @bind() method. | 258 | * on bus powered operation should report this in their @bind() method. |
257 | * | 259 | * |
258 | * Before returning from @bind, various fields in the template descriptor | 260 | * Before returning from bind, various fields in the template descriptor |
259 | * may be overridden. These include the idVendor/idProduct/bcdDevice values | 261 | * may be overridden. These include the idVendor/idProduct/bcdDevice values |
260 | * normally to bind the appropriate host side driver, and the three strings | 262 | * normally to bind the appropriate host side driver, and the three strings |
261 | * (iManufacturer, iProduct, iSerialNumber) normally used to provide user | 263 | * (iManufacturer, iProduct, iSerialNumber) normally used to provide user |
@@ -265,15 +267,12 @@ int usb_add_config(struct usb_composite_dev *, | |||
265 | */ | 267 | */ |
266 | struct usb_composite_driver { | 268 | struct usb_composite_driver { |
267 | const char *name; | 269 | const char *name; |
270 | const char *iProduct; | ||
271 | const char *iManufacturer; | ||
268 | const struct usb_device_descriptor *dev; | 272 | const struct usb_device_descriptor *dev; |
269 | struct usb_gadget_strings **strings; | 273 | struct usb_gadget_strings **strings; |
274 | unsigned needs_serial:1; | ||
270 | 275 | ||
271 | /* REVISIT: bind() functions can be marked __init, which | ||
272 | * makes trouble for section mismatch analysis. See if | ||
273 | * we can't restructure things to avoid mismatching... | ||
274 | */ | ||
275 | |||
276 | int (*bind)(struct usb_composite_dev *); | ||
277 | int (*unbind)(struct usb_composite_dev *); | 276 | int (*unbind)(struct usb_composite_dev *); |
278 | 277 | ||
279 | void (*disconnect)(struct usb_composite_dev *); | 278 | void (*disconnect)(struct usb_composite_dev *); |
@@ -283,8 +282,9 @@ struct usb_composite_driver { | |||
283 | void (*resume)(struct usb_composite_dev *); | 282 | void (*resume)(struct usb_composite_dev *); |
284 | }; | 283 | }; |
285 | 284 | ||
286 | extern int usb_composite_register(struct usb_composite_driver *); | 285 | extern int usb_composite_probe(struct usb_composite_driver *driver, |
287 | extern void usb_composite_unregister(struct usb_composite_driver *); | 286 | int (*bind)(struct usb_composite_dev *cdev)); |
287 | extern void usb_composite_unregister(struct usb_composite_driver *driver); | ||
288 | 288 | ||
289 | 289 | ||
290 | /** | 290 | /** |
@@ -333,6 +333,9 @@ struct usb_composite_dev { | |||
333 | struct list_head configs; | 333 | struct list_head configs; |
334 | struct usb_composite_driver *driver; | 334 | struct usb_composite_driver *driver; |
335 | u8 next_string_id; | 335 | u8 next_string_id; |
336 | u8 manufacturer_override; | ||
337 | u8 product_override; | ||
338 | u8 serial_override; | ||
336 | 339 | ||
337 | /* the gadget driver won't enable the data pullup | 340 | /* the gadget driver won't enable the data pullup |
338 | * while the deactivation count is nonzero. | 341 | * while the deactivation count is nonzero. |
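A minimal sketch (not part of this patch) of how a composite gadget driver adapts to the callback-style interface above: the bind routines are now passed to usb_add_config() and usb_composite_probe() instead of living in the structs. All my_* names and the descriptor ID values are hypothetical placeholders.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/composite.h>

static struct usb_device_descriptor my_device_desc = {
	.bLength		= USB_DT_DEVICE_SIZE,
	.bDescriptorType	= USB_DT_DEVICE,
	.bcdUSB			= cpu_to_le16(0x0200),
	.bDeviceClass		= USB_CLASS_PER_INTERFACE,
	.idVendor		= cpu_to_le16(0x0000),	/* placeholder; use a real VID/PID */
	.idProduct		= cpu_to_le16(0x0000),
};

static struct usb_configuration my_config = {
	.label			= "example config",
	.bConfigurationValue	= 1,
	.bmAttributes		= USB_CONFIG_ATT_SELFPOWER,
};

/* was struct usb_configuration .bind; now passed to usb_add_config() */
static int __init my_config_bind(struct usb_configuration *c)
{
	/* call usb_add_function() here for every function in this config */
	return 0;
}

/* was struct usb_composite_driver .bind; now passed to usb_composite_probe() */
static int __init my_composite_bind(struct usb_composite_dev *cdev)
{
	return usb_add_config(cdev, &my_config, my_config_bind);
}

static struct usb_composite_driver my_composite = {
	.name	= "example-gadget",
	.dev	= &my_device_desc,
};

static int __init my_init(void)
{
	return usb_composite_probe(&my_composite, my_composite_bind);
}
module_init(my_init);

static void __exit my_exit(void)
{
	usb_composite_unregister(&my_composite);
}
module_exit(my_exit);

MODULE_LICENSE("GPL");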
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index d3ef42d7d2f0..006412ce2303 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
@@ -705,11 +705,6 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget) | |||
705 | * struct usb_gadget_driver - driver for usb 'slave' devices | 705 | * struct usb_gadget_driver - driver for usb 'slave' devices |
706 | * @function: String describing the gadget's function | 706 | * @function: String describing the gadget's function |
707 | * @speed: Highest speed the driver handles. | 707 | * @speed: Highest speed the driver handles. |
708 | * @bind: Invoked when the driver is bound to a gadget, usually | ||
709 | * after registering the driver. | ||
710 | * At that point, ep0 is fully initialized, and ep_list holds | ||
711 | * the currently-available endpoints. | ||
712 | * Called in a context that permits sleeping. | ||
713 | * @setup: Invoked for ep0 control requests that aren't handled by | 708 | * @setup: Invoked for ep0 control requests that aren't handled by |
714 | * the hardware level driver. Most calls must be handled by | 709 | * the hardware level driver. Most calls must be handled by |
715 | * the gadget driver, including descriptor and configuration | 710 | * the gadget driver, including descriptor and configuration |
@@ -774,7 +769,6 @@ static inline int usb_gadget_disconnect(struct usb_gadget *gadget) | |||
774 | struct usb_gadget_driver { | 769 | struct usb_gadget_driver { |
775 | char *function; | 770 | char *function; |
776 | enum usb_device_speed speed; | 771 | enum usb_device_speed speed; |
777 | int (*bind)(struct usb_gadget *); | ||
778 | void (*unbind)(struct usb_gadget *); | 772 | void (*unbind)(struct usb_gadget *); |
779 | int (*setup)(struct usb_gadget *, | 773 | int (*setup)(struct usb_gadget *, |
780 | const struct usb_ctrlrequest *); | 774 | const struct usb_ctrlrequest *); |
@@ -798,17 +792,19 @@ struct usb_gadget_driver { | |||
798 | */ | 792 | */ |
799 | 793 | ||
800 | /** | 794 | /** |
801 | * usb_gadget_register_driver - register a gadget driver | 795 | * usb_gadget_probe_driver - probe a gadget driver |
802 | * @driver:the driver being registered | 796 | * @driver: the driver being registered |
797 | * @bind: the driver's bind callback | ||
803 | * Context: can sleep | 798 | * Context: can sleep |
804 | * | 799 | * |
805 | * Call this in your gadget driver's module initialization function, | 800 | * Call this in your gadget driver's module initialization function, |
806 | * to tell the underlying usb controller driver about your driver. | 801 | * to tell the underlying usb controller driver about your driver. |
807 | * The driver's bind() function will be called to bind it to a | 802 | * The @bind() function will be called to bind it to a gadget before this |
808 | * gadget before this registration call returns. It's expected that | 803 | * registration call returns. It's expected that the @bind() function will |
809 | * the bind() functions will be in init sections. | 804 | * be in init sections. |
810 | */ | 805 | */ |
811 | int usb_gadget_register_driver(struct usb_gadget_driver *driver); | 806 | int usb_gadget_probe_driver(struct usb_gadget_driver *driver, |
807 | int (*bind)(struct usb_gadget *)); | ||
812 | 808 | ||
813 | /** | 809 | /** |
814 | * usb_gadget_unregister_driver - unregister a gadget driver | 810 | * usb_gadget_unregister_driver - unregister a gadget driver |
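The same refactoring applies at the UDC level for a raw (non-composite) gadget driver: the bind routine is handed to usb_gadget_probe_driver() rather than stored in struct usb_gadget_driver. A hedged sketch, with all my_* names hypothetical; unregistration still goes through usb_gadget_unregister_driver().

#include <linux/errno.h>
#include <linux/module.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

static int __init my_gadget_bind(struct usb_gadget *gadget)
{
	/* ep0 is initialized and gadget->ep_list lists the usable endpoints */
	return 0;
}

static void my_gadget_unbind(struct usb_gadget *gadget)
{
}

static int my_gadget_setup(struct usb_gadget *gadget,
			   const struct usb_ctrlrequest *ctrl)
{
	return -EOPNOTSUPP;		/* sketch: handle ep0 requests here */
}

static void my_gadget_disconnect(struct usb_gadget *gadget)
{
}

static struct usb_gadget_driver my_gadget_driver = {
	.function	= "example",
	.speed		= USB_SPEED_HIGH,
	.unbind		= my_gadget_unbind,
	.setup		= my_gadget_setup,
	.disconnect	= my_gadget_disconnect,
	.driver		= { .name = "example_gadget" },
};

static int __init my_gadget_init(void)
{
	/* bind is now a parameter instead of a struct member */
	return usb_gadget_probe_driver(&my_gadget_driver, my_gadget_bind);
}
module_init(my_gadget_init);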
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 3b571f1ffbb3..0b6e751ea0b1 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
@@ -329,6 +329,8 @@ extern int usb_hcd_submit_urb(struct urb *urb, gfp_t mem_flags); | |||
329 | extern int usb_hcd_unlink_urb(struct urb *urb, int status); | 329 | extern int usb_hcd_unlink_urb(struct urb *urb, int status); |
330 | extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, | 330 | extern void usb_hcd_giveback_urb(struct usb_hcd *hcd, struct urb *urb, |
331 | int status); | 331 | int status); |
332 | extern void unmap_urb_setup_for_dma(struct usb_hcd *, struct urb *); | ||
333 | extern void unmap_urb_for_dma(struct usb_hcd *, struct urb *); | ||
332 | extern void usb_hcd_flush_endpoint(struct usb_device *udev, | 334 | extern void usb_hcd_flush_endpoint(struct usb_device *udev, |
333 | struct usb_host_endpoint *ep); | 335 | struct usb_host_endpoint *ep); |
334 | extern void usb_hcd_disable_endpoint(struct usb_device *udev, | 336 | extern void usb_hcd_disable_endpoint(struct usb_device *udev, |
diff --git a/include/linux/usb/intel_mid_otg.h b/include/linux/usb/intel_mid_otg.h new file mode 100644 index 000000000000..a0ccf795f362 --- /dev/null +++ b/include/linux/usb/intel_mid_otg.h | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * Intel MID (Langwell/Penwell) USB OTG Transceiver driver | ||
3 | * Copyright (C) 2008 - 2010, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef __INTEL_MID_OTG_H | ||
21 | #define __INTEL_MID_OTG_H | ||
22 | |||
23 | #include <linux/pm.h> | ||
24 | #include <linux/usb/otg.h> | ||
25 | #include <linux/notifier.h> | ||
26 | |||
27 | struct intel_mid_otg_xceiv; | ||
28 | |||
29 | /* This is a common data structure for Intel MID platform to | ||
30 | * save values of the OTG state machine */ | ||
31 | struct otg_hsm { | ||
32 | /* Input */ | ||
33 | int a_bus_resume; | ||
34 | int a_bus_suspend; | ||
35 | int a_conn; | ||
36 | int a_sess_vld; | ||
37 | int a_srp_det; | ||
38 | int a_vbus_vld; | ||
39 | int b_bus_resume; | ||
40 | int b_bus_suspend; | ||
41 | int b_conn; | ||
42 | int b_se0_srp; | ||
43 | int b_ssend_srp; | ||
44 | int b_sess_end; | ||
45 | int b_sess_vld; | ||
46 | int id; | ||
47 | /* id values */ | ||
48 | #define ID_B 0x05 | ||
49 | #define ID_A 0x04 | ||
50 | #define ID_ACA_C 0x03 | ||
51 | #define ID_ACA_B 0x02 | ||
52 | #define ID_ACA_A 0x01 | ||
53 | int power_up; | ||
54 | int adp_change; | ||
55 | int test_device; | ||
56 | |||
57 | /* Internal variables */ | ||
58 | int a_set_b_hnp_en; | ||
59 | int b_srp_done; | ||
60 | int b_hnp_enable; | ||
61 | int hnp_poll_enable; | ||
62 | |||
63 | /* Timeout indicator for timers */ | ||
64 | int a_wait_vrise_tmout; | ||
65 | int a_wait_bcon_tmout; | ||
66 | int a_aidl_bdis_tmout; | ||
67 | int a_bidl_adis_tmout; | ||
68 | int a_bidl_adis_tmr; | ||
69 | int a_wait_vfall_tmout; | ||
70 | int b_ase0_brst_tmout; | ||
71 | int b_bus_suspend_tmout; | ||
72 | int b_srp_init_tmout; | ||
73 | int b_srp_fail_tmout; | ||
74 | int b_srp_fail_tmr; | ||
75 | int b_adp_sense_tmout; | ||
76 | |||
77 | /* Informative variables */ | ||
78 | int a_bus_drop; | ||
79 | int a_bus_req; | ||
80 | int a_clr_err; | ||
81 | int b_bus_req; | ||
82 | int a_suspend_req; | ||
83 | int b_bus_suspend_vld; | ||
84 | |||
85 | /* Output */ | ||
86 | int drv_vbus; | ||
87 | int loc_conn; | ||
88 | int loc_sof; | ||
89 | |||
90 | /* Others */ | ||
91 | int vbus_srp_up; | ||
92 | }; | ||
93 | |||
94 | /* must provide ULPI access function to read/write registers implemented in | ||
95 | * ULPI address space */ | ||
96 | struct iotg_ulpi_access_ops { | ||
97 | int (*read)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 *val); | ||
98 | int (*write)(struct intel_mid_otg_xceiv *iotg, u8 reg, u8 val); | ||
99 | }; | ||
100 | |||
101 | #define OTG_A_DEVICE 0x0 | ||
102 | #define OTG_B_DEVICE 0x1 | ||
103 | |||
104 | /* | ||
105 | * The Intel MID (Langwell/Penwell) OTG transceiver driver needs to interact | ||
106 | * with the device and host controller drivers to implement USB OTG | ||
107 | * functionality; additional function members are therefore added on top of | ||
108 | * the otg_transceiver data structure. | ||
109 | */ | ||
110 | struct intel_mid_otg_xceiv { | ||
111 | struct otg_transceiver otg; | ||
112 | struct otg_hsm hsm; | ||
113 | |||
114 | /* base address */ | ||
115 | void __iomem *base; | ||
116 | |||
117 | /* ops to access ulpi */ | ||
118 | struct iotg_ulpi_access_ops ulpi_ops; | ||
119 | |||
120 | /* atomic notifier for interrupt context */ | ||
121 | struct atomic_notifier_head iotg_notifier; | ||
122 | |||
123 | /* start/stop USB Host function */ | ||
124 | int (*start_host)(struct intel_mid_otg_xceiv *iotg); | ||
125 | int (*stop_host)(struct intel_mid_otg_xceiv *iotg); | ||
126 | |||
127 | /* start/stop USB Peripheral function */ | ||
128 | int (*start_peripheral)(struct intel_mid_otg_xceiv *iotg); | ||
129 | int (*stop_peripheral)(struct intel_mid_otg_xceiv *iotg); | ||
130 | |||
131 | /* start/stop ADP sense/probe function */ | ||
132 | int (*set_adp_probe)(struct intel_mid_otg_xceiv *iotg, | ||
133 | bool enabled, int dev); | ||
134 | int (*set_adp_sense)(struct intel_mid_otg_xceiv *iotg, | ||
135 | bool enabled); | ||
136 | |||
137 | #ifdef CONFIG_PM | ||
138 | /* suspend/resume USB host function */ | ||
139 | int (*suspend_host)(struct intel_mid_otg_xceiv *iotg, | ||
140 | pm_message_t message); | ||
141 | int (*resume_host)(struct intel_mid_otg_xceiv *iotg); | ||
142 | |||
143 | int (*suspend_peripheral)(struct intel_mid_otg_xceiv *iotg, | ||
144 | pm_message_t message); | ||
145 | int (*resume_peripheral)(struct intel_mid_otg_xceiv *iotg); | ||
146 | #endif | ||
147 | |||
148 | }; | ||
149 | static inline | ||
150 | struct intel_mid_otg_xceiv *otg_to_mid_xceiv(struct otg_transceiver *otg) | ||
151 | { | ||
152 | return container_of(otg, struct intel_mid_otg_xceiv, otg); | ||
153 | } | ||
154 | |||
155 | #define MID_OTG_NOTIFY_CONNECT 0x0001 | ||
156 | #define MID_OTG_NOTIFY_DISCONN 0x0002 | ||
157 | #define MID_OTG_NOTIFY_HSUSPEND 0x0003 | ||
158 | #define MID_OTG_NOTIFY_HRESUME 0x0004 | ||
159 | #define MID_OTG_NOTIFY_CSUSPEND 0x0005 | ||
160 | #define MID_OTG_NOTIFY_CRESUME 0x0006 | ||
161 | #define MID_OTG_NOTIFY_HOSTADD 0x0007 | ||
162 | #define MID_OTG_NOTIFY_HOSTREMOVE 0x0008 | ||
163 | #define MID_OTG_NOTIFY_CLIENTADD 0x0009 | ||
164 | #define MID_OTG_NOTIFY_CLIENTREMOVE 0x000a | ||
165 | |||
166 | static inline int | ||
167 | intel_mid_otg_register_notifier(struct intel_mid_otg_xceiv *iotg, | ||
168 | struct notifier_block *nb) | ||
169 | { | ||
170 | return atomic_notifier_chain_register(&iotg->iotg_notifier, nb); | ||
171 | } | ||
172 | |||
173 | static inline void | ||
174 | intel_mid_otg_unregister_notifier(struct intel_mid_otg_xceiv *iotg, | ||
175 | struct notifier_block *nb) | ||
176 | { | ||
177 | atomic_notifier_chain_unregister(&iotg->iotg_notifier, nb); | ||
178 | } | ||
179 | |||
180 | #endif /* __INTEL_MID_OTG_H */ | ||
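A short usage sketch for the notifier helpers defined above, assuming a client driver that wants the MID_OTG_NOTIFY_* events; the my_* names are hypothetical, and the transceiver reference must eventually be dropped with otg_put_transceiver().

#include <linux/errno.h>
#include <linux/notifier.h>
#include <linux/usb/otg.h>
#include <linux/usb/intel_mid_otg.h>

static int my_otg_event(struct notifier_block *nb, unsigned long event,
			void *data)
{
	switch (event) {
	case MID_OTG_NOTIFY_CONNECT:
		/* react to connect */
		break;
	case MID_OTG_NOTIFY_DISCONN:
		/* react to disconnect */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_otg_nb = {
	.notifier_call	= my_otg_event,
};

static int my_register_otg_notifier(void)
{
	struct otg_transceiver *otg = otg_get_transceiver();
	struct intel_mid_otg_xceiv *iotg;

	if (!otg)
		return -ENODEV;	/* no transceiver registered, or OTG utils disabled */

	iotg = otg_to_mid_xceiv(otg);
	/* balance with intel_mid_otg_unregister_notifier() and otg_put_transceiver() */
	return intel_mid_otg_register_notifier(iotg, &my_otg_nb);
}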
diff --git a/include/linux/usb/langwell_otg.h b/include/linux/usb/langwell_otg.h new file mode 100644 index 000000000000..51f17b16d312 --- /dev/null +++ b/include/linux/usb/langwell_otg.h | |||
@@ -0,0 +1,139 @@ | |||
1 | /* | ||
2 | * Intel Langwell USB OTG transceiver driver | ||
3 | * Copyright (C) 2008 - 2010, Intel Corporation. | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify it | ||
6 | * under the terms and conditions of the GNU General Public License, | ||
7 | * version 2, as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
12 | * more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License along with | ||
15 | * this program; if not, write to the Free Software Foundation, Inc., | ||
16 | * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | * | ||
18 | */ | ||
19 | |||
20 | #ifndef __LANGWELL_OTG_H | ||
21 | #define __LANGWELL_OTG_H | ||
22 | |||
23 | #include <linux/usb/intel_mid_otg.h> | ||
24 | |||
25 | #define CI_USBCMD 0x30 | ||
26 | # define USBCMD_RST BIT(1) | ||
27 | # define USBCMD_RS BIT(0) | ||
28 | #define CI_USBSTS 0x34 | ||
29 | # define USBSTS_SLI BIT(8) | ||
30 | # define USBSTS_URI BIT(6) | ||
31 | # define USBSTS_PCI BIT(2) | ||
32 | #define CI_PORTSC1 0x74 | ||
33 | # define PORTSC_PP BIT(12) | ||
34 | # define PORTSC_LS (BIT(11) | BIT(10)) | ||
35 | # define PORTSC_SUSP BIT(7) | ||
36 | # define PORTSC_CCS BIT(0) | ||
37 | #define CI_HOSTPC1 0xb4 | ||
38 | # define HOSTPC1_PHCD BIT(22) | ||
39 | #define CI_OTGSC 0xf4 | ||
40 | # define OTGSC_DPIE BIT(30) | ||
41 | # define OTGSC_1MSE BIT(29) | ||
42 | # define OTGSC_BSEIE BIT(28) | ||
43 | # define OTGSC_BSVIE BIT(27) | ||
44 | # define OTGSC_ASVIE BIT(26) | ||
45 | # define OTGSC_AVVIE BIT(25) | ||
46 | # define OTGSC_IDIE BIT(24) | ||
47 | # define OTGSC_DPIS BIT(22) | ||
48 | # define OTGSC_1MSS BIT(21) | ||
49 | # define OTGSC_BSEIS BIT(20) | ||
50 | # define OTGSC_BSVIS BIT(19) | ||
51 | # define OTGSC_ASVIS BIT(18) | ||
52 | # define OTGSC_AVVIS BIT(17) | ||
53 | # define OTGSC_IDIS BIT(16) | ||
54 | # define OTGSC_DPS BIT(14) | ||
55 | # define OTGSC_1MST BIT(13) | ||
56 | # define OTGSC_BSE BIT(12) | ||
57 | # define OTGSC_BSV BIT(11) | ||
58 | # define OTGSC_ASV BIT(10) | ||
59 | # define OTGSC_AVV BIT(9) | ||
60 | # define OTGSC_ID BIT(8) | ||
61 | # define OTGSC_HABA BIT(7) | ||
62 | # define OTGSC_HADP BIT(6) | ||
63 | # define OTGSC_IDPU BIT(5) | ||
64 | # define OTGSC_DP BIT(4) | ||
65 | # define OTGSC_OT BIT(3) | ||
66 | # define OTGSC_HAAR BIT(2) | ||
67 | # define OTGSC_VC BIT(1) | ||
68 | # define OTGSC_VD BIT(0) | ||
69 | # define OTGSC_INTEN_MASK (0x7f << 24) | ||
70 | # define OTGSC_INT_MASK (0x5f << 24) | ||
71 | # define OTGSC_INTSTS_MASK (0x7f << 16) | ||
72 | #define CI_USBMODE 0xf8 | ||
73 | # define USBMODE_CM (BIT(1) | BIT(0)) | ||
74 | # define USBMODE_IDLE 0 | ||
75 | # define USBMODE_DEVICE 0x2 | ||
76 | # define USBMODE_HOST 0x3 | ||
77 | #define USBCFG_ADDR 0xff10801c | ||
78 | #define USBCFG_LEN 4 | ||
79 | # define USBCFG_VBUSVAL BIT(14) | ||
80 | # define USBCFG_AVALID BIT(13) | ||
81 | # define USBCFG_BVALID BIT(12) | ||
82 | # define USBCFG_SESEND BIT(11) | ||
83 | |||
84 | #define INTR_DUMMY_MASK (USBSTS_SLI | USBSTS_URI | USBSTS_PCI) | ||
85 | |||
86 | enum langwell_otg_timer_type { | ||
87 | TA_WAIT_VRISE_TMR, | ||
88 | TA_WAIT_BCON_TMR, | ||
89 | TA_AIDL_BDIS_TMR, | ||
90 | TB_ASE0_BRST_TMR, | ||
91 | TB_SE0_SRP_TMR, | ||
92 | TB_SRP_INIT_TMR, | ||
93 | TB_SRP_FAIL_TMR, | ||
94 | TB_BUS_SUSPEND_TMR | ||
95 | }; | ||
96 | |||
97 | #define TA_WAIT_VRISE 100 | ||
98 | #define TA_WAIT_BCON 30000 | ||
99 | #define TA_AIDL_BDIS 15000 | ||
100 | #define TB_ASE0_BRST 5000 | ||
101 | #define TB_SE0_SRP 2 | ||
102 | #define TB_SRP_INIT 100 | ||
103 | #define TB_SRP_FAIL 5500 | ||
104 | #define TB_BUS_SUSPEND 500 | ||
105 | |||
106 | struct langwell_otg_timer { | ||
107 | unsigned long expires; /* Number of count increase to timeout */ | ||
108 | unsigned long count; /* Tick counter */ | ||
109 | void (*function)(unsigned long); /* Timeout function */ | ||
110 | unsigned long data; /* Data passed to function */ | ||
111 | struct list_head list; | ||
112 | }; | ||
113 | |||
114 | struct langwell_otg { | ||
115 | struct intel_mid_otg_xceiv iotg; | ||
116 | struct device *dev; | ||
117 | |||
118 | void __iomem *usbcfg; /* SCCBUSB config Reg */ | ||
119 | |||
120 | unsigned region; | ||
121 | unsigned cfg_region; | ||
122 | |||
123 | struct work_struct work; | ||
124 | struct workqueue_struct *qwork; | ||
125 | struct timer_list hsm_timer; | ||
126 | |||
127 | spinlock_t lock; | ||
128 | spinlock_t wq_lock; | ||
129 | |||
130 | struct notifier_block iotg_notifier; | ||
131 | }; | ||
132 | |||
133 | static inline | ||
134 | struct langwell_otg *mid_xceiv_to_lnw(struct intel_mid_otg_xceiv *iotg) | ||
135 | { | ||
136 | return container_of(iotg, struct langwell_otg, iotg); | ||
137 | } | ||
138 | |||
139 | #endif /* __LANGWELL_OTG_H__ */ | ||
diff --git a/include/linux/usb/ncm.h b/include/linux/usb/ncm.h deleted file mode 100644 index 006d1064c8b2..000000000000 --- a/include/linux/usb/ncm.h +++ /dev/null | |||
@@ -1,114 +0,0 @@ | |||
1 | /* | ||
2 | * USB CDC NCM auxiliary definitions | ||
3 | */ | ||
4 | |||
5 | #ifndef __LINUX_USB_NCM_H | ||
6 | #define __LINUX_USB_NCM_H | ||
7 | |||
8 | #include <linux/types.h> | ||
9 | #include <linux/usb/cdc.h> | ||
10 | #include <asm/unaligned.h> | ||
11 | |||
12 | #define NCM_NTB_MIN_IN_SIZE 2048 | ||
13 | #define NCM_NTB_MIN_OUT_SIZE 2048 | ||
14 | |||
15 | #define NCM_CONTROL_TIMEOUT (5 * 1000) | ||
16 | |||
17 | /* bmNetworkCapabilities */ | ||
18 | |||
19 | #define NCM_NCAP_ETH_FILTER (1 << 0) | ||
20 | #define NCM_NCAP_NET_ADDRESS (1 << 1) | ||
21 | #define NCM_NCAP_ENCAP_COMM (1 << 2) | ||
22 | #define NCM_NCAP_MAX_DGRAM (1 << 3) | ||
23 | #define NCM_NCAP_CRC_MODE (1 << 4) | ||
24 | |||
25 | /* | ||
26 | * Here are options for NCM Datagram Pointer table (NDP) parser. | ||
27 | * There are 2 different formats: NDP16 and NDP32 in the spec (ch. 3), | ||
28 | * in NDP16 offsets and sizes fields are 1 16bit word wide, | ||
29 | * in NDP32 -- 2 16bit words wide. Also signatures are different. | ||
30 | * To make the parser code the same, put the differences in the structure, | ||
31 | * and switch pointers to the structures when the format is changed. | ||
32 | */ | ||
33 | |||
34 | struct ndp_parser_opts { | ||
35 | u32 nth_sign; | ||
36 | u32 ndp_sign; | ||
37 | unsigned nth_size; | ||
38 | unsigned ndp_size; | ||
39 | unsigned ndplen_align; | ||
40 | /* sizes in u16 units */ | ||
41 | unsigned dgram_item_len; /* index or length */ | ||
42 | unsigned block_length; | ||
43 | unsigned fp_index; | ||
44 | unsigned reserved1; | ||
45 | unsigned reserved2; | ||
46 | unsigned next_fp_index; | ||
47 | }; | ||
48 | |||
49 | #define INIT_NDP16_OPTS { \ | ||
50 | .nth_sign = NCM_NTH16_SIGN, \ | ||
51 | .ndp_sign = NCM_NDP16_NOCRC_SIGN, \ | ||
52 | .nth_size = sizeof(struct usb_cdc_ncm_nth16), \ | ||
53 | .ndp_size = sizeof(struct usb_cdc_ncm_ndp16), \ | ||
54 | .ndplen_align = 4, \ | ||
55 | .dgram_item_len = 1, \ | ||
56 | .block_length = 1, \ | ||
57 | .fp_index = 1, \ | ||
58 | .reserved1 = 0, \ | ||
59 | .reserved2 = 0, \ | ||
60 | .next_fp_index = 1, \ | ||
61 | } | ||
62 | |||
63 | |||
64 | #define INIT_NDP32_OPTS { \ | ||
65 | .nth_sign = NCM_NTH32_SIGN, \ | ||
66 | .ndp_sign = NCM_NDP32_NOCRC_SIGN, \ | ||
67 | .nth_size = sizeof(struct usb_cdc_ncm_nth32), \ | ||
68 | .ndp_size = sizeof(struct usb_cdc_ncm_ndp32), \ | ||
69 | .ndplen_align = 8, \ | ||
70 | .dgram_item_len = 2, \ | ||
71 | .block_length = 2, \ | ||
72 | .fp_index = 2, \ | ||
73 | .reserved1 = 1, \ | ||
74 | .reserved2 = 2, \ | ||
75 | .next_fp_index = 2, \ | ||
76 | } | ||
77 | |||
78 | static inline void put_ncm(__le16 **p, unsigned size, unsigned val) | ||
79 | { | ||
80 | switch (size) { | ||
81 | case 1: | ||
82 | put_unaligned_le16((u16)val, *p); | ||
83 | break; | ||
84 | case 2: | ||
85 | put_unaligned_le32((u32)val, *p); | ||
86 | |||
87 | break; | ||
88 | default: | ||
89 | BUG(); | ||
90 | } | ||
91 | |||
92 | *p += size; | ||
93 | } | ||
94 | |||
95 | static inline unsigned get_ncm(__le16 **p, unsigned size) | ||
96 | { | ||
97 | unsigned tmp; | ||
98 | |||
99 | switch (size) { | ||
100 | case 1: | ||
101 | tmp = get_unaligned_le16(*p); | ||
102 | break; | ||
103 | case 2: | ||
104 | tmp = get_unaligned_le32(*p); | ||
105 | break; | ||
106 | default: | ||
107 | BUG(); | ||
108 | } | ||
109 | |||
110 | *p += size; | ||
111 | return tmp; | ||
112 | } | ||
113 | |||
114 | #endif /* __LINUX_USB_NCM_H */ | ||
diff --git a/include/linux/usb/otg.h b/include/linux/usb/otg.h index 545cba73ccaf..0a5b3711e502 100644 --- a/include/linux/usb/otg.h +++ b/include/linux/usb/otg.h | |||
@@ -164,8 +164,19 @@ otg_shutdown(struct otg_transceiver *otg) | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /* for usb host and peripheral controller drivers */ | 166 | /* for usb host and peripheral controller drivers */ |
167 | #ifdef CONFIG_USB_OTG_UTILS | ||
167 | extern struct otg_transceiver *otg_get_transceiver(void); | 168 | extern struct otg_transceiver *otg_get_transceiver(void); |
168 | extern void otg_put_transceiver(struct otg_transceiver *); | 169 | extern void otg_put_transceiver(struct otg_transceiver *); |
170 | #else | ||
171 | static inline struct otg_transceiver *otg_get_transceiver(void) | ||
172 | { | ||
173 | return NULL; | ||
174 | } | ||
175 | |||
176 | static inline void otg_put_transceiver(struct otg_transceiver *x) | ||
177 | { | ||
178 | } | ||
179 | #endif | ||
169 | 180 | ||
170 | /* Context: can sleep */ | 181 | /* Context: can sleep */ |
171 | static inline int | 182 | static inline int |
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h index 55675b1efb28..16d682f4f7c3 100644 --- a/include/linux/usb/serial.h +++ b/include/linux/usb/serial.h | |||
@@ -271,6 +271,8 @@ struct usb_serial_driver { | |||
271 | int (*tiocmget)(struct tty_struct *tty, struct file *file); | 271 | int (*tiocmget)(struct tty_struct *tty, struct file *file); |
272 | int (*tiocmset)(struct tty_struct *tty, struct file *file, | 272 | int (*tiocmset)(struct tty_struct *tty, struct file *file, |
273 | unsigned int set, unsigned int clear); | 273 | unsigned int set, unsigned int clear); |
274 | int (*get_icount)(struct tty_struct *tty, | ||
275 | struct serial_icounter_struct *icount); | ||
274 | /* Called by the tty layer for port level work. There may or may not | 276 | /* Called by the tty layer for port level work. There may or may not |
275 | be an attached tty at this point */ | 277 | be an attached tty at this point */ |
276 | void (*dtr_rts)(struct usb_serial_port *port, int on); | 278 | void (*dtr_rts)(struct usb_serial_port *port, int on); |
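A sketch of how a usb-serial driver might implement the new get_icount hook; struct my_port_private and its counters are hypothetical, only the callback signature comes from the header.

#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/usb/serial.h>

struct my_port_private {
	struct async_icount icount;	/* counters updated from the driver's interrupt path */
};

static int my_get_icount(struct tty_struct *tty,
			 struct serial_icounter_struct *icount)
{
	struct usb_serial_port *port = tty->driver_data;
	struct my_port_private *priv = usb_get_serial_port_data(port);

	icount->cts	= priv->icount.cts;
	icount->dsr	= priv->icount.dsr;
	icount->rng	= priv->icount.rng;
	icount->dcd	= priv->icount.dcd;
	icount->rx	= priv->icount.rx;
	icount->tx	= priv->icount.tx;
	icount->frame	= priv->icount.frame;
	icount->overrun	= priv->icount.overrun;
	icount->parity	= priv->icount.parity;
	icount->brk	= priv->icount.brk;
	return 0;
}

/* hooked up in the driver definition: .get_icount = my_get_icount, */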
diff --git a/include/linux/usb/storage.h b/include/linux/usb/storage.h new file mode 100644 index 000000000000..d7fc910f1dc4 --- /dev/null +++ b/include/linux/usb/storage.h | |||
@@ -0,0 +1,48 @@ | |||
1 | #ifndef __LINUX_USB_STORAGE_H | ||
2 | #define __LINUX_USB_STORAGE_H | ||
3 | |||
4 | /* | ||
5 | * linux/usb/storage.h | ||
6 | * | ||
7 | * Copyright Matthew Wilcox for Intel Corp, 2010 | ||
8 | * | ||
9 | * This file contains definitions taken from the | ||
10 | * USB Mass Storage Class Specification Overview | ||
11 | * | ||
12 | * Distributed under the terms of the GNU GPL, version two. | ||
13 | */ | ||
14 | |||
15 | /* Storage subclass codes */ | ||
16 | |||
17 | #define USB_SC_RBC 0x01 /* Typically, flash devices */ | ||
18 | #define USB_SC_8020 0x02 /* CD-ROM */ | ||
19 | #define USB_SC_QIC 0x03 /* QIC-157 Tapes */ | ||
20 | #define USB_SC_UFI 0x04 /* Floppy */ | ||
21 | #define USB_SC_8070 0x05 /* Removable media */ | ||
22 | #define USB_SC_SCSI 0x06 /* Transparent */ | ||
23 | #define USB_SC_LOCKABLE 0x07 /* Password-protected */ | ||
24 | |||
25 | #define USB_SC_ISD200 0xf0 /* ISD200 ATA */ | ||
26 | #define USB_SC_CYP_ATACB 0xf1 /* Cypress ATACB */ | ||
27 | #define USB_SC_DEVICE 0xff /* Use device's value */ | ||
28 | |||
29 | /* Storage protocol codes */ | ||
30 | |||
31 | #define USB_PR_CBI 0x00 /* Control/Bulk/Interrupt */ | ||
32 | #define USB_PR_CB 0x01 /* Control/Bulk w/o interrupt */ | ||
33 | #define USB_PR_BULK 0x50 /* bulk only */ | ||
34 | #define USB_PR_UAS 0x62 /* USB Attached SCSI */ | ||
35 | |||
36 | #define USB_PR_USBAT 0x80 /* SCM-ATAPI bridge */ | ||
37 | #define USB_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */ | ||
38 | #define USB_PR_SDDR55 0x82 /* SDDR-55 (made up) */ | ||
39 | #define USB_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */ | ||
40 | #define USB_PR_FREECOM 0xf1 /* Freecom */ | ||
41 | #define USB_PR_DATAFAB 0xf2 /* Datafab chipsets */ | ||
42 | #define USB_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */ | ||
43 | #define USB_PR_ALAUDA 0xf4 /* Alauda chipsets */ | ||
44 | #define USB_PR_KARMA 0xf5 /* Rio Karma */ | ||
45 | |||
46 | #define USB_PR_DEVICE 0xff /* Use device's value */ | ||
47 | |||
48 | #endif | ||
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h index a4b947e470a5..71693d4a4fe1 100644 --- a/include/linux/usb_usual.h +++ b/include/linux/usb_usual.h | |||
@@ -58,7 +58,11 @@ | |||
58 | US_FLAG(CAPACITY_OK, 0x00010000) \ | 58 | US_FLAG(CAPACITY_OK, 0x00010000) \ |
59 | /* READ CAPACITY response is correct */ \ | 59 | /* READ CAPACITY response is correct */ \ |
60 | US_FLAG(BAD_SENSE, 0x00020000) \ | 60 | US_FLAG(BAD_SENSE, 0x00020000) \ |
61 | /* Bad Sense (never more than 18 bytes) */ | 61 | /* Bad Sense (never more than 18 bytes) */ \ |
62 | US_FLAG(NO_READ_DISC_INFO, 0x00040000) \ | ||
63 | /* cannot handle READ_DISC_INFO */ \ | ||
64 | US_FLAG(NO_READ_CAPACITY_16, 0x00080000) \ | ||
65 | /* cannot handle READ_CAPACITY_16 */ | ||
62 | 66 | ||
63 | #define US_FLAG(name, value) US_FL_##name = value , | 67 | #define US_FLAG(name, value) US_FL_##name = value , |
64 | enum { US_DO_ALL_FLAGS }; | 68 | enum { US_DO_ALL_FLAGS }; |
@@ -74,42 +78,7 @@ enum { US_DO_ALL_FLAGS }; | |||
74 | #define USB_US_TYPE(flags) (((flags) >> 24) & 0xFF) | 78 | #define USB_US_TYPE(flags) (((flags) >> 24) & 0xFF) |
75 | #define USB_US_ORIG_FLAGS(flags) ((flags) & 0x00FFFFFF) | 79 | #define USB_US_ORIG_FLAGS(flags) ((flags) & 0x00FFFFFF) |
76 | 80 | ||
77 | /* | 81 | #include <linux/usb/storage.h> |
78 | * This is probably not the best place to keep these constants, conceptually. | ||
79 | * But it's the only header included into all places which need them. | ||
80 | */ | ||
81 | |||
82 | /* Sub Classes */ | ||
83 | |||
84 | #define US_SC_RBC 0x01 /* Typically, flash devices */ | ||
85 | #define US_SC_8020 0x02 /* CD-ROM */ | ||
86 | #define US_SC_QIC 0x03 /* QIC-157 Tapes */ | ||
87 | #define US_SC_UFI 0x04 /* Floppy */ | ||
88 | #define US_SC_8070 0x05 /* Removable media */ | ||
89 | #define US_SC_SCSI 0x06 /* Transparent */ | ||
90 | #define US_SC_LOCKABLE 0x07 /* Password-protected */ | ||
91 | |||
92 | #define US_SC_ISD200 0xf0 /* ISD200 ATA */ | ||
93 | #define US_SC_CYP_ATACB 0xf1 /* Cypress ATACB */ | ||
94 | #define US_SC_DEVICE 0xff /* Use device's value */ | ||
95 | |||
96 | /* Protocols */ | ||
97 | |||
98 | #define US_PR_CBI 0x00 /* Control/Bulk/Interrupt */ | ||
99 | #define US_PR_CB 0x01 /* Control/Bulk w/o interrupt */ | ||
100 | #define US_PR_BULK 0x50 /* bulk only */ | ||
101 | |||
102 | #define US_PR_USBAT 0x80 /* SCM-ATAPI bridge */ | ||
103 | #define US_PR_EUSB_SDDR09 0x81 /* SCM-SCSI bridge for SDDR-09 */ | ||
104 | #define US_PR_SDDR55 0x82 /* SDDR-55 (made up) */ | ||
105 | #define US_PR_DPCM_USB 0xf0 /* Combination CB/SDDR09 */ | ||
106 | #define US_PR_FREECOM 0xf1 /* Freecom */ | ||
107 | #define US_PR_DATAFAB 0xf2 /* Datafab chipsets */ | ||
108 | #define US_PR_JUMPSHOT 0xf3 /* Lexar Jumpshot */ | ||
109 | #define US_PR_ALAUDA 0xf4 /* Alauda chipsets */ | ||
110 | #define US_PR_KARMA 0xf5 /* Rio Karma */ | ||
111 | |||
112 | #define US_PR_DEVICE 0xff /* Use device's value */ | ||
113 | 82 | ||
114 | /* | 83 | /* |
115 | */ | 84 | */ |
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index 6228b5b77d35..e9e1524b582c 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h | |||
@@ -93,8 +93,11 @@ extern void vga_set_legacy_decoding(struct pci_dev *pdev, | |||
93 | * Nested calls are supported (a per-resource counter is maintained) | 93 | * Nested calls are supported (a per-resource counter is maintained) |
94 | */ | 94 | */ |
95 | 95 | ||
96 | extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, | 96 | #if defined(CONFIG_VGA_ARB) |
97 | int interruptible); | 97 | extern int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible); |
98 | #else | ||
99 | static inline int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible) { return 0; } | ||
100 | #endif | ||
98 | 101 | ||
99 | /** | 102 | /** |
100 | * vga_get_interruptible | 103 | * vga_get_interruptible |
@@ -131,7 +134,11 @@ static inline int vga_get_uninterruptible(struct pci_dev *pdev, | |||
131 | * are already locked by another card. It can be called in any context | 134 | * are already locked by another card. It can be called in any context |
132 | */ | 135 | */ |
133 | 136 | ||
137 | #if defined(CONFIG_VGA_ARB) | ||
134 | extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); | 138 | extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); |
139 | #else | ||
140 | static inline int vga_tryget(struct pci_dev *pdev, unsigned int rsrc) { return 0; } | ||
141 | #endif | ||
135 | 142 | ||
136 | /** | 143 | /** |
137 | * vga_put - release lock on legacy VGA resources | 144 | * vga_put - release lock on legacy VGA resources |
@@ -146,7 +153,11 @@ extern int vga_tryget(struct pci_dev *pdev, unsigned int rsrc); | |||
146 | * released if the counter reaches 0. | 153 | * released if the counter reaches 0. |
147 | */ | 154 | */ |
148 | 155 | ||
156 | #if defined(CONFIG_VGA_ARB) | ||
149 | extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); | 157 | extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); |
158 | #else | ||
159 | #define vga_put(pdev, rsrc) | ||
160 | #endif | ||
150 | 161 | ||
151 | 162 | ||
152 | /** | 163 | /** |
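A usage sketch for the arbiter calls documented above; the inline stubs simply let the same code compile away when CONFIG_VGA_ARB is off. my_modeset is a hypothetical name.

#include <linux/pci.h>
#include <linux/vgaarb.h>

/* hold the legacy VGA ranges around a register sequence */
static int my_modeset(struct pci_dev *pdev)
{
	int ret;

	/* may sleep; returns non-zero only if interrupted while waiting */
	ret = vga_get(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM, 1);
	if (ret)
		return ret;

	/* ... program the device while legacy decodes are routed to it ... */

	vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
	return 0;
}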
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h index 01c2145118dc..63a4fe6d51bd 100644 --- a/include/linux/vmalloc.h +++ b/include/linux/vmalloc.h | |||
@@ -117,10 +117,12 @@ extern rwlock_t vmlist_lock; | |||
117 | extern struct vm_struct *vmlist; | 117 | extern struct vm_struct *vmlist; |
118 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); | 118 | extern __init void vm_area_register_early(struct vm_struct *vm, size_t align); |
119 | 119 | ||
120 | #ifdef CONFIG_SMP | ||
120 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, | 121 | struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets, |
121 | const size_t *sizes, int nr_vms, | 122 | const size_t *sizes, int nr_vms, |
122 | size_t align, gfp_t gfp_mask); | 123 | size_t align, gfp_t gfp_mask); |
123 | 124 | ||
124 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); | 125 | void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); |
126 | #endif | ||
125 | 127 | ||
126 | #endif /* _LINUX_VMALLOC_H */ | 128 | #endif /* _LINUX_VMALLOC_H */ |
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 7f43ccdc1d38..eaaea37b3b75 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -170,6 +170,28 @@ static inline unsigned long zone_page_state(struct zone *zone, | |||
170 | return x; | 170 | return x; |
171 | } | 171 | } |
172 | 172 | ||
173 | /* | ||
174 | * More accurate version that also considers the currently pending | ||
175 | * deltas. For that we need to loop over all cpus to find the current | ||
176 | * deltas. There is no synchronization so the result cannot be | ||
177 | * exactly accurate either. | ||
178 | */ | ||
179 | static inline unsigned long zone_page_state_snapshot(struct zone *zone, | ||
180 | enum zone_stat_item item) | ||
181 | { | ||
182 | long x = atomic_long_read(&zone->vm_stat[item]); | ||
183 | |||
184 | #ifdef CONFIG_SMP | ||
185 | int cpu; | ||
186 | for_each_online_cpu(cpu) | ||
187 | x += per_cpu_ptr(zone->pageset, cpu)->vm_stat_diff[item]; | ||
188 | |||
189 | if (x < 0) | ||
190 | x = 0; | ||
191 | #endif | ||
192 | return x; | ||
193 | } | ||
194 | |||
173 | extern unsigned long global_reclaimable_pages(void); | 195 | extern unsigned long global_reclaimable_pages(void); |
174 | extern unsigned long zone_reclaimable_pages(struct zone *zone); | 196 | extern unsigned long zone_reclaimable_pages(struct zone *zone); |
175 | 197 | ||
diff --git a/include/linux/wait.h b/include/linux/wait.h index 0836ccc57121..3efc9f3f43a0 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -614,6 +614,7 @@ int wake_bit_function(wait_queue_t *wait, unsigned mode, int sync, void *key); | |||
614 | (wait)->private = current; \ | 614 | (wait)->private = current; \ |
615 | (wait)->func = autoremove_wake_function; \ | 615 | (wait)->func = autoremove_wake_function; \ |
616 | INIT_LIST_HEAD(&(wait)->task_list); \ | 616 | INIT_LIST_HEAD(&(wait)->task_list); \ |
617 | (wait)->flags = 0; \ | ||
617 | } while (0) | 618 | } while (0) |
618 | 619 | ||
619 | /** | 620 | /** |
diff --git a/include/linux/wireless.h b/include/linux/wireless.h index e6827eedf18b..4395b28bb86c 100644 --- a/include/linux/wireless.h +++ b/include/linux/wireless.h | |||
@@ -1157,6 +1157,6 @@ struct __compat_iw_event { | |||
1157 | #define IW_EV_PARAM_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_param)) | 1157 | #define IW_EV_PARAM_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_param)) |
1158 | #define IW_EV_ADDR_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct sockaddr)) | 1158 | #define IW_EV_ADDR_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct sockaddr)) |
1159 | #define IW_EV_QUAL_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_quality)) | 1159 | #define IW_EV_QUAL_PK_LEN (IW_EV_LCP_PK_LEN + sizeof(struct iw_quality)) |
1160 | #define IW_EV_POINT_PK_LEN (IW_EV_LCP_LEN + 4) | 1160 | #define IW_EV_POINT_PK_LEN (IW_EV_LCP_PK_LEN + 4) |
1161 | 1161 | ||
1162 | #endif /* _LINUX_WIRELESS_H */ | 1162 | #endif /* _LINUX_WIRELESS_H */ |
diff --git a/include/linux/spi/wl12xx.h b/include/linux/wl12xx.h index a223ecbc71ef..4f902e1908aa 100644 --- a/include/linux/spi/wl12xx.h +++ b/include/linux/wl12xx.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * | 3 | * |
4 | * Copyright (C) 2009 Nokia Corporation | 4 | * Copyright (C) 2009 Nokia Corporation |
5 | * | 5 | * |
6 | * Contact: Kalle Valo <kalle.valo@nokia.com> | 6 | * Contact: Luciano Coelho <luciano.coelho@nokia.com> |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
@@ -21,14 +21,31 @@ | |||
21 | * | 21 | * |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #ifndef _LINUX_SPI_WL12XX_H | 24 | #ifndef _LINUX_WL12XX_H |
25 | #define _LINUX_SPI_WL12XX_H | 25 | #define _LINUX_WL12XX_H |
26 | 26 | ||
27 | struct wl12xx_platform_data { | 27 | struct wl12xx_platform_data { |
28 | void (*set_power)(bool enable); | 28 | void (*set_power)(bool enable); |
29 | /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ | 29 | /* SDIO only: IRQ number if WLAN_IRQ line is used, 0 for SDIO IRQs */ |
30 | int irq; | 30 | int irq; |
31 | bool use_eeprom; | 31 | bool use_eeprom; |
32 | int board_ref_clock; | ||
32 | }; | 33 | }; |
33 | 34 | ||
35 | #ifdef CONFIG_WL12XX_PLATFORM_DATA | ||
36 | |||
37 | int wl12xx_set_platform_data(const struct wl12xx_platform_data *data); | ||
38 | |||
39 | #else | ||
40 | |||
41 | static inline | ||
42 | int wl12xx_set_platform_data(const struct wl12xx_platform_data *data) | ||
43 | { | ||
44 | return -ENOSYS; | ||
45 | } | ||
46 | |||
47 | #endif | ||
48 | |||
49 | const struct wl12xx_platform_data *wl12xx_get_platform_data(void); | ||
50 | |||
34 | #endif | 51 | #endif |
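Board code would typically register the platform data once at init time; a sketch with made-up values (the irq and board_ref_clock numbers and the my_* names are hypothetical).

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/wl12xx.h>

static struct wl12xx_platform_data my_wlan_data __initdata = {
	.irq		 = 42,		/* hypothetical WLAN_IRQ number */
	.board_ref_clock = 2,		/* hypothetical reference clock selection */
	.use_eeprom	 = false,
};

static void __init my_board_init_wlan(void)
{
	/* returns -ENOSYS when CONFIG_WL12XX_PLATFORM_DATA is not set */
	if (wl12xx_set_platform_data(&my_wlan_data))
		pr_err("could not register wl12xx platform data\n");
}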
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 4f9d277bcd9a..070bb7a88936 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
@@ -25,18 +25,20 @@ typedef void (*work_func_t)(struct work_struct *work); | |||
25 | 25 | ||
26 | enum { | 26 | enum { |
27 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ | 27 | WORK_STRUCT_PENDING_BIT = 0, /* work item is pending execution */ |
28 | WORK_STRUCT_CWQ_BIT = 1, /* data points to cwq */ | 28 | WORK_STRUCT_DELAYED_BIT = 1, /* work item is delayed */ |
29 | WORK_STRUCT_LINKED_BIT = 2, /* next work is linked to this one */ | 29 | WORK_STRUCT_CWQ_BIT = 2, /* data points to cwq */ |
30 | WORK_STRUCT_LINKED_BIT = 3, /* next work is linked to this one */ | ||
30 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 31 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
31 | WORK_STRUCT_STATIC_BIT = 3, /* static initializer (debugobjects) */ | 32 | WORK_STRUCT_STATIC_BIT = 4, /* static initializer (debugobjects) */ |
32 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ | 33 | WORK_STRUCT_COLOR_SHIFT = 5, /* color for workqueue flushing */ |
33 | #else | 34 | #else |
34 | WORK_STRUCT_COLOR_SHIFT = 3, /* color for workqueue flushing */ | 35 | WORK_STRUCT_COLOR_SHIFT = 4, /* color for workqueue flushing */ |
35 | #endif | 36 | #endif |
36 | 37 | ||
37 | WORK_STRUCT_COLOR_BITS = 4, | 38 | WORK_STRUCT_COLOR_BITS = 4, |
38 | 39 | ||
39 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, | 40 | WORK_STRUCT_PENDING = 1 << WORK_STRUCT_PENDING_BIT, |
41 | WORK_STRUCT_DELAYED = 1 << WORK_STRUCT_DELAYED_BIT, | ||
40 | WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, | 42 | WORK_STRUCT_CWQ = 1 << WORK_STRUCT_CWQ_BIT, |
41 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, | 43 | WORK_STRUCT_LINKED = 1 << WORK_STRUCT_LINKED_BIT, |
42 | #ifdef CONFIG_DEBUG_OBJECTS_WORK | 44 | #ifdef CONFIG_DEBUG_OBJECTS_WORK |
@@ -59,8 +61,8 @@ enum { | |||
59 | 61 | ||
60 | /* | 62 | /* |
61 | * Reserve 7 bits off of cwq pointer w/ debugobjects turned | 63 | * Reserve 7 bits off of cwq pointer w/ debugobjects turned |
62 | * off. This makes cwqs aligned to 128 bytes which isn't too | 64 | * off. This makes cwqs aligned to 256 bytes and allows 15 |
63 | * excessive while allowing 15 workqueue flush colors. | 65 | * workqueue flush colors. |
64 | */ | 66 | */ |
65 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + | 67 | WORK_STRUCT_FLAG_BITS = WORK_STRUCT_COLOR_SHIFT + |
66 | WORK_STRUCT_COLOR_BITS, | 68 | WORK_STRUCT_COLOR_BITS, |
@@ -233,14 +235,21 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; } | |||
233 | #define work_clear_pending(work) \ | 235 | #define work_clear_pending(work) \ |
234 | clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) | 236 | clear_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work)) |
235 | 237 | ||
238 | /* | ||
239 | * Workqueue flags and constants. For details, please refer to | ||
240 | * Documentation/workqueue.txt. | ||
241 | */ | ||
236 | enum { | 242 | enum { |
237 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ | 243 | WQ_NON_REENTRANT = 1 << 0, /* guarantee non-reentrance */ |
238 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ | 244 | WQ_UNBOUND = 1 << 1, /* not bound to any cpu */ |
239 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ | 245 | WQ_FREEZEABLE = 1 << 2, /* freeze during suspend */ |
240 | WQ_RESCUER = 1 << 3, /* has an rescue worker */ | 246 | WQ_MEM_RECLAIM = 1 << 3, /* may be used for memory reclaim */ |
241 | WQ_HIGHPRI = 1 << 4, /* high priority */ | 247 | WQ_HIGHPRI = 1 << 4, /* high priority */ |
242 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */ | 248 | WQ_CPU_INTENSIVE = 1 << 5, /* cpu intensive workqueue */
243 | 249 | ||
250 | WQ_DYING = 1 << 6, /* internal: workqueue is dying */ | ||
251 | WQ_RESCUER = 1 << 7, /* internal: workqueue has rescuer */ | ||
252 | |||
244 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ | 253 | WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */ |
245 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ | 254 | WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */ |
246 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, | 255 | WQ_DFL_ACTIVE = WQ_MAX_ACTIVE / 2, |
@@ -298,12 +307,30 @@ __alloc_workqueue_key(const char *name, unsigned int flags, int max_active, | |||
298 | __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) | 307 | __alloc_workqueue_key((name), (flags), (max_active), NULL, NULL) |
299 | #endif | 308 | #endif |
300 | 309 | ||
310 | /** | ||
311 | * alloc_ordered_workqueue - allocate an ordered workqueue | ||
312 | * @name: name of the workqueue | ||
313 | * @flags: WQ_* flags (only WQ_FREEZEABLE and WQ_MEM_RECLAIM are meaningful) | ||
314 | * | ||
315 | * Allocate an ordered workqueue. An ordered workqueue executes at | ||
316 | * most one work item at any given time in the queued order. They are | ||
317 | * implemented as unbound workqueues with @max_active of one. | ||
318 | * | ||
319 | * RETURNS: | ||
320 | * Pointer to the allocated workqueue on success, %NULL on failure. | ||
321 | */ | ||
322 | static inline struct workqueue_struct * | ||
323 | alloc_ordered_workqueue(const char *name, unsigned int flags) | ||
324 | { | ||
325 | return alloc_workqueue(name, WQ_UNBOUND | flags, 1); | ||
326 | } | ||
327 | |||
301 | #define create_workqueue(name) \ | 328 | #define create_workqueue(name) \ |
302 | alloc_workqueue((name), WQ_RESCUER, 1) | 329 | alloc_workqueue((name), WQ_MEM_RECLAIM, 1) |
303 | #define create_freezeable_workqueue(name) \ | 330 | #define create_freezeable_workqueue(name) \ |
304 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_RESCUER, 1) | 331 | alloc_workqueue((name), WQ_FREEZEABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
305 | #define create_singlethread_workqueue(name) \ | 332 | #define create_singlethread_workqueue(name) \ |
306 | alloc_workqueue((name), WQ_UNBOUND | WQ_RESCUER, 1) | 333 | alloc_workqueue((name), WQ_UNBOUND | WQ_MEM_RECLAIM, 1) |
307 | 334 | ||
308 | extern void destroy_workqueue(struct workqueue_struct *wq); | 335 | extern void destroy_workqueue(struct workqueue_struct *wq); |
309 | 336 | ||
@@ -317,7 +344,6 @@ extern int queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | |||
317 | 344 | ||
318 | extern void flush_workqueue(struct workqueue_struct *wq); | 345 | extern void flush_workqueue(struct workqueue_struct *wq); |
319 | extern void flush_scheduled_work(void); | 346 | extern void flush_scheduled_work(void); |
320 | extern void flush_delayed_work(struct delayed_work *work); | ||
321 | 347 | ||
322 | extern int schedule_work(struct work_struct *work); | 348 | extern int schedule_work(struct work_struct *work); |
323 | extern int schedule_work_on(int cpu, struct work_struct *work); | 349 | extern int schedule_work_on(int cpu, struct work_struct *work); |
@@ -329,8 +355,13 @@ extern int keventd_up(void); | |||
329 | 355 | ||
330 | int execute_in_process_context(work_func_t fn, struct execute_work *); | 356 | int execute_in_process_context(work_func_t fn, struct execute_work *); |
331 | 357 | ||
332 | extern int flush_work(struct work_struct *work); | 358 | extern bool flush_work(struct work_struct *work); |
333 | extern int cancel_work_sync(struct work_struct *work); | 359 | extern bool flush_work_sync(struct work_struct *work); |
360 | extern bool cancel_work_sync(struct work_struct *work); | ||
361 | |||
362 | extern bool flush_delayed_work(struct delayed_work *dwork); | ||
363 | extern bool flush_delayed_work_sync(struct delayed_work *work); | ||
364 | extern bool cancel_delayed_work_sync(struct delayed_work *dwork); | ||
334 | 365 | ||
335 | extern void workqueue_set_max_active(struct workqueue_struct *wq, | 366 | extern void workqueue_set_max_active(struct workqueue_struct *wq, |
336 | int max_active); | 367 | int max_active); |
@@ -344,9 +375,9 @@ extern unsigned int work_busy(struct work_struct *work); | |||
344 | * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or | 375 | * it returns 1 and the work doesn't re-arm itself. Run flush_workqueue() or |
345 | * cancel_work_sync() to wait on it. | 376 | * cancel_work_sync() to wait on it. |
346 | */ | 377 | */ |
347 | static inline int cancel_delayed_work(struct delayed_work *work) | 378 | static inline bool cancel_delayed_work(struct delayed_work *work) |
348 | { | 379 | { |
349 | int ret; | 380 | bool ret; |
350 | 381 | ||
351 | ret = del_timer_sync(&work->timer); | 382 | ret = del_timer_sync(&work->timer); |
352 | if (ret) | 383 | if (ret) |
@@ -359,9 +390,9 @@ static inline int cancel_delayed_work(struct delayed_work *work) | |||
359 | * if it returns 0 the timer function may be running and the queueing is in | 390 | * if it returns 0 the timer function may be running and the queueing is in |
360 | * progress. | 391 | * progress. |
361 | */ | 392 | */ |
362 | static inline int __cancel_delayed_work(struct delayed_work *work) | 393 | static inline bool __cancel_delayed_work(struct delayed_work *work) |
363 | { | 394 | { |
364 | int ret; | 395 | bool ret; |
365 | 396 | ||
366 | ret = del_timer(&work->timer); | 397 | ret = del_timer(&work->timer); |
367 | if (ret) | 398 | if (ret) |
@@ -369,8 +400,6 @@ static inline int __cancel_delayed_work(struct delayed_work *work) | |||
369 | return ret; | 400 | return ret; |
370 | } | 401 | } |
371 | 402 | ||
372 | extern int cancel_delayed_work_sync(struct delayed_work *work); | ||
373 | |||
374 | /* Obsolete. use cancel_delayed_work_sync() */ | 403 | /* Obsolete. use cancel_delayed_work_sync() */ |
375 | static inline | 404 | static inline |
376 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, | 405 | void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq, |
@@ -401,8 +430,4 @@ extern bool freeze_workqueues_busy(void); | |||
401 | extern void thaw_workqueues(void); | 430 | extern void thaw_workqueues(void); |
402 | #endif /* CONFIG_FREEZER */ | 431 | #endif /* CONFIG_FREEZER */ |
403 | 432 | ||
404 | #ifdef CONFIG_LOCKDEP | ||
405 | int in_workqueue_context(struct workqueue_struct *wq); | ||
406 | #endif | ||
407 | |||
408 | #endif | 433 | #endif |
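A small sketch of the new ordered-workqueue helper and the WQ_MEM_RECLAIM flag that replaces WQ_RESCUER in the creation macros; the my_* names are hypothetical.

#include <linux/errno.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_work_fn(struct work_struct *work)
{
	/* items on an ordered workqueue run one at a time, in queueing order */
}

static int my_wq_demo(void)
{
	my_wq = alloc_ordered_workqueue("my_wq", WQ_MEM_RECLAIM);
	if (!my_wq)
		return -ENOMEM;

	INIT_WORK(&my_work, my_work_fn);
	queue_work(my_wq, &my_work);

	flush_workqueue(my_wq);		/* wait for my_work to finish */
	destroy_workqueue(my_wq);
	return 0;
}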
diff --git a/include/media/videobuf-dma-sg.h b/include/media/videobuf-dma-sg.h index 97e07f46a0fa..aa4ebb42a565 100644 --- a/include/media/videobuf-dma-sg.h +++ b/include/media/videobuf-dma-sg.h | |||
@@ -48,6 +48,7 @@ struct videobuf_dmabuf { | |||
48 | 48 | ||
49 | /* for userland buffer */ | 49 | /* for userland buffer */ |
50 | int offset; | 50 | int offset; |
51 | size_t size; | ||
51 | struct page **pages; | 52 | struct page **pages; |
52 | 53 | ||
53 | /* for kernel buffers */ | 54 | /* for kernel buffers */ |
diff --git a/include/net/9p/client.h b/include/net/9p/client.h index d1aa2cfb30f0..7f63d5ab7b44 100644 --- a/include/net/9p/client.h +++ b/include/net/9p/client.h | |||
@@ -212,15 +212,12 @@ struct p9_dirent { | |||
212 | 212 | ||
213 | int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); | 213 | int p9_client_statfs(struct p9_fid *fid, struct p9_rstatfs *sb); |
214 | int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name); | 214 | int p9_client_rename(struct p9_fid *fid, struct p9_fid *newdirfid, char *name); |
215 | int p9_client_version(struct p9_client *); | ||
216 | struct p9_client *p9_client_create(const char *dev_name, char *options); | 215 | struct p9_client *p9_client_create(const char *dev_name, char *options); |
217 | void p9_client_destroy(struct p9_client *clnt); | 216 | void p9_client_destroy(struct p9_client *clnt); |
218 | void p9_client_disconnect(struct p9_client *clnt); | 217 | void p9_client_disconnect(struct p9_client *clnt); |
219 | void p9_client_begin_disconnect(struct p9_client *clnt); | 218 | void p9_client_begin_disconnect(struct p9_client *clnt); |
220 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, | 219 | struct p9_fid *p9_client_attach(struct p9_client *clnt, struct p9_fid *afid, |
221 | char *uname, u32 n_uname, char *aname); | 220 | char *uname, u32 n_uname, char *aname); |
222 | struct p9_fid *p9_client_auth(struct p9_client *clnt, char *uname, | ||
223 | u32 n_uname, char *aname); | ||
224 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, | 221 | struct p9_fid *p9_client_walk(struct p9_fid *oldfid, int nwname, char **wnames, |
225 | int clone); | 222 | int clone); |
226 | int p9_client_open(struct p9_fid *fid, int mode); | 223 | int p9_client_open(struct p9_fid *fid, int mode); |
diff --git a/include/net/addrconf.h b/include/net/addrconf.h index 45375b41a2a0..a9441249306c 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h | |||
@@ -121,6 +121,7 @@ static inline int addrconf_finite_timeout(unsigned long timeout) | |||
121 | * IPv6 Address Label subsystem (addrlabel.c) | 121 | * IPv6 Address Label subsystem (addrlabel.c) |
122 | */ | 122 | */ |
123 | extern int ipv6_addr_label_init(void); | 123 | extern int ipv6_addr_label_init(void); |
124 | extern void ipv6_addr_label_cleanup(void); | ||
124 | extern void ipv6_addr_label_rtnl_register(void); | 125 | extern void ipv6_addr_label_rtnl_register(void); |
125 | extern u32 ipv6_addr_label(struct net *net, | 126 | extern u32 ipv6_addr_label(struct net *net, |
126 | const struct in6_addr *addr, | 127 | const struct in6_addr *addr, |
@@ -174,20 +175,32 @@ extern int ipv6_chk_acast_addr(struct net *net, struct net_device *dev, | |||
174 | extern int register_inet6addr_notifier(struct notifier_block *nb); | 175 | extern int register_inet6addr_notifier(struct notifier_block *nb); |
175 | extern int unregister_inet6addr_notifier(struct notifier_block *nb); | 176 | extern int unregister_inet6addr_notifier(struct notifier_block *nb); |
176 | 177 | ||
177 | static inline struct inet6_dev * | 178 | /** |
178 | __in6_dev_get(struct net_device *dev) | 179 | * __in6_dev_get - get inet6_dev pointer from netdevice |
180 | * @dev: network device | ||
181 | * | ||
182 | * Caller must hold rcu_read_lock or RTNL, because this function | ||
183 | * does not take a reference on the inet6_dev. | ||
184 | */ | ||
185 | static inline struct inet6_dev *__in6_dev_get(const struct net_device *dev) | ||
179 | { | 186 | { |
180 | return rcu_dereference_check(dev->ip6_ptr, | 187 | return rcu_dereference_rtnl(dev->ip6_ptr); |
181 | rcu_read_lock_held() || | ||
182 | lockdep_rtnl_is_held()); | ||
183 | } | 188 | } |
184 | 189 | ||
185 | static inline struct inet6_dev * | 190 | /** |
186 | in6_dev_get(struct net_device *dev) | 191 | * in6_dev_get - get inet6_dev pointer from netdevice |
192 | * @dev: network device | ||
193 | * | ||
194 | * This version can be used in any context, and takes a reference | ||
195 | * on the inet6_dev. Callers must use in6_dev_put() later to | ||
196 | * release this reference. | ||
197 | */ | ||
198 | static inline struct inet6_dev *in6_dev_get(const struct net_device *dev) | ||
187 | { | 199 | { |
188 | struct inet6_dev *idev = NULL; | 200 | struct inet6_dev *idev; |
201 | |||
189 | rcu_read_lock(); | 202 | rcu_read_lock(); |
190 | idev = __in6_dev_get(dev); | 203 | idev = rcu_dereference(dev->ip6_ptr); |
191 | if (idev) | 204 | if (idev) |
192 | atomic_inc(&idev->refcnt); | 205 | atomic_inc(&idev->refcnt); |
193 | rcu_read_unlock(); | 206 | rcu_read_unlock(); |
@@ -196,16 +209,21 @@ in6_dev_get(struct net_device *dev) | |||
196 | 209 | ||
197 | extern void in6_dev_finish_destroy(struct inet6_dev *idev); | 210 | extern void in6_dev_finish_destroy(struct inet6_dev *idev); |
198 | 211 | ||
199 | static inline void | 212 | static inline void in6_dev_put(struct inet6_dev *idev) |
200 | in6_dev_put(struct inet6_dev *idev) | ||
201 | { | 213 | { |
202 | if (atomic_dec_and_test(&idev->refcnt)) | 214 | if (atomic_dec_and_test(&idev->refcnt)) |
203 | in6_dev_finish_destroy(idev); | 215 | in6_dev_finish_destroy(idev); |
204 | } | 216 | } |
205 | 217 | ||
206 | #define __in6_dev_put(idev) atomic_dec(&(idev)->refcnt) | 218 | static inline void __in6_dev_put(struct inet6_dev *idev) |
207 | #define in6_dev_hold(idev) atomic_inc(&(idev)->refcnt) | 219 | { |
220 | atomic_dec(&idev->refcnt); | ||
221 | } | ||
208 | 222 | ||
223 | static inline void in6_dev_hold(struct inet6_dev *idev) | ||
224 | { | ||
225 | atomic_inc(&idev->refcnt); | ||
226 | } | ||
209 | 227 | ||
210 | extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); | 228 | extern void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp); |
211 | 229 | ||
@@ -215,9 +233,15 @@ static inline void in6_ifa_put(struct inet6_ifaddr *ifp) | |||
215 | inet6_ifa_finish_destroy(ifp); | 233 | inet6_ifa_finish_destroy(ifp); |
216 | } | 234 | } |
217 | 235 | ||
218 | #define __in6_ifa_put(ifp) atomic_dec(&(ifp)->refcnt) | 236 | static inline void __in6_ifa_put(struct inet6_ifaddr *ifp) |
219 | #define in6_ifa_hold(ifp) atomic_inc(&(ifp)->refcnt) | 237 | { |
238 | atomic_dec(&ifp->refcnt); | ||
239 | } | ||
220 | 240 | ||
241 | static inline void in6_ifa_hold(struct inet6_ifaddr *ifp) | ||
242 | { | ||
243 | atomic_inc(&ifp->refcnt); | ||
244 | } | ||
221 | 245 | ||
222 | 246 | ||
223 | /* | 247 | /* |
@@ -240,23 +264,21 @@ static inline int ipv6_addr_is_multicast(const struct in6_addr *addr) | |||
240 | 264 | ||
241 | static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) | 265 | static inline int ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr) |
242 | { | 266 | { |
243 | return (((addr->s6_addr32[0] ^ htonl(0xff020000)) | | 267 | return ((addr->s6_addr32[0] ^ htonl(0xff020000)) | |
244 | addr->s6_addr32[1] | addr->s6_addr32[2] | | 268 | addr->s6_addr32[1] | addr->s6_addr32[2] | |
245 | (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0); | 269 | (addr->s6_addr32[3] ^ htonl(0x00000001))) == 0; |
246 | } | 270 | } |
247 | 271 | ||
248 | static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) | 272 | static inline int ipv6_addr_is_ll_all_routers(const struct in6_addr *addr) |
249 | { | 273 | { |
250 | return (((addr->s6_addr32[0] ^ htonl(0xff020000)) | | 274 | return ((addr->s6_addr32[0] ^ htonl(0xff020000)) | |
251 | addr->s6_addr32[1] | addr->s6_addr32[2] | | 275 | addr->s6_addr32[1] | addr->s6_addr32[2] | |
252 | (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0); | 276 | (addr->s6_addr32[3] ^ htonl(0x00000002))) == 0; |
253 | } | 277 | } |
254 | 278 | ||
255 | extern int __ipv6_isatap_ifid(u8 *eui, __be32 addr); | ||
256 | |||
257 | static inline int ipv6_addr_is_isatap(const struct in6_addr *addr) | 279 | static inline int ipv6_addr_is_isatap(const struct in6_addr *addr) |
258 | { | 280 | { |
259 | return ((addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE)); | 281 | return (addr->s6_addr32[2] | htonl(0x02000000)) == htonl(0x02005EFE); |
260 | } | 282 | } |
261 | 283 | ||
262 | #ifdef CONFIG_PROC_FS | 284 | #ifdef CONFIG_PROC_FS |
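The addrconf.h hunk above converts the refcount macros into typed inline helpers and documents the locking rules for __in6_dev_get()/in6_dev_get(). A minimal caller sketch under those rules; the function names and the cnf.forwarding read are illustrative, not part of this patch:

#include <linux/netdevice.h>
#include <net/if_inet6.h>
#include <net/addrconf.h>

/* Illustrative only: read a per-device setting without taking a reference.
 * Legal because the caller holds rcu_read_lock(), per the new kernel-doc. */
static int example_ipv6_forwarding(const struct net_device *dev)
{
	struct inet6_dev *idev;
	int forwarding = 0;

	rcu_read_lock();
	idev = __in6_dev_get(dev);		/* no reference taken */
	if (idev)
		forwarding = idev->cnf.forwarding;
	rcu_read_unlock();

	return forwarding;
}

/* Illustrative only: keep the inet6_dev beyond the RCU section. */
static struct inet6_dev *example_grab_idev(const struct net_device *dev)
{
	struct inet6_dev *idev = in6_dev_get(dev);	/* takes a reference */

	/* ... may sleep, pass idev around, etc. ... */
	return idev;	/* caller must eventually call in6_dev_put(idev) */
}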
diff --git a/include/net/arp.h b/include/net/arp.h index 716f43c5c98e..f4cf6ce66586 100644 --- a/include/net/arp.h +++ b/include/net/arp.h | |||
@@ -26,6 +26,4 @@ extern struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip, | |||
26 | const unsigned char *target_hw); | 26 | const unsigned char *target_hw); |
27 | extern void arp_xmit(struct sk_buff *skb); | 27 | extern void arp_xmit(struct sk_buff *skb); |
28 | 28 | ||
29 | extern const struct neigh_ops arp_broken_ops; | ||
30 | |||
31 | #endif /* _ARP_H */ | 29 | #endif /* _ARP_H */ |
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index 27a902d9b3a9..d81ea7997701 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h | |||
@@ -126,6 +126,8 @@ int bt_sock_unregister(int proto); | |||
126 | void bt_sock_link(struct bt_sock_list *l, struct sock *s); | 126 | void bt_sock_link(struct bt_sock_list *l, struct sock *s); |
127 | void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); | 127 | void bt_sock_unlink(struct bt_sock_list *l, struct sock *s); |
128 | int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); | 128 | int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags); |
129 | int bt_sock_stream_recvmsg(struct kiocb *iocb, struct socket *sock, | ||
130 | struct msghdr *msg, size_t len, int flags); | ||
129 | uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); | 131 | uint bt_sock_poll(struct file * file, struct socket *sock, poll_table *wait); |
130 | int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); | 132 | int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg); |
131 | int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); | 133 | int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo); |
@@ -161,12 +163,30 @@ static inline struct sk_buff *bt_skb_send_alloc(struct sock *sk, unsigned long l | |||
161 | { | 163 | { |
162 | struct sk_buff *skb; | 164 | struct sk_buff *skb; |
163 | 165 | ||
166 | release_sock(sk); | ||
164 | if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { | 167 | if ((skb = sock_alloc_send_skb(sk, len + BT_SKB_RESERVE, nb, err))) { |
165 | skb_reserve(skb, BT_SKB_RESERVE); | 168 | skb_reserve(skb, BT_SKB_RESERVE); |
166 | bt_cb(skb)->incoming = 0; | 169 | bt_cb(skb)->incoming = 0; |
167 | } | 170 | } |
171 | lock_sock(sk); | ||
172 | |||
173 | if (!skb && *err) | ||
174 | return NULL; | ||
175 | |||
176 | *err = sock_error(sk); | ||
177 | if (*err) | ||
178 | goto out; | ||
179 | |||
180 | if (sk->sk_shutdown) { | ||
181 | *err = -ECONNRESET; | ||
182 | goto out; | ||
183 | } | ||
168 | 184 | ||
169 | return skb; | 185 | return skb; |
186 | |||
187 | out: | ||
188 | kfree_skb(skb); | ||
189 | return NULL; | ||
170 | } | 190 | } |
171 | 191 | ||
172 | int bt_err(__u16 code); | 192 | int bt_err(__u16 code); |
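The bt_skb_send_alloc() change releases the socket lock around the potentially sleeping allocation and re-validates the socket afterwards (sock_error(), sk_shutdown). A hypothetical caller sketch, assuming the caller holds the socket lock as the Bluetooth send paths do:

#include <net/sock.h>
#include <net/bluetooth/bluetooth.h>

/* Illustrative send path: the socket lock is held on entry, and
 * bt_skb_send_alloc() now drops/retakes it around the allocation. */
static int example_bt_send(struct sock *sk, unsigned long len, int nb)
{
	struct sk_buff *skb;
	int err = 0;

	lock_sock(sk);
	skb = bt_skb_send_alloc(sk, len, nb, &err);
	if (!skb) {
		/* err holds the allocation or socket error */
		release_sock(sk);
		return err;
	}

	/* ... build the frame and queue it for transmission ... */

	release_sock(sk);
	return 0;
}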
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index bcbdd6d4e6dd..e30e00834340 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h | |||
@@ -54,7 +54,7 @@ | |||
54 | 54 | ||
55 | /* HCI controller types */ | 55 | /* HCI controller types */ |
56 | #define HCI_BREDR 0x00 | 56 | #define HCI_BREDR 0x00 |
57 | #define HCI_80211 0x01 | 57 | #define HCI_AMP 0x01 |
58 | 58 | ||
59 | /* HCI device quirks */ | 59 | /* HCI device quirks */ |
60 | enum { | 60 | enum { |
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index 4568b938ca35..ebec8c9a929d 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h | |||
@@ -233,7 +233,7 @@ static inline void inquiry_cache_init(struct hci_dev *hdev) | |||
233 | static inline int inquiry_cache_empty(struct hci_dev *hdev) | 233 | static inline int inquiry_cache_empty(struct hci_dev *hdev) |
234 | { | 234 | { |
235 | struct inquiry_cache *c = &hdev->inq_cache; | 235 | struct inquiry_cache *c = &hdev->inq_cache; |
236 | return (c->list == NULL); | 236 | return c->list == NULL; |
237 | } | 237 | } |
238 | 238 | ||
239 | static inline long inquiry_cache_age(struct hci_dev *hdev) | 239 | static inline long inquiry_cache_age(struct hci_dev *hdev) |
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 6c241444f902..c819c8bf9b68 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h | |||
@@ -414,7 +414,7 @@ static inline int l2cap_tx_window_full(struct sock *sk) | |||
414 | if (sub < 0) | 414 | if (sub < 0) |
415 | sub += 64; | 415 | sub += 64; |
416 | 416 | ||
417 | return (sub == pi->remote_tx_win); | 417 | return sub == pi->remote_tx_win; |
418 | } | 418 | } |
419 | 419 | ||
420 | #define __get_txseq(ctrl) ((ctrl) & L2CAP_CTRL_TXSEQ) >> 1 | 420 | #define __get_txseq(ctrl) ((ctrl) & L2CAP_CTRL_TXSEQ) >> 1 |
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h index a140847d622c..71047bc0af84 100644 --- a/include/net/bluetooth/rfcomm.h +++ b/include/net/bluetooth/rfcomm.h | |||
@@ -213,11 +213,6 @@ struct rfcomm_dlc { | |||
213 | #define RFCOMM_DEFER_SETUP 8 | 213 | #define RFCOMM_DEFER_SETUP 8 |
214 | 214 | ||
215 | /* Scheduling flags and events */ | 215 | /* Scheduling flags and events */ |
216 | #define RFCOMM_SCHED_STATE 0 | ||
217 | #define RFCOMM_SCHED_RX 1 | ||
218 | #define RFCOMM_SCHED_TX 2 | ||
219 | #define RFCOMM_SCHED_TIMEO 3 | ||
220 | #define RFCOMM_SCHED_AUTH 4 | ||
221 | #define RFCOMM_SCHED_WAKEUP 31 | 216 | #define RFCOMM_SCHED_WAKEUP 31 |
222 | 217 | ||
223 | /* MSC exchange flags */ | 218 | /* MSC exchange flags */ |
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 2fd06c60ffbb..2a7936d7851d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h | |||
@@ -25,6 +25,43 @@ | |||
25 | #include <linux/wireless.h> | 25 | #include <linux/wireless.h> |
26 | 26 | ||
27 | 27 | ||
28 | /** | ||
29 | * DOC: Introduction | ||
30 | * | ||
31 | * cfg80211 is the configuration API for 802.11 devices in Linux. It bridges | ||
32 | * userspace and drivers, and offers some utility functionality associated | ||
33 | * with 802.11. cfg80211 must, directly or indirectly via mac80211, be used | ||
34 | * by all modern wireless drivers in Linux, so that they offer a consistent | ||
35 | * API through nl80211. For backward compatibility, cfg80211 also offers | ||
36 | * wireless extensions to userspace, but hides them from drivers completely. | ||
37 | * | ||
38 | * Additionally, cfg80211 contains code to help enforce regulatory spectrum | ||
39 | * use restrictions. | ||
40 | */ | ||
41 | |||
42 | |||
43 | /** | ||
44 | * DOC: Device registration | ||
45 | * | ||
46 | * In order for a driver to use cfg80211, it must register the hardware device | ||
47 | * with cfg80211. This happens through a number of hardware capability structs | ||
48 | * described below. | ||
49 | * | ||
50 | * The fundamental structure for each device is the 'wiphy', of which each | ||
51 | * instance describes a physical wireless device connected to the system. Each | ||
52 | * such wiphy can have zero, one, or many virtual interfaces associated with | ||
53 | * it, which need to be identified as such by pointing the network interface's | ||
54 | * @ieee80211_ptr pointer to a &struct wireless_dev which further describes | ||
55 | * the wireless part of the interface, normally this struct is embedded in the | ||
56 | * network interface's private data area. Drivers can optionally allow creating | ||
57 | * or destroying virtual interfaces on the fly, but without at least one or the | ||
58 | * ability to create some the wireless device isn't useful. | ||
59 | * | ||
60 | * Each wiphy structure contains device capability information, and also has | ||
61 | * a pointer to the various operations the driver offers. The definitions and | ||
62 | * structures here describe these capabilities in detail. | ||
63 | */ | ||
64 | |||
28 | /* | 65 | /* |
29 | * wireless hardware capability structures | 66 | * wireless hardware capability structures |
30 | */ | 67 | */ |
@@ -205,6 +242,21 @@ struct ieee80211_supported_band { | |||
205 | */ | 242 | */ |
206 | 243 | ||
207 | /** | 244 | /** |
245 | * DOC: Actions and configuration | ||
246 | * | ||
247 | * Each wireless device and each virtual interface offer a set of configuration | ||
248 | * operations and other actions that are invoked by userspace. Each of these | ||
249 | * actions is described in the operations structure, and the parameters these | ||
250 | * operations use are described separately. | ||
251 | * | ||
252 | * Additionally, some operations are asynchronous and expect to get status | ||
253 | * information via some functions that drivers need to call. | ||
254 | * | ||
255 | * Scanning and BSS list handling with its associated functionality is described | ||
256 | * in a separate chapter. | ||
257 | */ | ||
258 | |||
259 | /** | ||
208 | * struct vif_params - describes virtual interface parameters | 260 | * struct vif_params - describes virtual interface parameters |
209 | * @mesh_id: mesh ID to use | 261 | * @mesh_id: mesh ID to use |
210 | * @mesh_id_len: length of the mesh ID | 262 | * @mesh_id_len: length of the mesh ID |
@@ -241,12 +293,24 @@ struct key_params { | |||
241 | * enum survey_info_flags - survey information flags | 293 | * enum survey_info_flags - survey information flags |
242 | * | 294 | * |
243 | * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in | 295 | * @SURVEY_INFO_NOISE_DBM: noise (in dBm) was filled in |
296 | * @SURVEY_INFO_IN_USE: channel is currently being used | ||
297 | * @SURVEY_INFO_CHANNEL_TIME: channel active time (in ms) was filled in | ||
298 | * @SURVEY_INFO_CHANNEL_TIME_BUSY: channel busy time was filled in | ||
299 | * @SURVEY_INFO_CHANNEL_TIME_EXT_BUSY: extension channel busy time was filled in | ||
300 | * @SURVEY_INFO_CHANNEL_TIME_RX: channel receive time was filled in | ||
301 | * @SURVEY_INFO_CHANNEL_TIME_TX: channel transmit time was filled in | ||
244 | * | 302 | * |
245 | * Used by the driver to indicate which info in &struct survey_info | 303 | * Used by the driver to indicate which info in &struct survey_info |
246 | * it has filled in during the get_survey(). | 304 | * it has filled in during the get_survey(). |
247 | */ | 305 | */ |
248 | enum survey_info_flags { | 306 | enum survey_info_flags { |
249 | SURVEY_INFO_NOISE_DBM = 1<<0, | 307 | SURVEY_INFO_NOISE_DBM = 1<<0, |
308 | SURVEY_INFO_IN_USE = 1<<1, | ||
309 | SURVEY_INFO_CHANNEL_TIME = 1<<2, | ||
310 | SURVEY_INFO_CHANNEL_TIME_BUSY = 1<<3, | ||
311 | SURVEY_INFO_CHANNEL_TIME_EXT_BUSY = 1<<4, | ||
312 | SURVEY_INFO_CHANNEL_TIME_RX = 1<<5, | ||
313 | SURVEY_INFO_CHANNEL_TIME_TX = 1<<6, | ||
250 | }; | 314 | }; |
251 | 315 | ||
252 | /** | 316 | /** |
@@ -256,6 +320,11 @@ enum survey_info_flags { | |||
256 | * @filled: bitflag of flags from &enum survey_info_flags | 320 | * @filled: bitflag of flags from &enum survey_info_flags |
257 | * @noise: channel noise in dBm. This and all following fields are | 321 | * @noise: channel noise in dBm. This and all following fields are |
258 | * optional | 322 | * optional |
323 | * @channel_time: amount of time in ms the radio spent on the channel | ||
324 | * @channel_time_busy: amount of time the primary channel was sensed busy | ||
325 | * @channel_time_ext_busy: amount of time the extension channel was sensed busy | ||
326 | * @channel_time_rx: amount of time the radio spent receiving data | ||
327 | * @channel_time_tx: amount of time the radio spent transmitting data | ||
259 | * | 328 | * |
260 | * Used by dump_survey() to report back per-channel survey information. | 329 | * Used by dump_survey() to report back per-channel survey information. |
261 | * | 330 | * |
@@ -264,6 +333,11 @@ enum survey_info_flags { | |||
264 | */ | 333 | */ |
265 | struct survey_info { | 334 | struct survey_info { |
266 | struct ieee80211_channel *channel; | 335 | struct ieee80211_channel *channel; |
336 | u64 channel_time; | ||
337 | u64 channel_time_busy; | ||
338 | u64 channel_time_ext_busy; | ||
339 | u64 channel_time_rx; | ||
340 | u64 channel_time_tx; | ||
267 | u32 filled; | 341 | u32 filled; |
268 | s8 noise; | 342 | s8 noise; |
269 | }; | 343 | }; |
@@ -347,6 +421,9 @@ struct station_parameters { | |||
347 | * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) | 421 | * (tx_bitrate, tx_bitrate_flags and tx_bitrate_mcs) |
348 | * @STATION_INFO_RX_PACKETS: @rx_packets filled | 422 | * @STATION_INFO_RX_PACKETS: @rx_packets filled |
349 | * @STATION_INFO_TX_PACKETS: @tx_packets filled | 423 | * @STATION_INFO_TX_PACKETS: @tx_packets filled |
424 | * @STATION_INFO_TX_RETRIES: @tx_retries filled | ||
425 | * @STATION_INFO_TX_FAILED: @tx_failed filled | ||
426 | * @STATION_INFO_RX_DROP_MISC: @rx_dropped_misc filled | ||
350 | */ | 427 | */ |
351 | enum station_info_flags { | 428 | enum station_info_flags { |
352 | STATION_INFO_INACTIVE_TIME = 1<<0, | 429 | STATION_INFO_INACTIVE_TIME = 1<<0, |
@@ -359,6 +436,9 @@ enum station_info_flags { | |||
359 | STATION_INFO_TX_BITRATE = 1<<7, | 436 | STATION_INFO_TX_BITRATE = 1<<7, |
360 | STATION_INFO_RX_PACKETS = 1<<8, | 437 | STATION_INFO_RX_PACKETS = 1<<8, |
361 | STATION_INFO_TX_PACKETS = 1<<9, | 438 | STATION_INFO_TX_PACKETS = 1<<9, |
439 | STATION_INFO_TX_RETRIES = 1<<10, | ||
440 | STATION_INFO_TX_FAILED = 1<<11, | ||
441 | STATION_INFO_RX_DROP_MISC = 1<<12, | ||
362 | }; | 442 | }; |
363 | 443 | ||
364 | /** | 444 | /** |
@@ -408,6 +488,9 @@ struct rate_info { | |||
408 | * @txrate: current unicast bitrate to this station | 488 | * @txrate: current unicast bitrate to this station |
409 | * @rx_packets: packets received from this station | 489 | * @rx_packets: packets received from this station |
410 | * @tx_packets: packets transmitted to this station | 490 | * @tx_packets: packets transmitted to this station |
491 | * @tx_retries: cumulative retry counts | ||
492 | * @tx_failed: number of failed transmissions (retries exceeded, no ACK) | ||
493 | * @rx_dropped_misc: Dropped for un-specified reason. | ||
411 | * @generation: generation number for nl80211 dumps. | 494 | * @generation: generation number for nl80211 dumps. |
412 | * This number should increase every time the list of stations | 495 | * This number should increase every time the list of stations |
413 | * changes, i.e. when a station is added or removed, so that | 496 | * changes, i.e. when a station is added or removed, so that |
@@ -425,6 +508,9 @@ struct station_info { | |||
425 | struct rate_info txrate; | 508 | struct rate_info txrate; |
426 | u32 rx_packets; | 509 | u32 rx_packets; |
427 | u32 tx_packets; | 510 | u32 tx_packets; |
511 | u32 tx_retries; | ||
512 | u32 tx_failed; | ||
513 | u32 rx_dropped_misc; | ||
428 | 514 | ||
429 | int generation; | 515 | int generation; |
430 | }; | 516 | }; |
@@ -570,8 +656,28 @@ struct ieee80211_txq_params { | |||
570 | /* from net/wireless.h */ | 656 | /* from net/wireless.h */ |
571 | struct wiphy; | 657 | struct wiphy; |
572 | 658 | ||
573 | /* from net/ieee80211.h */ | 659 | /** |
574 | struct ieee80211_channel; | 660 | * DOC: Scanning and BSS list handling |
661 | * | ||
662 | * The scanning process itself is fairly simple, but cfg80211 offers quite | ||
663 | * a bit of helper functionality. To start a scan, the scan operation will | ||
664 | * be invoked with a scan definition. This scan definition contains the | ||
665 | * channels to scan, and the SSIDs to send probe requests for (including the | ||
666 | * wildcard, if desired). A passive scan is indicated by having no SSIDs to | ||
667 | * probe. Additionally, a scan request may contain extra information elements | ||
668 | * that should be added to the probe request. The IEs are guaranteed to be | ||
669 | * well-formed, and will not exceed the maximum length the driver advertised | ||
670 | * in the wiphy structure. | ||
671 | * | ||
672 | * When scanning finds a BSS, cfg80211 needs to be notified of that, because | ||
673 | * it is responsible for maintaining the BSS list; the driver should not | ||
674 | * maintain a list itself. For this notification, various functions exist. | ||
675 | * | ||
676 | * Since drivers do not maintain a BSS list, there are also a number of | ||
677 | * functions to search for a BSS and obtain information about it from the | ||
678 | * BSS structure cfg80211 maintains. The BSS list is also made available | ||
679 | * to userspace. | ||
680 | */ | ||
575 | 681 | ||
576 | /** | 682 | /** |
577 | * struct cfg80211_ssid - SSID description | 683 | * struct cfg80211_ssid - SSID description |
@@ -691,6 +797,10 @@ const u8 *ieee80211_bss_get_ie(struct cfg80211_bss *bss, u8 ie); | |||
691 | * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is | 797 | * sets/clears %NL80211_STA_FLAG_AUTHORIZED. If true, the driver is |
692 | * required to assume that the port is unauthorized until authorized by | 798 | * required to assume that the port is unauthorized until authorized by |
693 | * user space. Otherwise, port is marked authorized by default. | 799 | * user space. Otherwise, port is marked authorized by default. |
800 | * @control_port_ethertype: the control port protocol that should be | ||
801 | * allowed through even on unauthorized ports | ||
802 | * @control_port_no_encrypt: TRUE to prevent encryption of control port | ||
803 | * protocol frames. | ||
694 | */ | 804 | */ |
695 | struct cfg80211_crypto_settings { | 805 | struct cfg80211_crypto_settings { |
696 | u32 wpa_versions; | 806 | u32 wpa_versions; |
@@ -700,6 +810,8 @@ struct cfg80211_crypto_settings { | |||
700 | int n_akm_suites; | 810 | int n_akm_suites; |
701 | u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; | 811 | u32 akm_suites[NL80211_MAX_NR_AKM_SUITES]; |
702 | bool control_port; | 812 | bool control_port; |
813 | __be16 control_port_ethertype; | ||
814 | bool control_port_no_encrypt; | ||
703 | }; | 815 | }; |
704 | 816 | ||
705 | /** | 817 | /** |
@@ -1020,7 +1132,7 @@ struct cfg80211_pmksa { | |||
1020 | * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. | 1132 | * @cancel_remain_on_channel: Cancel an on-going remain-on-channel operation. |
1021 | * This allows the operation to be terminated prior to timeout based on | 1133 | * This allows the operation to be terminated prior to timeout based on |
1022 | * the duration value. | 1134 | * the duration value. |
1023 | * @action: Transmit an action frame | 1135 | * @mgmt_tx: Transmit a management frame |
1024 | * | 1136 | * |
1025 | * @testmode_cmd: run a test mode command | 1137 | * @testmode_cmd: run a test mode command |
1026 | * | 1138 | * |
@@ -1035,6 +1147,9 @@ struct cfg80211_pmksa { | |||
1035 | * allows the driver to adjust the dynamic ps timeout value. | 1147 | * allows the driver to adjust the dynamic ps timeout value. |
1036 | * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. | 1148 | * @set_cqm_rssi_config: Configure connection quality monitor RSSI threshold. |
1037 | * | 1149 | * |
1150 | * @mgmt_frame_register: Notify driver that a management frame type was | ||
1151 | * registered. Note that this callback may not sleep, and cannot run | ||
1152 | * concurrently with itself. | ||
1038 | */ | 1153 | */ |
1039 | struct cfg80211_ops { | 1154 | struct cfg80211_ops { |
1040 | int (*suspend)(struct wiphy *wiphy); | 1155 | int (*suspend)(struct wiphy *wiphy); |
@@ -1050,13 +1165,14 @@ struct cfg80211_ops { | |||
1050 | struct vif_params *params); | 1165 | struct vif_params *params); |
1051 | 1166 | ||
1052 | int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, | 1167 | int (*add_key)(struct wiphy *wiphy, struct net_device *netdev, |
1053 | u8 key_index, const u8 *mac_addr, | 1168 | u8 key_index, bool pairwise, const u8 *mac_addr, |
1054 | struct key_params *params); | 1169 | struct key_params *params); |
1055 | int (*get_key)(struct wiphy *wiphy, struct net_device *netdev, | 1170 | int (*get_key)(struct wiphy *wiphy, struct net_device *netdev, |
1056 | u8 key_index, const u8 *mac_addr, void *cookie, | 1171 | u8 key_index, bool pairwise, const u8 *mac_addr, |
1172 | void *cookie, | ||
1057 | void (*callback)(void *cookie, struct key_params*)); | 1173 | void (*callback)(void *cookie, struct key_params*)); |
1058 | int (*del_key)(struct wiphy *wiphy, struct net_device *netdev, | 1174 | int (*del_key)(struct wiphy *wiphy, struct net_device *netdev, |
1059 | u8 key_index, const u8 *mac_addr); | 1175 | u8 key_index, bool pairwise, const u8 *mac_addr); |
1060 | int (*set_default_key)(struct wiphy *wiphy, | 1176 | int (*set_default_key)(struct wiphy *wiphy, |
1061 | struct net_device *netdev, | 1177 | struct net_device *netdev, |
1062 | u8 key_index); | 1178 | u8 key_index); |
@@ -1140,7 +1256,7 @@ struct cfg80211_ops { | |||
1140 | int (*get_tx_power)(struct wiphy *wiphy, int *dbm); | 1256 | int (*get_tx_power)(struct wiphy *wiphy, int *dbm); |
1141 | 1257 | ||
1142 | int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev, | 1258 | int (*set_wds_peer)(struct wiphy *wiphy, struct net_device *dev, |
1143 | u8 *addr); | 1259 | const u8 *addr); |
1144 | 1260 | ||
1145 | void (*rfkill_poll)(struct wiphy *wiphy); | 1261 | void (*rfkill_poll)(struct wiphy *wiphy); |
1146 | 1262 | ||
@@ -1172,7 +1288,7 @@ struct cfg80211_ops { | |||
1172 | struct net_device *dev, | 1288 | struct net_device *dev, |
1173 | u64 cookie); | 1289 | u64 cookie); |
1174 | 1290 | ||
1175 | int (*action)(struct wiphy *wiphy, struct net_device *dev, | 1291 | int (*mgmt_tx)(struct wiphy *wiphy, struct net_device *dev, |
1176 | struct ieee80211_channel *chan, | 1292 | struct ieee80211_channel *chan, |
1177 | enum nl80211_channel_type channel_type, | 1293 | enum nl80211_channel_type channel_type, |
1178 | bool channel_type_valid, | 1294 | bool channel_type_valid, |
@@ -1184,6 +1300,10 @@ struct cfg80211_ops { | |||
1184 | int (*set_cqm_rssi_config)(struct wiphy *wiphy, | 1300 | int (*set_cqm_rssi_config)(struct wiphy *wiphy, |
1185 | struct net_device *dev, | 1301 | struct net_device *dev, |
1186 | s32 rssi_thold, u32 rssi_hyst); | 1302 | s32 rssi_thold, u32 rssi_hyst); |
1303 | |||
1304 | void (*mgmt_frame_register)(struct wiphy *wiphy, | ||
1305 | struct net_device *dev, | ||
1306 | u16 frame_type, bool reg); | ||
1187 | }; | 1307 | }; |
1188 | 1308 | ||
1189 | /* | 1309 | /* |
@@ -1221,21 +1341,31 @@ struct cfg80211_ops { | |||
1221 | * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station | 1341 | * @WIPHY_FLAG_4ADDR_AP: supports 4addr mode even on AP (with a single station |
1222 | * on a VLAN interface) | 1342 | * on a VLAN interface) |
1223 | * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station | 1343 | * @WIPHY_FLAG_4ADDR_STATION: supports 4addr mode even as a station |
1344 | * @WIPHY_FLAG_CONTROL_PORT_PROTOCOL: This device supports setting the | ||
1345 | * control port protocol ethertype. The device also honours the | ||
1346 | * control_port_no_encrypt flag. | ||
1347 | * @WIPHY_FLAG_IBSS_RSN: The device supports IBSS RSN. | ||
1224 | */ | 1348 | */ |
1225 | enum wiphy_flags { | 1349 | enum wiphy_flags { |
1226 | WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), | 1350 | WIPHY_FLAG_CUSTOM_REGULATORY = BIT(0), |
1227 | WIPHY_FLAG_STRICT_REGULATORY = BIT(1), | 1351 | WIPHY_FLAG_STRICT_REGULATORY = BIT(1), |
1228 | WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2), | 1352 | WIPHY_FLAG_DISABLE_BEACON_HINTS = BIT(2), |
1229 | WIPHY_FLAG_NETNS_OK = BIT(3), | 1353 | WIPHY_FLAG_NETNS_OK = BIT(3), |
1230 | WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), | 1354 | WIPHY_FLAG_PS_ON_BY_DEFAULT = BIT(4), |
1231 | WIPHY_FLAG_4ADDR_AP = BIT(5), | 1355 | WIPHY_FLAG_4ADDR_AP = BIT(5), |
1232 | WIPHY_FLAG_4ADDR_STATION = BIT(6), | 1356 | WIPHY_FLAG_4ADDR_STATION = BIT(6), |
1357 | WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7), | ||
1358 | WIPHY_FLAG_IBSS_RSN = BIT(7), | ||
1233 | }; | 1359 | }; |
1234 | 1360 | ||
1235 | struct mac_address { | 1361 | struct mac_address { |
1236 | u8 addr[ETH_ALEN]; | 1362 | u8 addr[ETH_ALEN]; |
1237 | }; | 1363 | }; |
1238 | 1364 | ||
1365 | struct ieee80211_txrx_stypes { | ||
1366 | u16 tx, rx; | ||
1367 | }; | ||
1368 | |||
1239 | /** | 1369 | /** |
1240 | * struct wiphy - wireless hardware description | 1370 | * struct wiphy - wireless hardware description |
1241 | * @reg_notifier: the driver's regulatory notification callback | 1371 | * @reg_notifier: the driver's regulatory notification callback |
@@ -1286,6 +1416,10 @@ struct mac_address { | |||
1286 | * @privid: a pointer that drivers can use to identify if an arbitrary | 1416 | * @privid: a pointer that drivers can use to identify if an arbitrary |
1287 | * wiphy is theirs, e.g. in global notifiers | 1417 | * wiphy is theirs, e.g. in global notifiers |
1288 | * @bands: information about bands/channels supported by this device | 1418 | * @bands: information about bands/channels supported by this device |
1419 | * | ||
1420 | * @mgmt_stypes: bitmasks of frame subtypes that can be subscribed to or | ||
1421 | * transmitted through nl80211, points to an array indexed by interface | ||
1422 | * type | ||
1289 | */ | 1423 | */ |
1290 | struct wiphy { | 1424 | struct wiphy { |
1291 | /* assign these fields before you register the wiphy */ | 1425 | /* assign these fields before you register the wiphy */ |
@@ -1294,9 +1428,12 @@ struct wiphy { | |||
1294 | u8 perm_addr[ETH_ALEN]; | 1428 | u8 perm_addr[ETH_ALEN]; |
1295 | u8 addr_mask[ETH_ALEN]; | 1429 | u8 addr_mask[ETH_ALEN]; |
1296 | 1430 | ||
1297 | u16 n_addresses; | ||
1298 | struct mac_address *addresses; | 1431 | struct mac_address *addresses; |
1299 | 1432 | ||
1433 | const struct ieee80211_txrx_stypes *mgmt_stypes; | ||
1434 | |||
1435 | u16 n_addresses; | ||
1436 | |||
1300 | /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ | 1437 | /* Supported interface modes, OR together BIT(NL80211_IFTYPE_...) */ |
1301 | u16 interface_modes; | 1438 | u16 interface_modes; |
1302 | 1439 | ||
@@ -1492,8 +1629,8 @@ struct cfg80211_cached_keys; | |||
1492 | * set by driver (if supported) on add_interface BEFORE registering the | 1629 | * set by driver (if supported) on add_interface BEFORE registering the |
1493 | * netdev and may otherwise be used by driver read-only, will be update | 1630 | * netdev and may otherwise be used by driver read-only, will be update |
1494 | * by cfg80211 on change_interface | 1631 | * by cfg80211 on change_interface |
1495 | * @action_registrations: list of registrations for action frames | 1632 | * @mgmt_registrations: list of registrations for management frames |
1496 | * @action_registrations_lock: lock for the list | 1633 | * @mgmt_registrations_lock: lock for the list |
1497 | * @mtx: mutex used to lock data in this struct | 1634 | * @mtx: mutex used to lock data in this struct |
1498 | * @cleanup_work: work struct used for cleanup that can't be done directly | 1635 | * @cleanup_work: work struct used for cleanup that can't be done directly |
1499 | */ | 1636 | */ |
@@ -1505,8 +1642,8 @@ struct wireless_dev { | |||
1505 | struct list_head list; | 1642 | struct list_head list; |
1506 | struct net_device *netdev; | 1643 | struct net_device *netdev; |
1507 | 1644 | ||
1508 | struct list_head action_registrations; | 1645 | struct list_head mgmt_registrations; |
1509 | spinlock_t action_registrations_lock; | 1646 | spinlock_t mgmt_registrations_lock; |
1510 | 1647 | ||
1511 | struct mutex mtx; | 1648 | struct mutex mtx; |
1512 | 1649 | ||
@@ -1563,8 +1700,10 @@ static inline void *wdev_priv(struct wireless_dev *wdev) | |||
1563 | return wiphy_priv(wdev->wiphy); | 1700 | return wiphy_priv(wdev->wiphy); |
1564 | } | 1701 | } |
1565 | 1702 | ||
1566 | /* | 1703 | /** |
1567 | * Utility functions | 1704 | * DOC: Utility functions |
1705 | * | ||
1706 | * cfg80211 offers a number of utility functions that can be useful. | ||
1568 | */ | 1707 | */ |
1569 | 1708 | ||
1570 | /** | 1709 | /** |
@@ -1715,7 +1854,15 @@ unsigned int ieee80211_get_hdrlen_from_skb(const struct sk_buff *skb); | |||
1715 | * ieee80211_hdrlen - get header length in bytes from frame control | 1854 | * ieee80211_hdrlen - get header length in bytes from frame control |
1716 | * @fc: frame control field in little-endian format | 1855 | * @fc: frame control field in little-endian format |
1717 | */ | 1856 | */ |
1718 | unsigned int ieee80211_hdrlen(__le16 fc); | 1857 | unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc); |
1858 | |||
1859 | /** | ||
1860 | * DOC: Data path helpers | ||
1861 | * | ||
1862 | * In addition to generic utilities, cfg80211 also offers | ||
1863 | * functions that help implement the data path for devices | ||
1864 | * that do not do the 802.11/802.3 conversion on the device. | ||
1865 | */ | ||
1719 | 1866 | ||
1720 | /** | 1867 | /** |
1721 | * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 | 1868 | * ieee80211_data_to_8023 - convert an 802.11 data frame to 802.3 |
@@ -1777,8 +1924,10 @@ unsigned int cfg80211_classify8021d(struct sk_buff *skb); | |||
1777 | */ | 1924 | */ |
1778 | const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); | 1925 | const u8 *cfg80211_find_ie(u8 eid, const u8 *ies, int len); |
1779 | 1926 | ||
1780 | /* | 1927 | /** |
1781 | * Regulatory helper functions for wiphys | 1928 | * DOC: Regulatory enforcement infrastructure |
1929 | * | ||
1930 | * TODO | ||
1782 | */ | 1931 | */ |
1783 | 1932 | ||
1784 | /** | 1933 | /** |
@@ -2181,6 +2330,20 @@ void cfg80211_michael_mic_failure(struct net_device *dev, const u8 *addr, | |||
2181 | void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp); | 2330 | void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp); |
2182 | 2331 | ||
2183 | /** | 2332 | /** |
2333 | * DOC: RFkill integration | ||
2334 | * | ||
2335 | * RFkill integration in cfg80211 is almost invisible to drivers, | ||
2336 | * as cfg80211 automatically registers an rfkill instance for each | ||
2337 | * wireless device it knows about. Soft kill is also translated | ||
2338 | * into disconnecting and turning all interfaces off, drivers are | ||
2339 | * expected to turn off the device when all interfaces are down. | ||
2340 | * | ||
2341 | * However, devices may have a hard RFkill line, in which case they | ||
2342 | * also need to interact with the rfkill subsystem, via cfg80211. | ||
2343 | * They can do this with a few helper functions documented here. | ||
2344 | */ | ||
2345 | |||
2346 | /** | ||
2184 | * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state | 2347 | * wiphy_rfkill_set_hw_state - notify cfg80211 about hw block state |
2185 | * @wiphy: the wiphy | 2348 | * @wiphy: the wiphy |
2186 | * @blocked: block status | 2349 | * @blocked: block status |
@@ -2201,6 +2364,17 @@ void wiphy_rfkill_stop_polling(struct wiphy *wiphy); | |||
2201 | 2364 | ||
2202 | #ifdef CONFIG_NL80211_TESTMODE | 2365 | #ifdef CONFIG_NL80211_TESTMODE |
2203 | /** | 2366 | /** |
2367 | * DOC: Test mode | ||
2368 | * | ||
2369 | * Test mode is a set of utility functions to allow drivers to | ||
2370 | * interact with driver-specific tools to aid, for instance, | ||
2371 | * factory programming. | ||
2372 | * | ||
2373 | * This chapter describes how drivers interact with it, for more | ||
2374 | * information see the nl80211 book's chapter on it. | ||
2375 | */ | ||
2376 | |||
2377 | /** | ||
2204 | * cfg80211_testmode_alloc_reply_skb - allocate testmode reply | 2378 | * cfg80211_testmode_alloc_reply_skb - allocate testmode reply |
2205 | * @wiphy: the wiphy | 2379 | * @wiphy: the wiphy |
2206 | * @approxlen: an upper bound of the length of the data that will | 2380 | * @approxlen: an upper bound of the length of the data that will |
@@ -2373,38 +2547,39 @@ void cfg80211_new_sta(struct net_device *dev, const u8 *mac_addr, | |||
2373 | struct station_info *sinfo, gfp_t gfp); | 2547 | struct station_info *sinfo, gfp_t gfp); |
2374 | 2548 | ||
2375 | /** | 2549 | /** |
2376 | * cfg80211_rx_action - notification of received, unprocessed Action frame | 2550 | * cfg80211_rx_mgmt - notification of received, unprocessed management frame |
2377 | * @dev: network device | 2551 | * @dev: network device |
2378 | * @freq: Frequency on which the frame was received in MHz | 2552 | * @freq: Frequency on which the frame was received in MHz |
2379 | * @buf: Action frame (header + body) | 2553 | * @buf: Management frame (header + body) |
2380 | * @len: length of the frame data | 2554 | * @len: length of the frame data |
2381 | * @gfp: context flags | 2555 | * @gfp: context flags |
2382 | * Returns %true if a user space application is responsible for rejecting the | 2556 | * |
2383 | * unrecognized Action frame; %false if no such application is registered | 2557 | * Returns %true if a user space application has registered for this frame. |
2384 | * (i.e., the driver is responsible for rejecting the unrecognized Action | 2558 | * For action frames, that makes it responsible for rejecting unrecognized |
2385 | * frame) | 2559 | * action frames; %false otherwise, in which case for action frames the |
2560 | * driver is responsible for rejecting the frame. | ||
2386 | * | 2561 | * |
2387 | * This function is called whenever an Action frame is received for a station | 2562 | * This function is called whenever an Action frame is received for a station |
2388 | * mode interface, but is not processed in kernel. | 2563 | * mode interface, but is not processed in kernel. |
2389 | */ | 2564 | */ |
2390 | bool cfg80211_rx_action(struct net_device *dev, int freq, const u8 *buf, | 2565 | bool cfg80211_rx_mgmt(struct net_device *dev, int freq, const u8 *buf, |
2391 | size_t len, gfp_t gfp); | 2566 | size_t len, gfp_t gfp); |
2392 | 2567 | ||
2393 | /** | 2568 | /** |
2394 | * cfg80211_action_tx_status - notification of TX status for Action frame | 2569 | * cfg80211_mgmt_tx_status - notification of TX status for management frame |
2395 | * @dev: network device | 2570 | * @dev: network device |
2396 | * @cookie: Cookie returned by cfg80211_ops::action() | 2571 | * @cookie: Cookie returned by cfg80211_ops::mgmt_tx() |
2397 | * @buf: Action frame (header + body) | 2572 | * @buf: Management frame (header + body) |
2398 | * @len: length of the frame data | 2573 | * @len: length of the frame data |
2399 | * @ack: Whether frame was acknowledged | 2574 | * @ack: Whether frame was acknowledged |
2400 | * @gfp: context flags | 2575 | * @gfp: context flags |
2401 | * | 2576 | * |
2402 | * This function is called whenever an Action frame was requested to be | 2577 | * This function is called whenever a management frame was requested to be |
2403 | * transmitted with cfg80211_ops::action() to report the TX status of the | 2578 | * transmitted with cfg80211_ops::mgmt_tx() to report the TX status of the |
2404 | * transmission attempt. | 2579 | * transmission attempt. |
2405 | */ | 2580 | */ |
2406 | void cfg80211_action_tx_status(struct net_device *dev, u64 cookie, | 2581 | void cfg80211_mgmt_tx_status(struct net_device *dev, u64 cookie, |
2407 | const u8 *buf, size_t len, bool ack, gfp_t gfp); | 2582 | const u8 *buf, size_t len, bool ack, gfp_t gfp); |
2408 | 2583 | ||
2409 | 2584 | ||
2410 | /** | 2585 | /** |
@@ -2420,56 +2595,41 @@ void cfg80211_cqm_rssi_notify(struct net_device *dev, | |||
2420 | enum nl80211_cqm_rssi_threshold_event rssi_event, | 2595 | enum nl80211_cqm_rssi_threshold_event rssi_event, |
2421 | gfp_t gfp); | 2596 | gfp_t gfp); |
2422 | 2597 | ||
2423 | #ifdef __KERNEL__ | ||
2424 | |||
2425 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ | 2598 | /* Logging, debugging and troubleshooting/diagnostic helpers. */ |
2426 | 2599 | ||
2427 | /* wiphy_printk helpers, similar to dev_printk */ | 2600 | /* wiphy_printk helpers, similar to dev_printk */ |
2428 | 2601 | ||
2429 | #define wiphy_printk(level, wiphy, format, args...) \ | 2602 | #define wiphy_printk(level, wiphy, format, args...) \ |
2430 | printk(level "%s: " format, wiphy_name(wiphy), ##args) | 2603 | dev_printk(level, &(wiphy)->dev, format, ##args) |
2431 | #define wiphy_emerg(wiphy, format, args...) \ | 2604 | #define wiphy_emerg(wiphy, format, args...) \ |
2432 | wiphy_printk(KERN_EMERG, wiphy, format, ##args) | 2605 | dev_emerg(&(wiphy)->dev, format, ##args) |
2433 | #define wiphy_alert(wiphy, format, args...) \ | 2606 | #define wiphy_alert(wiphy, format, args...) \ |
2434 | wiphy_printk(KERN_ALERT, wiphy, format, ##args) | 2607 | dev_alert(&(wiphy)->dev, format, ##args) |
2435 | #define wiphy_crit(wiphy, format, args...) \ | 2608 | #define wiphy_crit(wiphy, format, args...) \ |
2436 | wiphy_printk(KERN_CRIT, wiphy, format, ##args) | 2609 | dev_crit(&(wiphy)->dev, format, ##args) |
2437 | #define wiphy_err(wiphy, format, args...) \ | 2610 | #define wiphy_err(wiphy, format, args...) \ |
2438 | wiphy_printk(KERN_ERR, wiphy, format, ##args) | 2611 | dev_err(&(wiphy)->dev, format, ##args) |
2439 | #define wiphy_warn(wiphy, format, args...) \ | 2612 | #define wiphy_warn(wiphy, format, args...) \ |
2440 | wiphy_printk(KERN_WARNING, wiphy, format, ##args) | 2613 | dev_warn(&(wiphy)->dev, format, ##args) |
2441 | #define wiphy_notice(wiphy, format, args...) \ | 2614 | #define wiphy_notice(wiphy, format, args...) \ |
2442 | wiphy_printk(KERN_NOTICE, wiphy, format, ##args) | 2615 | dev_notice(&(wiphy)->dev, format, ##args) |
2443 | #define wiphy_info(wiphy, format, args...) \ | 2616 | #define wiphy_info(wiphy, format, args...) \ |
2444 | wiphy_printk(KERN_INFO, wiphy, format, ##args) | 2617 | dev_info(&(wiphy)->dev, format, ##args) |
2445 | 2618 | ||
2446 | int wiphy_debug(const struct wiphy *wiphy, const char *format, ...) | 2619 | #define wiphy_debug(wiphy, format, args...) \ |
2447 | __attribute__ ((format (printf, 2, 3))); | ||
2448 | |||
2449 | #if defined(DEBUG) | ||
2450 | #define wiphy_dbg(wiphy, format, args...) \ | ||
2451 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args) | 2620 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args) |
2452 | #elif defined(CONFIG_DYNAMIC_DEBUG) | 2621 | |
2453 | #define wiphy_dbg(wiphy, format, args...) \ | 2622 | #define wiphy_dbg(wiphy, format, args...) \ |
2454 | dynamic_pr_debug("%s: " format, wiphy_name(wiphy), ##args) | 2623 | dev_dbg(&(wiphy)->dev, format, ##args) |
2455 | #else | ||
2456 | #define wiphy_dbg(wiphy, format, args...) \ | ||
2457 | ({ \ | ||
2458 | if (0) \ | ||
2459 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ | ||
2460 | 0; \ | ||
2461 | }) | ||
2462 | #endif | ||
2463 | 2624 | ||
2464 | #if defined(VERBOSE_DEBUG) | 2625 | #if defined(VERBOSE_DEBUG) |
2465 | #define wiphy_vdbg wiphy_dbg | 2626 | #define wiphy_vdbg wiphy_dbg |
2466 | #else | 2627 | #else |
2467 | |||
2468 | #define wiphy_vdbg(wiphy, format, args...) \ | 2628 | #define wiphy_vdbg(wiphy, format, args...) \ |
2469 | ({ \ | 2629 | ({ \ |
2470 | if (0) \ | 2630 | if (0) \ |
2471 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ | 2631 | wiphy_printk(KERN_DEBUG, wiphy, format, ##args); \ |
2472 | 0; \ | 2632 | 0; \ |
2473 | }) | 2633 | }) |
2474 | #endif | 2634 | #endif |
2475 | 2635 | ||
@@ -2481,6 +2641,4 @@ int wiphy_debug(const struct wiphy *wiphy, const char *format, ...) | |||
2481 | #define wiphy_WARN(wiphy, format, args...) \ | 2641 | #define wiphy_WARN(wiphy, format, args...) \ |
2482 | WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args); | 2642 | WARN(1, "wiphy: %s\n" format, wiphy_name(wiphy), ##args); |
2483 | 2643 | ||
2484 | #endif | ||
2485 | |||
2486 | #endif /* __NET_CFG80211_H */ | 2644 | #endif /* __NET_CFG80211_H */ |
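The cfg80211.h hunk adds kernel-doc DOC sections, generalizes the Action-frame plumbing to management frames (mgmt_tx, cfg80211_rx_mgmt, cfg80211_mgmt_tx_status, mgmt_frame_register), adds a pairwise argument to the key operations, and extends the survey API with channel-time counters. A sketch of a driver's dump_survey() callback filling those new fields; the callback signature is assumed from cfg80211_ops, and the band choice and numeric values are made up:

#include <net/cfg80211.h>

/* Hypothetical dump_survey() implementation reporting per-channel stats. */
static int example_dump_survey(struct wiphy *wiphy, struct net_device *dev,
			       int idx, struct survey_info *survey)
{
	struct ieee80211_supported_band *band =
		wiphy->bands[IEEE80211_BAND_2GHZ];

	if (!band || idx >= band->n_channels)
		return -ENOENT;		/* ends the dump */

	survey->channel = &band->channels[idx];
	survey->filled = SURVEY_INFO_NOISE_DBM |
			 SURVEY_INFO_CHANNEL_TIME |
			 SURVEY_INFO_CHANNEL_TIME_BUSY;
	survey->noise = -92;			/* dBm */
	survey->channel_time = 100;		/* ms spent on the channel */
	survey->channel_time_busy = 40;		/* ms sensed busy */

	return 0;
}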
diff --git a/include/net/cls_cgroup.h b/include/net/cls_cgroup.h index 726cc3536409..a4dc5b027bd9 100644 --- a/include/net/cls_cgroup.h +++ b/include/net/cls_cgroup.h | |||
@@ -27,11 +27,17 @@ struct cgroup_cls_state | |||
27 | #ifdef CONFIG_NET_CLS_CGROUP | 27 | #ifdef CONFIG_NET_CLS_CGROUP |
28 | static inline u32 task_cls_classid(struct task_struct *p) | 28 | static inline u32 task_cls_classid(struct task_struct *p) |
29 | { | 29 | { |
30 | int classid; | ||
31 | |||
30 | if (in_interrupt()) | 32 | if (in_interrupt()) |
31 | return 0; | 33 | return 0; |
32 | 34 | ||
33 | return container_of(task_subsys_state(p, net_cls_subsys_id), | 35 | rcu_read_lock(); |
34 | struct cgroup_cls_state, css)->classid; | 36 | classid = container_of(task_subsys_state(p, net_cls_subsys_id), |
37 | struct cgroup_cls_state, css)->classid; | ||
38 | rcu_read_unlock(); | ||
39 | |||
40 | return classid; | ||
35 | } | 41 | } |
36 | #else | 42 | #else |
37 | extern int net_cls_subsys_id; | 43 | extern int net_cls_subsys_id; |
@@ -45,7 +51,8 @@ static inline u32 task_cls_classid(struct task_struct *p) | |||
45 | return 0; | 51 | return 0; |
46 | 52 | ||
47 | rcu_read_lock(); | 53 | rcu_read_lock(); |
48 | id = rcu_dereference(net_cls_subsys_id); | 54 | id = rcu_dereference_index_check(net_cls_subsys_id, |
55 | rcu_read_lock_held()); | ||
49 | if (id >= 0) | 56 | if (id >= 0) |
50 | classid = container_of(task_subsys_state(p, id), | 57 | classid = container_of(task_subsys_state(p, id), |
51 | struct cgroup_cls_state, css)->classid; | 58 | struct cgroup_cls_state, css)->classid; |
diff --git a/include/net/dst.h b/include/net/dst.h index 81d1413a8701..a217c838ec0d 100644 --- a/include/net/dst.h +++ b/include/net/dst.h | |||
@@ -43,10 +43,11 @@ struct dst_entry { | |||
43 | short error; | 43 | short error; |
44 | short obsolete; | 44 | short obsolete; |
45 | int flags; | 45 | int flags; |
46 | #define DST_HOST 1 | 46 | #define DST_HOST 0x0001 |
47 | #define DST_NOXFRM 2 | 47 | #define DST_NOXFRM 0x0002 |
48 | #define DST_NOPOLICY 4 | 48 | #define DST_NOPOLICY 0x0004 |
49 | #define DST_NOHASH 8 | 49 | #define DST_NOHASH 0x0008 |
50 | #define DST_NOCACHE 0x0010 | ||
50 | unsigned long expires; | 51 | unsigned long expires; |
51 | 52 | ||
52 | unsigned short header_len; /* more space at head required */ | 53 | unsigned short header_len; /* more space at head required */ |
@@ -228,22 +229,37 @@ static inline void skb_dst_force(struct sk_buff *skb) | |||
228 | 229 | ||
229 | 230 | ||
230 | /** | 231 | /** |
232 | * __skb_tunnel_rx - prepare skb for rx reinsert | ||
233 | * @skb: buffer | ||
234 | * @dev: tunnel device | ||
235 | * | ||
236 | * After decapsulation, packet is going to re-enter (netif_rx()) our stack, | ||
237 | * so make some cleanups. (no accounting done) | ||
238 | */ | ||
239 | static inline void __skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) | ||
240 | { | ||
241 | skb->dev = dev; | ||
242 | skb->rxhash = 0; | ||
243 | skb_set_queue_mapping(skb, 0); | ||
244 | skb_dst_drop(skb); | ||
245 | nf_reset(skb); | ||
246 | } | ||
247 | |||
248 | /** | ||
231 | * skb_tunnel_rx - prepare skb for rx reinsert | 249 | * skb_tunnel_rx - prepare skb for rx reinsert |
232 | * @skb: buffer | 250 | * @skb: buffer |
233 | * @dev: tunnel device | 251 | * @dev: tunnel device |
234 | * | 252 | * |
235 | * After decapsulation, packet is going to re-enter (netif_rx()) our stack, | 253 | * After decapsulation, packet is going to re-enter (netif_rx()) our stack, |
236 | * so make some cleanups, and perform accounting. | 254 | * so make some cleanups, and perform accounting. |
255 | * Note: this accounting is not SMP safe. | ||
237 | */ | 256 | */ |
238 | static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) | 257 | static inline void skb_tunnel_rx(struct sk_buff *skb, struct net_device *dev) |
239 | { | 258 | { |
240 | skb->dev = dev; | ||
241 | /* TODO : stats should be SMP safe */ | 259 | /* TODO : stats should be SMP safe */ |
242 | dev->stats.rx_packets++; | 260 | dev->stats.rx_packets++; |
243 | dev->stats.rx_bytes += skb->len; | 261 | dev->stats.rx_bytes += skb->len; |
244 | skb->rxhash = 0; | 262 | __skb_tunnel_rx(skb, dev); |
245 | skb_dst_drop(skb); | ||
246 | nf_reset(skb); | ||
247 | } | 263 | } |
248 | 264 | ||
249 | /* Children define the path of the packet through the | 265 | /* Children define the path of the packet through the |
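The dst.h hunk splits the stat-free part of skb_tunnel_rx() into __skb_tunnel_rx(), so callers that do their own accounting can skip the (non-SMP-safe) stats update. A hypothetical decapsulation path using the accounted variant:

#include <linux/netdevice.h>
#include <net/dst.h>

/* Hypothetical tunnel receive path: after stripping the outer headers,
 * hand the inner packet back to the stack on the tunnel device. */
static void example_tunnel_deliver(struct sk_buff *skb, struct net_device *tdev)
{
	/* skb_tunnel_rx() bumps tdev->stats and calls __skb_tunnel_rx(),
	 * which resets dev, rxhash, queue mapping, dst and conntrack state. */
	skb_tunnel_rx(skb, tdev);
	netif_rx(skb);
}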
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h index d1ff9b7e99b8..1fa5306e3e23 100644 --- a/include/net/dst_ops.h +++ b/include/net/dst_ops.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _NET_DST_OPS_H | 1 | #ifndef _NET_DST_OPS_H |
2 | #define _NET_DST_OPS_H | 2 | #define _NET_DST_OPS_H |
3 | #include <linux/types.h> | 3 | #include <linux/types.h> |
4 | #include <linux/percpu_counter.h> | ||
4 | 5 | ||
5 | struct dst_entry; | 6 | struct dst_entry; |
6 | struct kmem_cachep; | 7 | struct kmem_cachep; |
@@ -22,7 +23,41 @@ struct dst_ops { | |||
22 | void (*update_pmtu)(struct dst_entry *dst, u32 mtu); | 23 | void (*update_pmtu)(struct dst_entry *dst, u32 mtu); |
23 | int (*local_out)(struct sk_buff *skb); | 24 | int (*local_out)(struct sk_buff *skb); |
24 | 25 | ||
25 | atomic_t entries; | ||
26 | struct kmem_cache *kmem_cachep; | 26 | struct kmem_cache *kmem_cachep; |
27 | |||
28 | struct percpu_counter pcpuc_entries ____cacheline_aligned_in_smp; | ||
27 | }; | 29 | }; |
30 | |||
31 | static inline int dst_entries_get_fast(struct dst_ops *dst) | ||
32 | { | ||
33 | return percpu_counter_read_positive(&dst->pcpuc_entries); | ||
34 | } | ||
35 | |||
36 | static inline int dst_entries_get_slow(struct dst_ops *dst) | ||
37 | { | ||
38 | int res; | ||
39 | |||
40 | local_bh_disable(); | ||
41 | res = percpu_counter_sum_positive(&dst->pcpuc_entries); | ||
42 | local_bh_enable(); | ||
43 | return res; | ||
44 | } | ||
45 | |||
46 | static inline void dst_entries_add(struct dst_ops *dst, int val) | ||
47 | { | ||
48 | local_bh_disable(); | ||
49 | percpu_counter_add(&dst->pcpuc_entries, val); | ||
50 | local_bh_enable(); | ||
51 | } | ||
52 | |||
53 | static inline int dst_entries_init(struct dst_ops *dst) | ||
54 | { | ||
55 | return percpu_counter_init(&dst->pcpuc_entries, 0); | ||
56 | } | ||
57 | |||
58 | static inline void dst_entries_destroy(struct dst_ops *dst) | ||
59 | { | ||
60 | percpu_counter_destroy(&dst->pcpuc_entries); | ||
61 | } | ||
62 | |||
28 | #endif | 63 | #endif |
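The dst_ops change replaces the atomic entries counter with a per-CPU counter plus accessor helpers, so hot-path reads stay cheap and the exact sum is only computed when it matters. A sketch of a protocol using the new helpers; the dst_ops instance and threshold checks are illustrative:

#include <linux/init.h>
#include <linux/socket.h>
#include <net/dst_ops.h>

static struct dst_ops example_dst_ops = {
	.family = AF_INET,
	/* ... protocol, check, destroy, gc, ... */
};

static int __init example_dst_init(void)
{
	/* replaces initialization of the old 'atomic_t entries' field */
	return dst_entries_init(&example_dst_ops);
}

static void __exit example_dst_exit(void)
{
	dst_entries_destroy(&example_dst_ops);
}

/* Fast path: an approximate read is enough to decide whether to try GC. */
static bool example_over_gc_thresh(int thresh)
{
	return dst_entries_get_fast(&example_dst_ops) > thresh;
}

/* Slow path: take the exact (but costlier) sum before acting on it. */
static bool example_really_over_gc_thresh(int thresh)
{
	return dst_entries_get_slow(&example_dst_ops) > thresh;
}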
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h index e8923bc20f9f..106f3097d384 100644 --- a/include/net/fib_rules.h +++ b/include/net/fib_rules.h | |||
@@ -31,6 +31,8 @@ struct fib_lookup_arg { | |||
31 | void *lookup_ptr; | 31 | void *lookup_ptr; |
32 | void *result; | 32 | void *result; |
33 | struct fib_rule *rule; | 33 | struct fib_rule *rule; |
34 | int flags; | ||
35 | #define FIB_LOOKUP_NOREF 1 | ||
34 | }; | 36 | }; |
35 | 37 | ||
36 | struct fib_rules_ops { | 38 | struct fib_rules_ops { |
@@ -106,7 +108,6 @@ static inline u32 frh_get_table(struct fib_rule_hdr *frh, struct nlattr **nla) | |||
106 | 108 | ||
107 | extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *); | 109 | extern struct fib_rules_ops *fib_rules_register(const struct fib_rules_ops *, struct net *); |
108 | extern void fib_rules_unregister(struct fib_rules_ops *); | 110 | extern void fib_rules_unregister(struct fib_rules_ops *); |
109 | extern void fib_rules_cleanup_ops(struct fib_rules_ops *); | ||
110 | 111 | ||
111 | extern int fib_rules_lookup(struct fib_rules_ops *, | 112 | extern int fib_rules_lookup(struct fib_rules_ops *, |
112 | struct flowi *, int flags, | 113 | struct flowi *, int flags, |
diff --git a/include/net/flow.h b/include/net/flow.h index bb08692a20b0..0ac3fb5e0973 100644 --- a/include/net/flow.h +++ b/include/net/flow.h | |||
@@ -49,6 +49,7 @@ struct flowi { | |||
49 | __u8 proto; | 49 | __u8 proto; |
50 | __u8 flags; | 50 | __u8 flags; |
51 | #define FLOWI_FLAG_ANYSRC 0x01 | 51 | #define FLOWI_FLAG_ANYSRC 0x01 |
52 | #define FLOWI_FLAG_MATCH_ANY_IIF 0x02 | ||
52 | union { | 53 | union { |
53 | struct { | 54 | struct { |
54 | __be16 sport; | 55 | __be16 sport; |
diff --git a/include/net/genetlink.h b/include/net/genetlink.h index f7dcd2c70412..8a64b811a39a 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h | |||
@@ -20,6 +20,9 @@ struct genl_multicast_group { | |||
20 | u32 id; | 20 | u32 id; |
21 | }; | 21 | }; |
22 | 22 | ||
23 | struct genl_ops; | ||
24 | struct genl_info; | ||
25 | |||
23 | /** | 26 | /** |
24 | * struct genl_family - generic netlink family | 27 | * struct genl_family - generic netlink family |
25 | * @id: protocol family idenfitier | 28 | * @id: protocol family idenfitier |
@@ -29,6 +32,10 @@ struct genl_multicast_group { | |||
29 | * @maxattr: maximum number of attributes supported | 32 | * @maxattr: maximum number of attributes supported |
30 | * @netnsok: set to true if the family can handle network | 33 | * @netnsok: set to true if the family can handle network |
31 | * namespaces and should be presented in all of them | 34 | * namespaces and should be presented in all of them |
35 | * @pre_doit: called before an operation's doit callback, it may | ||
36 | * do additional, common, filtering and return an error | ||
37 | * @post_doit: called after an operation's doit callback, it may | ||
38 | * undo operations done by pre_doit, for example release locks | ||
32 | * @attrbuf: buffer to store parsed attributes | 39 | * @attrbuf: buffer to store parsed attributes |
33 | * @ops_list: list of all assigned operations | 40 | * @ops_list: list of all assigned operations |
34 | * @family_list: family list | 41 | * @family_list: family list |
@@ -41,6 +48,12 @@ struct genl_family { | |||
41 | unsigned int version; | 48 | unsigned int version; |
42 | unsigned int maxattr; | 49 | unsigned int maxattr; |
43 | bool netnsok; | 50 | bool netnsok; |
51 | int (*pre_doit)(struct genl_ops *ops, | ||
52 | struct sk_buff *skb, | ||
53 | struct genl_info *info); | ||
54 | void (*post_doit)(struct genl_ops *ops, | ||
55 | struct sk_buff *skb, | ||
56 | struct genl_info *info); | ||
44 | struct nlattr ** attrbuf; /* private */ | 57 | struct nlattr ** attrbuf; /* private */ |
45 | struct list_head ops_list; /* private */ | 58 | struct list_head ops_list; /* private */ |
46 | struct list_head family_list; /* private */ | 59 | struct list_head family_list; /* private */ |
@@ -55,6 +68,8 @@ struct genl_family { | |||
55 | * @genlhdr: generic netlink message header | 68 | * @genlhdr: generic netlink message header |
56 | * @userhdr: user specific header | 69 | * @userhdr: user specific header |
57 | * @attrs: netlink attributes | 70 | * @attrs: netlink attributes |
71 | * @_net: network namespace | ||
72 | * @user_ptr: user pointers | ||
58 | */ | 73 | */ |
59 | struct genl_info { | 74 | struct genl_info { |
60 | u32 snd_seq; | 75 | u32 snd_seq; |
@@ -66,6 +81,7 @@ struct genl_info { | |||
66 | #ifdef CONFIG_NET_NS | 81 | #ifdef CONFIG_NET_NS |
67 | struct net * _net; | 82 | struct net * _net; |
68 | #endif | 83 | #endif |
84 | void * user_ptr[2]; | ||
69 | }; | 85 | }; |
70 | 86 | ||
71 | static inline struct net *genl_info_net(struct genl_info *info) | 87 | static inline struct net *genl_info_net(struct genl_info *info) |
@@ -81,6 +97,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) | |||
81 | /** | 97 | /** |
82 | * struct genl_ops - generic netlink operations | 98 | * struct genl_ops - generic netlink operations |
83 | * @cmd: command identifier | 99 | * @cmd: command identifier |
100 | * @internal_flags: flags used by the family | ||
84 | * @flags: flags | 101 | * @flags: flags |
85 | * @policy: attribute validation policy | 102 | * @policy: attribute validation policy |
86 | * @doit: standard command callback | 103 | * @doit: standard command callback |
@@ -90,6 +107,7 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net) | |||
90 | */ | 107 | */ |
91 | struct genl_ops { | 108 | struct genl_ops { |
92 | u8 cmd; | 109 | u8 cmd; |
110 | u8 internal_flags; | ||
93 | unsigned int flags; | 111 | unsigned int flags; |
94 | const struct nla_policy *policy; | 112 | const struct nla_policy *policy; |
95 | int (*doit)(struct sk_buff *skb, | 113 | int (*doit)(struct sk_buff *skb, |
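The genetlink additions let a family hook every doit call: pre_doit can take common locks or resolve objects and stash them in info->user_ptr[], post_doit undoes that work, and the per-operation internal_flags steer what the hooks do. A hypothetical family wiring this up; the flag value, lookup, and family name are made up:

#include <linux/rtnetlink.h>
#include <net/genetlink.h>

#define EXAMPLE_OP_NEEDS_RTNL	0x01	/* hypothetical internal flag */

/* Runs before every doit: take common locks, stash per-request state. */
static int example_pre_doit(struct genl_ops *ops, struct sk_buff *skb,
			    struct genl_info *info)
{
	if (ops->internal_flags & EXAMPLE_OP_NEEDS_RTNL)
		rtnl_lock();

	info->user_ptr[0] = NULL;	/* e.g. a device looked up from attrs */
	return 0;			/* non-zero would abort the doit */
}

/* Runs after the doit: undo whatever pre_doit set up. */
static void example_post_doit(struct genl_ops *ops, struct sk_buff *skb,
			      struct genl_info *info)
{
	if (ops->internal_flags & EXAMPLE_OP_NEEDS_RTNL)
		rtnl_unlock();
}

static struct genl_family example_family = {
	.id = GENL_ID_GENERATE,
	.name = "example",
	.version = 1,
	.maxattr = 0,
	.pre_doit = example_pre_doit,
	.post_doit = example_post_doit,
};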
diff --git a/include/net/gre.h b/include/net/gre.h new file mode 100644 index 000000000000..82665474bcb7 --- /dev/null +++ b/include/net/gre.h | |||
@@ -0,0 +1,18 @@ | |||
1 | #ifndef __LINUX_GRE_H | ||
2 | #define __LINUX_GRE_H | ||
3 | |||
4 | #include <linux/skbuff.h> | ||
5 | |||
6 | #define GREPROTO_CISCO 0 | ||
7 | #define GREPROTO_PPTP 1 | ||
8 | #define GREPROTO_MAX 2 | ||
9 | |||
10 | struct gre_protocol { | ||
11 | int (*handler)(struct sk_buff *skb); | ||
12 | void (*err_handler)(struct sk_buff *skb, u32 info); | ||
13 | }; | ||
14 | |||
15 | int gre_add_protocol(const struct gre_protocol *proto, u8 version); | ||
16 | int gre_del_protocol(const struct gre_protocol *proto, u8 version); | ||
17 | |||
18 | #endif | ||
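[Editor's note] The new net/gre.h exposes a small, version-keyed demultiplexer so that PPTP-style (version 1) and plain Cisco GRE users can register their receive and ICMP-error handlers. A registration sketch, with all demo_* symbols as placeholders:

#include <linux/module.h>
#include <linux/skbuff.h>
#include <net/gre.h>

static int demo_gre_rcv(struct sk_buff *skb)
{
	kfree_skb(skb);		/* a real handler would demultiplex to a session here */
	return 0;
}

static void demo_gre_err(struct sk_buff *skb, u32 info)
{
	/* propagate the ICMP error to the owning tunnel/session here */
}

static const struct gre_protocol demo_gre_proto = {
	.handler	= demo_gre_rcv,
	.err_handler	= demo_gre_err,
};

static int __init demo_gre_init(void)
{
	return gre_add_protocol(&demo_gre_proto, GREPROTO_PPTP);
}

static void __exit demo_gre_exit(void)
{
	gre_del_protocol(&demo_gre_proto, GREPROTO_PPTP);
}

module_init(demo_gre_init);
module_exit(demo_gre_exit);
MODULE_LICENSE("GPL");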
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h index b6d3b55da19b..e4f494b42e06 100644 --- a/include/net/inet_connection_sock.h +++ b/include/net/inet_connection_sock.h | |||
@@ -125,6 +125,7 @@ struct inet_connection_sock { | |||
125 | int probe_size; | 125 | int probe_size; |
126 | } icsk_mtup; | 126 | } icsk_mtup; |
127 | u32 icsk_ca_priv[16]; | 127 | u32 icsk_ca_priv[16]; |
128 | u32 icsk_user_timeout; | ||
128 | #define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) | 129 | #define ICSK_CA_PRIV_SIZE (16 * sizeof(u32)) |
129 | }; | 130 | }; |
130 | 131 | ||
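[Editor's note] The new icsk_user_timeout field backs the TCP_USER_TIMEOUT socket option introduced in the same development cycle: it bounds how long transmitted data may remain unacknowledged before the connection is aborted. A hedged userspace sketch (the option takes milliseconds; the fallback define is only needed with older libc headers):

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

#ifndef TCP_USER_TIMEOUT
#define TCP_USER_TIMEOUT 18	/* kernel value; missing from older headers */
#endif

/* Abort the connection if data stays unacknowledged for ~30 seconds. */
int demo_set_user_timeout(int fd)
{
	unsigned int timeout_ms = 30000;

	return setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT,
			  &timeout_ms, sizeof(timeout_ms));
}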
diff --git a/include/net/inet_ecn.h b/include/net/inet_ecn.h index 9b5d08f4f6e8..88bdd010d65d 100644 --- a/include/net/inet_ecn.h +++ b/include/net/inet_ecn.h | |||
@@ -27,7 +27,7 @@ static inline int INET_ECN_is_not_ect(__u8 dsfield) | |||
27 | 27 | ||
28 | static inline int INET_ECN_is_capable(__u8 dsfield) | 28 | static inline int INET_ECN_is_capable(__u8 dsfield) |
29 | { | 29 | { |
30 | return (dsfield & INET_ECN_ECT_0); | 30 | return dsfield & INET_ECN_ECT_0; |
31 | } | 31 | } |
32 | 32 | ||
33 | static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) | 33 | static inline __u8 INET_ECN_encapsulate(__u8 outer, __u8 inner) |
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h index 74358d1b3f43..e9c2ed8af864 100644 --- a/include/net/inet_hashtables.h +++ b/include/net/inet_hashtables.h | |||
@@ -245,7 +245,7 @@ static inline int inet_sk_listen_hashfn(const struct sock *sk) | |||
245 | } | 245 | } |
246 | 246 | ||
247 | /* Caller must disable local BH processing. */ | 247 | /* Caller must disable local BH processing. */ |
248 | extern void __inet_inherit_port(struct sock *sk, struct sock *child); | 248 | extern int __inet_inherit_port(struct sock *sk, struct sock *child); |
249 | 249 | ||
250 | extern void inet_put_port(struct sock *sk); | 250 | extern void inet_put_port(struct sock *sk); |
251 | 251 | ||
diff --git a/include/net/ip.h b/include/net/ip.h index 890f9725d681..dbee3fe260e1 100644 --- a/include/net/ip.h +++ b/include/net/ip.h | |||
@@ -53,7 +53,7 @@ struct ipcm_cookie { | |||
53 | __be32 addr; | 53 | __be32 addr; |
54 | int oif; | 54 | int oif; |
55 | struct ip_options *opt; | 55 | struct ip_options *opt; |
56 | union skb_shared_tx shtx; | 56 | __u8 tx_flags; |
57 | }; | 57 | }; |
58 | 58 | ||
59 | #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) | 59 | #define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb)) |
@@ -238,9 +238,9 @@ int ip_decrease_ttl(struct iphdr *iph) | |||
238 | static inline | 238 | static inline |
239 | int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) | 239 | int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) |
240 | { | 240 | { |
241 | return (inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || | 241 | return inet_sk(sk)->pmtudisc == IP_PMTUDISC_DO || |
242 | (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && | 242 | (inet_sk(sk)->pmtudisc == IP_PMTUDISC_WANT && |
243 | !(dst_metric_locked(dst, RTAX_MTU)))); | 243 | !(dst_metric_locked(dst, RTAX_MTU))); |
244 | } | 244 | } |
245 | 245 | ||
246 | extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); | 246 | extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); |
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h index c93f94edc610..ba3666d31766 100644 --- a/include/net/ip_fib.h +++ b/include/net/ip_fib.h | |||
@@ -86,6 +86,7 @@ struct fib_info { | |||
86 | #ifdef CONFIG_IP_ROUTE_MULTIPATH | 86 | #ifdef CONFIG_IP_ROUTE_MULTIPATH |
87 | int fib_power; | 87 | int fib_power; |
88 | #endif | 88 | #endif |
89 | struct rcu_head rcu; | ||
89 | struct fib_nh fib_nh[0]; | 90 | struct fib_nh fib_nh[0]; |
90 | #define fib_dev fib_nh[0].nh_dev | 91 | #define fib_dev fib_nh[0].nh_dev |
91 | }; | 92 | }; |
@@ -148,7 +149,7 @@ struct fib_table { | |||
148 | }; | 149 | }; |
149 | 150 | ||
150 | extern int fib_table_lookup(struct fib_table *tb, const struct flowi *flp, | 151 | extern int fib_table_lookup(struct fib_table *tb, const struct flowi *flp, |
151 | struct fib_result *res); | 152 | struct fib_result *res, int fib_flags); |
152 | extern int fib_table_insert(struct fib_table *, struct fib_config *); | 153 | extern int fib_table_insert(struct fib_table *, struct fib_config *); |
153 | extern int fib_table_delete(struct fib_table *, struct fib_config *); | 154 | extern int fib_table_delete(struct fib_table *, struct fib_config *); |
154 | extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb, | 155 | extern int fib_table_dump(struct fib_table *table, struct sk_buff *skb, |
@@ -185,11 +186,11 @@ static inline int fib_lookup(struct net *net, const struct flowi *flp, | |||
185 | struct fib_table *table; | 186 | struct fib_table *table; |
186 | 187 | ||
187 | table = fib_get_table(net, RT_TABLE_LOCAL); | 188 | table = fib_get_table(net, RT_TABLE_LOCAL); |
188 | if (!fib_table_lookup(table, flp, res)) | 189 | if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF)) |
189 | return 0; | 190 | return 0; |
190 | 191 | ||
191 | table = fib_get_table(net, RT_TABLE_MAIN); | 192 | table = fib_get_table(net, RT_TABLE_MAIN); |
192 | if (!fib_table_lookup(table, flp, res)) | 193 | if (!fib_table_lookup(table, flp, res, FIB_LOOKUP_NOREF)) |
193 | return 0; | 194 | return 0; |
194 | return -ENETUNREACH; | 195 | return -ENETUNREACH; |
195 | } | 196 | } |
@@ -254,16 +255,6 @@ static inline void fib_info_put(struct fib_info *fi) | |||
254 | free_fib_info(fi); | 255 | free_fib_info(fi); |
255 | } | 256 | } |
256 | 257 | ||
257 | static inline void fib_res_put(struct fib_result *res) | ||
258 | { | ||
259 | if (res->fi) | ||
260 | fib_info_put(res->fi); | ||
261 | #ifdef CONFIG_IP_MULTIPLE_TABLES | ||
262 | if (res->r) | ||
263 | fib_rule_put(res->r); | ||
264 | #endif | ||
265 | } | ||
266 | |||
267 | #ifdef CONFIG_PROC_FS | 258 | #ifdef CONFIG_PROC_FS |
268 | extern int __net_init fib_proc_init(struct net *net); | 259 | extern int __net_init fib_proc_init(struct net *net); |
269 | extern void __net_exit fib_proc_exit(struct net *net); | 260 | extern void __net_exit fib_proc_exit(struct net *net); |
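[Editor's note] fib_table_lookup() now takes a flags argument and fib_info gains an RCU head; with FIB_LOOKUP_NOREF (which fib_lookup() above now passes) the result no longer pins fib_info, which is why fib_res_put() disappears. The implied calling pattern is a lookup inside an RCU read-side section; a sketch under that assumption, with demo_route_check() invented for illustration:

#include <linux/rcupdate.h>
#include <net/ip_fib.h>

static int demo_route_check(struct net *net, const struct flowi *flp)
{
	struct fib_result res;
	int err;

	rcu_read_lock();
	err = fib_lookup(net, flp, &res);
	/* res.fi is only safe to dereference inside this section;
	 * keeping it longer would require taking an explicit reference. */
	rcu_read_unlock();

	return err;
}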
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index a4747a0f7303..b7bbd6c28cfa 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -25,7 +25,9 @@ | |||
25 | #include <linux/ip.h> | 25 | #include <linux/ip.h> |
26 | #include <linux/ipv6.h> /* for struct ipv6hdr */ | 26 | #include <linux/ipv6.h> /* for struct ipv6hdr */ |
27 | #include <net/ipv6.h> /* for ipv6_addr_copy */ | 27 | #include <net/ipv6.h> /* for ipv6_addr_copy */ |
28 | 28 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | |
29 | #include <net/netfilter/nf_conntrack.h> | ||
30 | #endif | ||
29 | 31 | ||
30 | /* Connections' size value needed by ip_vs_ctl.c */ | 32 | /* Connections' size value needed by ip_vs_ctl.c */ |
31 | extern int ip_vs_conn_tab_size; | 33 | extern int ip_vs_conn_tab_size; |
@@ -134,24 +136,24 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len, | |||
134 | if (net_ratelimit()) \ | 136 | if (net_ratelimit()) \ |
135 | printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ | 137 | printk(KERN_DEBUG pr_fmt(msg), ##__VA_ARGS__); \ |
136 | } while (0) | 138 | } while (0) |
137 | #define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) \ | 139 | #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) \ |
138 | do { \ | 140 | do { \ |
139 | if (level <= ip_vs_get_debug_level()) \ | 141 | if (level <= ip_vs_get_debug_level()) \ |
140 | pp->debug_packet(pp, skb, ofs, msg); \ | 142 | pp->debug_packet(af, pp, skb, ofs, msg); \ |
141 | } while (0) | 143 | } while (0) |
142 | #define IP_VS_DBG_RL_PKT(level, pp, skb, ofs, msg) \ | 144 | #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) \ |
143 | do { \ | 145 | do { \ |
144 | if (level <= ip_vs_get_debug_level() && \ | 146 | if (level <= ip_vs_get_debug_level() && \ |
145 | net_ratelimit()) \ | 147 | net_ratelimit()) \ |
146 | pp->debug_packet(pp, skb, ofs, msg); \ | 148 | pp->debug_packet(af, pp, skb, ofs, msg); \ |
147 | } while (0) | 149 | } while (0) |
148 | #else /* NO DEBUGGING at ALL */ | 150 | #else /* NO DEBUGGING at ALL */ |
149 | #define IP_VS_DBG_BUF(level, msg...) do {} while (0) | 151 | #define IP_VS_DBG_BUF(level, msg...) do {} while (0) |
150 | #define IP_VS_ERR_BUF(msg...) do {} while (0) | 152 | #define IP_VS_ERR_BUF(msg...) do {} while (0) |
151 | #define IP_VS_DBG(level, msg...) do {} while (0) | 153 | #define IP_VS_DBG(level, msg...) do {} while (0) |
152 | #define IP_VS_DBG_RL(msg...) do {} while (0) | 154 | #define IP_VS_DBG_RL(msg...) do {} while (0) |
153 | #define IP_VS_DBG_PKT(level, pp, skb, ofs, msg) do {} while (0) | 155 | #define IP_VS_DBG_PKT(level, af, pp, skb, ofs, msg) do {} while (0) |
154 | #define IP_VS_DBG_RL_PKT(level, pp, skb, ofs, msg) do {} while (0) | 156 | #define IP_VS_DBG_RL_PKT(level, af, pp, skb, ofs, msg) do {} while (0) |
155 | #endif | 157 | #endif |
156 | 158 | ||
157 | #define IP_VS_BUG() BUG() | 159 | #define IP_VS_BUG() BUG() |
@@ -343,7 +345,7 @@ struct ip_vs_protocol { | |||
343 | 345 | ||
344 | int (*app_conn_bind)(struct ip_vs_conn *cp); | 346 | int (*app_conn_bind)(struct ip_vs_conn *cp); |
345 | 347 | ||
346 | void (*debug_packet)(struct ip_vs_protocol *pp, | 348 | void (*debug_packet)(int af, struct ip_vs_protocol *pp, |
347 | const struct sk_buff *skb, | 349 | const struct sk_buff *skb, |
348 | int offset, | 350 | int offset, |
349 | const char *msg); | 351 | const char *msg); |
@@ -355,6 +357,19 @@ struct ip_vs_protocol { | |||
355 | 357 | ||
356 | extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto); | 358 | extern struct ip_vs_protocol * ip_vs_proto_get(unsigned short proto); |
357 | 359 | ||
360 | struct ip_vs_conn_param { | ||
361 | const union nf_inet_addr *caddr; | ||
362 | const union nf_inet_addr *vaddr; | ||
363 | __be16 cport; | ||
364 | __be16 vport; | ||
365 | __u16 protocol; | ||
366 | u16 af; | ||
367 | |||
368 | const struct ip_vs_pe *pe; | ||
369 | char *pe_data; | ||
370 | __u8 pe_data_len; | ||
371 | }; | ||
372 | |||
358 | /* | 373 | /* |
359 | * IP_VS structure allocated for each dynamically scheduled connection | 374 | * IP_VS structure allocated for each dynamically scheduled connection |
360 | */ | 375 | */ |
@@ -366,6 +381,7 @@ struct ip_vs_conn { | |||
366 | union nf_inet_addr caddr; /* client address */ | 381 | union nf_inet_addr caddr; /* client address */ |
367 | union nf_inet_addr vaddr; /* virtual address */ | 382 | union nf_inet_addr vaddr; /* virtual address */ |
368 | union nf_inet_addr daddr; /* destination address */ | 383 | union nf_inet_addr daddr; /* destination address */ |
384 | volatile __u32 flags; /* status flags */ | ||
369 | __be16 cport; | 385 | __be16 cport; |
370 | __be16 vport; | 386 | __be16 vport; |
371 | __be16 dport; | 387 | __be16 dport; |
@@ -378,7 +394,6 @@ struct ip_vs_conn { | |||
378 | 394 | ||
379 | /* Flags and state transition */ | 395 | /* Flags and state transition */ |
380 | spinlock_t lock; /* lock for state transition */ | 396 | spinlock_t lock; /* lock for state transition */ |
381 | volatile __u16 flags; /* status flags */ | ||
382 | volatile __u16 state; /* state info */ | 397 | volatile __u16 state; /* state info */ |
383 | volatile __u16 old_state; /* old state, to be used for | 398 | volatile __u16 old_state; /* old state, to be used for |
384 | * state transition triggered | 399 | * state transition triggered |
@@ -394,6 +409,7 @@ struct ip_vs_conn { | |||
394 | /* packet transmitter for different forwarding methods. If it | 409 | /* packet transmitter for different forwarding methods. If it |
395 | mangles the packet, it must return NF_DROP or better NF_STOLEN, | 410 | mangles the packet, it must return NF_DROP or better NF_STOLEN, |
396 | otherwise this must be changed to a sk_buff **. | 411 | otherwise this must be changed to a sk_buff **. |
412 | NF_ACCEPT can be returned when destination is local. | ||
397 | */ | 413 | */ |
398 | int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, | 414 | int (*packet_xmit)(struct sk_buff *skb, struct ip_vs_conn *cp, |
399 | struct ip_vs_protocol *pp); | 415 | struct ip_vs_protocol *pp); |
@@ -405,6 +421,9 @@ struct ip_vs_conn { | |||
405 | void *app_data; /* Application private data */ | 421 | void *app_data; /* Application private data */ |
406 | struct ip_vs_seq in_seq; /* incoming seq. struct */ | 422 | struct ip_vs_seq in_seq; /* incoming seq. struct */ |
407 | struct ip_vs_seq out_seq; /* outgoing seq. struct */ | 423 | struct ip_vs_seq out_seq; /* outgoing seq. struct */ |
424 | |||
425 | char *pe_data; | ||
426 | __u8 pe_data_len; | ||
408 | }; | 427 | }; |
409 | 428 | ||
410 | 429 | ||
@@ -426,6 +445,7 @@ struct ip_vs_service_user_kern { | |||
426 | 445 | ||
427 | /* virtual service options */ | 446 | /* virtual service options */ |
428 | char *sched_name; | 447 | char *sched_name; |
448 | char *pe_name; | ||
429 | unsigned flags; /* virtual service flags */ | 449 | unsigned flags; /* virtual service flags */ |
430 | unsigned timeout; /* persistent timeout in sec */ | 450 | unsigned timeout; /* persistent timeout in sec */ |
431 | u32 netmask; /* persistent netmask */ | 451 | u32 netmask; /* persistent netmask */ |
@@ -475,6 +495,9 @@ struct ip_vs_service { | |||
475 | struct ip_vs_scheduler *scheduler; /* bound scheduler object */ | 495 | struct ip_vs_scheduler *scheduler; /* bound scheduler object */ |
476 | rwlock_t sched_lock; /* lock sched_data */ | 496 | rwlock_t sched_lock; /* lock sched_data */ |
477 | void *sched_data; /* scheduler application data */ | 497 | void *sched_data; /* scheduler application data */ |
498 | |||
499 | /* alternate persistence engine */ | ||
500 | struct ip_vs_pe *pe; | ||
478 | }; | 501 | }; |
479 | 502 | ||
480 | 503 | ||
@@ -507,6 +530,10 @@ struct ip_vs_dest { | |||
507 | spinlock_t dst_lock; /* lock of dst_cache */ | 530 | spinlock_t dst_lock; /* lock of dst_cache */ |
508 | struct dst_entry *dst_cache; /* destination cache entry */ | 531 | struct dst_entry *dst_cache; /* destination cache entry */ |
509 | u32 dst_rtos; /* RT_TOS(tos) for dst */ | 532 | u32 dst_rtos; /* RT_TOS(tos) for dst */ |
533 | u32 dst_cookie; | ||
534 | #ifdef CONFIG_IP_VS_IPV6 | ||
535 | struct in6_addr dst_saddr; | ||
536 | #endif | ||
510 | 537 | ||
511 | /* for virtual service */ | 538 | /* for virtual service */ |
512 | struct ip_vs_service *svc; /* service it belongs to */ | 539 | struct ip_vs_service *svc; /* service it belongs to */ |
@@ -538,6 +565,21 @@ struct ip_vs_scheduler { | |||
538 | const struct sk_buff *skb); | 565 | const struct sk_buff *skb); |
539 | }; | 566 | }; |
540 | 567 | ||
568 | /* The persistence engine object */ | ||
569 | struct ip_vs_pe { | ||
570 | struct list_head n_list; /* d-linked list head */ | ||
571 | char *name; /* pe name */ | ||
572 | atomic_t refcnt; /* reference counter */ | ||
573 | struct module *module; /* THIS_MODULE/NULL */ | ||
574 | |||
575 | /* get the connection template, if any */ | ||
576 | int (*fill_param)(struct ip_vs_conn_param *p, struct sk_buff *skb); | ||
577 | bool (*ct_match)(const struct ip_vs_conn_param *p, | ||
578 | struct ip_vs_conn *ct); | ||
579 | u32 (*hashkey_raw)(const struct ip_vs_conn_param *p, u32 initval, | ||
580 | bool inverse); | ||
581 | int (*show_pe_data)(const struct ip_vs_conn *cp, char *buf); | ||
582 | }; | ||
541 | 583 | ||
542 | /* | 584 | /* |
543 | * The application module object (a.k.a. app incarnation) | 585 | * The application module object (a.k.a. app incarnation) |
@@ -556,11 +598,19 @@ struct ip_vs_app { | |||
556 | __be16 port; /* port number in net order */ | 598 | __be16 port; /* port number in net order */ |
557 | atomic_t usecnt; /* usage counter */ | 599 | atomic_t usecnt; /* usage counter */ |
558 | 600 | ||
559 | /* output hook: return false if can't linearize. diff set for TCP. */ | 601 | /* |
602 | * output hook: Process packet in inout direction, diff set for TCP. | ||
603 | * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, | ||
604 | * 2=Mangled but checksum was not updated | ||
605 | */ | ||
560 | int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, | 606 | int (*pkt_out)(struct ip_vs_app *, struct ip_vs_conn *, |
561 | struct sk_buff *, int *diff); | 607 | struct sk_buff *, int *diff); |
562 | 608 | ||
563 | /* input hook: return false if can't linearize. diff set for TCP. */ | 609 | /* |
610 | * input hook: Process packet in outin direction, diff set for TCP. | ||
611 | * Return: 0=Error, 1=Payload Not Mangled/Mangled but checksum is ok, | ||
612 | * 2=Mangled but checksum was not updated | ||
613 | */ | ||
564 | int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *, | 614 | int (*pkt_in)(struct ip_vs_app *, struct ip_vs_conn *, |
565 | struct sk_buff *, int *diff); | 615 | struct sk_buff *, int *diff); |
566 | 616 | ||
@@ -624,13 +674,25 @@ enum { | |||
624 | IP_VS_DIR_LAST, | 674 | IP_VS_DIR_LAST, |
625 | }; | 675 | }; |
626 | 676 | ||
627 | extern struct ip_vs_conn *ip_vs_conn_in_get | 677 | static inline void ip_vs_conn_fill_param(int af, int protocol, |
628 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, | 678 | const union nf_inet_addr *caddr, |
629 | const union nf_inet_addr *d_addr, __be16 d_port); | 679 | __be16 cport, |
680 | const union nf_inet_addr *vaddr, | ||
681 | __be16 vport, | ||
682 | struct ip_vs_conn_param *p) | ||
683 | { | ||
684 | p->af = af; | ||
685 | p->protocol = protocol; | ||
686 | p->caddr = caddr; | ||
687 | p->cport = cport; | ||
688 | p->vaddr = vaddr; | ||
689 | p->vport = vport; | ||
690 | p->pe = NULL; | ||
691 | p->pe_data = NULL; | ||
692 | } | ||
630 | 693 | ||
631 | extern struct ip_vs_conn *ip_vs_ct_in_get | 694 | struct ip_vs_conn *ip_vs_conn_in_get(const struct ip_vs_conn_param *p); |
632 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, | 695 | struct ip_vs_conn *ip_vs_ct_in_get(const struct ip_vs_conn_param *p); |
633 | const union nf_inet_addr *d_addr, __be16 d_port); | ||
634 | 696 | ||
635 | struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, | 697 | struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, |
636 | struct ip_vs_protocol *pp, | 698 | struct ip_vs_protocol *pp, |
@@ -638,9 +700,7 @@ struct ip_vs_conn * ip_vs_conn_in_get_proto(int af, const struct sk_buff *skb, | |||
638 | unsigned int proto_off, | 700 | unsigned int proto_off, |
639 | int inverse); | 701 | int inverse); |
640 | 702 | ||
641 | extern struct ip_vs_conn *ip_vs_conn_out_get | 703 | struct ip_vs_conn *ip_vs_conn_out_get(const struct ip_vs_conn_param *p); |
642 | (int af, int protocol, const union nf_inet_addr *s_addr, __be16 s_port, | ||
643 | const union nf_inet_addr *d_addr, __be16 d_port); | ||
644 | 704 | ||
645 | struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, | 705 | struct ip_vs_conn * ip_vs_conn_out_get_proto(int af, const struct sk_buff *skb, |
646 | struct ip_vs_protocol *pp, | 706 | struct ip_vs_protocol *pp, |
@@ -656,11 +716,10 @@ static inline void __ip_vs_conn_put(struct ip_vs_conn *cp) | |||
656 | extern void ip_vs_conn_put(struct ip_vs_conn *cp); | 716 | extern void ip_vs_conn_put(struct ip_vs_conn *cp); |
657 | extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); | 717 | extern void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport); |
658 | 718 | ||
659 | extern struct ip_vs_conn * | 719 | struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, |
660 | ip_vs_conn_new(int af, int proto, const union nf_inet_addr *caddr, __be16 cport, | 720 | const union nf_inet_addr *daddr, |
661 | const union nf_inet_addr *vaddr, __be16 vport, | 721 | __be16 dport, unsigned flags, |
662 | const union nf_inet_addr *daddr, __be16 dport, unsigned flags, | 722 | struct ip_vs_dest *dest); |
663 | struct ip_vs_dest *dest); | ||
664 | extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); | 723 | extern void ip_vs_conn_expire_now(struct ip_vs_conn *cp); |
665 | 724 | ||
666 | extern const char * ip_vs_state_name(__u16 proto, int state); | 725 | extern const char * ip_vs_state_name(__u16 proto, int state); |
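[Editor's note] The connection lookup and creation entry points now share struct ip_vs_conn_param instead of long argument lists; ip_vs_conn_fill_param() packs the tuple and leaves the persistence-engine fields NULL. A sketch of the new lookup pattern (demo_lookup() and its arguments are placeholders):

#include <net/ip_vs.h>

static struct ip_vs_conn *demo_lookup(int af, __u16 protocol,
				      const union nf_inet_addr *caddr,
				      __be16 cport,
				      const union nf_inet_addr *vaddr,
				      __be16 vport)
{
	struct ip_vs_conn_param p;

	ip_vs_conn_fill_param(af, protocol, caddr, cport, vaddr, vport, &p);
	/* p.pe and p.pe_data stay NULL unless a persistence engine
	 * attaches template data. */
	return ip_vs_conn_in_get(&p);
}

The same param structure is handed to ip_vs_conn_new(), which now only takes the destination tuple, flags and dest on top of it.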
@@ -751,6 +810,12 @@ extern int ip_vs_app_pkt_in(struct ip_vs_conn *, struct sk_buff *skb); | |||
751 | extern int ip_vs_app_init(void); | 810 | extern int ip_vs_app_init(void); |
752 | extern void ip_vs_app_cleanup(void); | 811 | extern void ip_vs_app_cleanup(void); |
753 | 812 | ||
813 | void ip_vs_bind_pe(struct ip_vs_service *svc, struct ip_vs_pe *pe); | ||
814 | void ip_vs_unbind_pe(struct ip_vs_service *svc); | ||
815 | int register_ip_vs_pe(struct ip_vs_pe *pe); | ||
816 | int unregister_ip_vs_pe(struct ip_vs_pe *pe); | ||
817 | extern struct ip_vs_pe *ip_vs_pe_get(const char *name); | ||
818 | extern void ip_vs_pe_put(struct ip_vs_pe *pe); | ||
754 | 819 | ||
755 | /* | 820 | /* |
756 | * IPVS protocol functions (from ip_vs_proto.c) | 821 | * IPVS protocol functions (from ip_vs_proto.c) |
@@ -763,7 +828,8 @@ extern int | |||
763 | ip_vs_set_state_timeout(int *table, int num, const char *const *names, | 828 | ip_vs_set_state_timeout(int *table, int num, const char *const *names, |
764 | const char *name, int to); | 829 | const char *name, int to); |
765 | extern void | 830 | extern void |
766 | ip_vs_tcpudp_debug_packet(struct ip_vs_protocol *pp, const struct sk_buff *skb, | 831 | ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp, |
832 | const struct sk_buff *skb, | ||
767 | int offset, const char *msg); | 833 | int offset, const char *msg); |
768 | 834 | ||
769 | extern struct ip_vs_protocol ip_vs_protocol_tcp; | 835 | extern struct ip_vs_protocol ip_vs_protocol_tcp; |
@@ -785,7 +851,8 @@ extern int ip_vs_unbind_scheduler(struct ip_vs_service *svc); | |||
785 | extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name); | 851 | extern struct ip_vs_scheduler *ip_vs_scheduler_get(const char *sched_name); |
786 | extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); | 852 | extern void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler); |
787 | extern struct ip_vs_conn * | 853 | extern struct ip_vs_conn * |
788 | ip_vs_schedule(struct ip_vs_service *svc, const struct sk_buff *skb); | 854 | ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb, |
855 | struct ip_vs_protocol *pp, int *ignored); | ||
789 | extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, | 856 | extern int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb, |
790 | struct ip_vs_protocol *pp); | 857 | struct ip_vs_protocol *pp); |
791 | 858 | ||
@@ -798,6 +865,8 @@ extern int sysctl_ip_vs_expire_nodest_conn; | |||
798 | extern int sysctl_ip_vs_expire_quiescent_template; | 865 | extern int sysctl_ip_vs_expire_quiescent_template; |
799 | extern int sysctl_ip_vs_sync_threshold[2]; | 866 | extern int sysctl_ip_vs_sync_threshold[2]; |
800 | extern int sysctl_ip_vs_nat_icmp_send; | 867 | extern int sysctl_ip_vs_nat_icmp_send; |
868 | extern int sysctl_ip_vs_conntrack; | ||
869 | extern int sysctl_ip_vs_snat_reroute; | ||
801 | extern struct ip_vs_stats ip_vs_stats; | 870 | extern struct ip_vs_stats ip_vs_stats; |
802 | extern const struct ctl_path net_vs_ctl_path[]; | 871 | extern const struct ctl_path net_vs_ctl_path[]; |
803 | 872 | ||
@@ -955,6 +1024,66 @@ static inline __wsum ip_vs_check_diff2(__be16 old, __be16 new, __wsum oldsum) | |||
955 | return csum_partial(diff, sizeof(diff), oldsum); | 1024 | return csum_partial(diff, sizeof(diff), oldsum); |
956 | } | 1025 | } |
957 | 1026 | ||
1027 | /* | ||
1028 | * Forget current conntrack (unconfirmed) and attach notrack entry | ||
1029 | */ | ||
1030 | static inline void ip_vs_notrack(struct sk_buff *skb) | ||
1031 | { | ||
1032 | #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE) | ||
1033 | enum ip_conntrack_info ctinfo; | ||
1034 | struct nf_conn *ct = nf_ct_get(skb, &ctinfo); | ||
1035 | |||
1036 | if (!ct || !nf_ct_is_untracked(ct)) { | ||
1037 | nf_reset(skb); | ||
1038 | skb->nfct = &nf_ct_untracked_get()->ct_general; | ||
1039 | skb->nfctinfo = IP_CT_NEW; | ||
1040 | nf_conntrack_get(skb->nfct); | ||
1041 | } | ||
1042 | #endif | ||
1043 | } | ||
1044 | |||
1045 | #ifdef CONFIG_IP_VS_NFCT | ||
1046 | /* | ||
1047 | * Netfilter connection tracking | ||
1048 | * (from ip_vs_nfct.c) | ||
1049 | */ | ||
1050 | static inline int ip_vs_conntrack_enabled(void) | ||
1051 | { | ||
1052 | return sysctl_ip_vs_conntrack; | ||
1053 | } | ||
1054 | |||
1055 | extern void ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, | ||
1056 | int outin); | ||
1057 | extern int ip_vs_confirm_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp); | ||
1058 | extern void ip_vs_nfct_expect_related(struct sk_buff *skb, struct nf_conn *ct, | ||
1059 | struct ip_vs_conn *cp, u_int8_t proto, | ||
1060 | const __be16 port, int from_rs); | ||
1061 | extern void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp); | ||
1062 | |||
1063 | #else | ||
1064 | |||
1065 | static inline int ip_vs_conntrack_enabled(void) | ||
1066 | { | ||
1067 | return 0; | ||
1068 | } | ||
1069 | |||
1070 | static inline void ip_vs_update_conntrack(struct sk_buff *skb, | ||
1071 | struct ip_vs_conn *cp, int outin) | ||
1072 | { | ||
1073 | } | ||
1074 | |||
1075 | static inline int ip_vs_confirm_conntrack(struct sk_buff *skb, | ||
1076 | struct ip_vs_conn *cp) | ||
1077 | { | ||
1078 | return NF_ACCEPT; | ||
1079 | } | ||
1080 | |||
1081 | static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp) | ||
1082 | { | ||
1083 | } | ||
1084 | /* CONFIG_IP_VS_NFCT */ | ||
1085 | #endif | ||
1086 | |||
958 | #endif /* __KERNEL__ */ | 1087 | #endif /* __KERNEL__ */ |
959 | 1088 | ||
960 | #endif /* _NET_IP_VS_H */ | 1089 | #endif /* _NET_IP_VS_H */ |
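[Editor's note] Persistence engines plug in through register_ip_vs_pe()/unregister_ip_vs_pe() declared above. The skeleton below only shows the wiring; the callback bodies and their return-value conventions are assumptions for illustration, not taken from the patch.

#include <linux/module.h>
#include <net/ip_vs.h>

static int demo_fill_param(struct ip_vs_conn_param *p, struct sk_buff *skb)
{
	return -ENODATA;	/* assumed: no template data found in this packet */
}

static bool demo_ct_match(const struct ip_vs_conn_param *p,
			  struct ip_vs_conn *ct)
{
	return false;		/* assumed: never matches an existing template */
}

static u32 demo_hashkey_raw(const struct ip_vs_conn_param *p, u32 initval,
			    bool inverse)
{
	return initval;
}

static struct ip_vs_pe demo_pe = {
	.name		= "demo",
	.refcnt		= ATOMIC_INIT(0),
	.module		= THIS_MODULE,
	.fill_param	= demo_fill_param,
	.ct_match	= demo_ct_match,
	.hashkey_raw	= demo_hashkey_raw,
};

static int __init demo_pe_init(void)
{
	return register_ip_vs_pe(&demo_pe);
}

static void __exit demo_pe_exit(void)
{
	unregister_ip_vs_pe(&demo_pe);
}

module_init(demo_pe_init);
module_exit(demo_pe_exit);
MODULE_LICENSE("GPL");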
diff --git a/include/net/ipip.h b/include/net/ipip.h index 65caea8b414f..58abbf966b0c 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h | |||
@@ -45,7 +45,7 @@ struct ip_tunnel_prl_entry { | |||
45 | struct rcu_head rcu_head; | 45 | struct rcu_head rcu_head; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define IPTUNNEL_XMIT() do { \ | 48 | #define __IPTUNNEL_XMIT(stats1, stats2) do { \ |
49 | int err; \ | 49 | int err; \ |
50 | int pkt_len = skb->len - skb_transport_offset(skb); \ | 50 | int pkt_len = skb->len - skb_transport_offset(skb); \ |
51 | \ | 51 | \ |
@@ -54,12 +54,14 @@ struct ip_tunnel_prl_entry { | |||
54 | \ | 54 | \ |
55 | err = ip_local_out(skb); \ | 55 | err = ip_local_out(skb); \ |
56 | if (likely(net_xmit_eval(err) == 0)) { \ | 56 | if (likely(net_xmit_eval(err) == 0)) { \ |
57 | txq->tx_bytes += pkt_len; \ | 57 | (stats1)->tx_bytes += pkt_len; \ |
58 | txq->tx_packets++; \ | 58 | (stats1)->tx_packets++; \ |
59 | } else { \ | 59 | } else { \ |
60 | stats->tx_errors++; \ | 60 | (stats2)->tx_errors++; \ |
61 | stats->tx_aborted_errors++; \ | 61 | (stats2)->tx_aborted_errors++; \ |
62 | } \ | 62 | } \ |
63 | } while (0) | 63 | } while (0) |
64 | 64 | ||
65 | #define IPTUNNEL_XMIT() __IPTUNNEL_XMIT(txq, stats) | ||
66 | |||
65 | #endif | 67 | #endif |
diff --git a/include/net/ipv6.h b/include/net/ipv6.h index 1f8412410998..4a3cd2cd2f5e 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h | |||
@@ -262,7 +262,7 @@ static inline int ipv6_addr_scope(const struct in6_addr *addr) | |||
262 | 262 | ||
263 | static inline int __ipv6_addr_src_scope(int type) | 263 | static inline int __ipv6_addr_src_scope(int type) |
264 | { | 264 | { |
265 | return (type == IPV6_ADDR_ANY ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16)); | 265 | return (type == IPV6_ADDR_ANY) ? __IPV6_ADDR_SCOPE_INVALID : (type >> 16); |
266 | } | 266 | } |
267 | 267 | ||
268 | static inline int ipv6_addr_src_scope(const struct in6_addr *addr) | 268 | static inline int ipv6_addr_src_scope(const struct in6_addr *addr) |
@@ -279,10 +279,10 @@ static inline int | |||
279 | ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, | 279 | ipv6_masked_addr_cmp(const struct in6_addr *a1, const struct in6_addr *m, |
280 | const struct in6_addr *a2) | 280 | const struct in6_addr *a2) |
281 | { | 281 | { |
282 | return (!!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | | 282 | return !!(((a1->s6_addr32[0] ^ a2->s6_addr32[0]) & m->s6_addr32[0]) | |
283 | ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | | 283 | ((a1->s6_addr32[1] ^ a2->s6_addr32[1]) & m->s6_addr32[1]) | |
284 | ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | | 284 | ((a1->s6_addr32[2] ^ a2->s6_addr32[2]) & m->s6_addr32[2]) | |
285 | ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3]))); | 285 | ((a1->s6_addr32[3] ^ a2->s6_addr32[3]) & m->s6_addr32[3])); |
286 | } | 286 | } |
287 | 287 | ||
288 | static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) | 288 | static inline void ipv6_addr_copy(struct in6_addr *a1, const struct in6_addr *a2) |
@@ -317,10 +317,10 @@ static inline void ipv6_addr_set(struct in6_addr *addr, | |||
317 | static inline int ipv6_addr_equal(const struct in6_addr *a1, | 317 | static inline int ipv6_addr_equal(const struct in6_addr *a1, |
318 | const struct in6_addr *a2) | 318 | const struct in6_addr *a2) |
319 | { | 319 | { |
320 | return (((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | | 320 | return ((a1->s6_addr32[0] ^ a2->s6_addr32[0]) | |
321 | (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | | 321 | (a1->s6_addr32[1] ^ a2->s6_addr32[1]) | |
322 | (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | | 322 | (a1->s6_addr32[2] ^ a2->s6_addr32[2]) | |
323 | (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0); | 323 | (a1->s6_addr32[3] ^ a2->s6_addr32[3])) == 0; |
324 | } | 324 | } |
325 | 325 | ||
326 | static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, | 326 | static inline int __ipv6_prefix_equal(const __be32 *a1, const __be32 *a2, |
@@ -373,20 +373,20 @@ int ip6_frag_match(struct inet_frag_queue *q, void *a); | |||
373 | 373 | ||
374 | static inline int ipv6_addr_any(const struct in6_addr *a) | 374 | static inline int ipv6_addr_any(const struct in6_addr *a) |
375 | { | 375 | { |
376 | return ((a->s6_addr32[0] | a->s6_addr32[1] | | 376 | return (a->s6_addr32[0] | a->s6_addr32[1] | |
377 | a->s6_addr32[2] | a->s6_addr32[3] ) == 0); | 377 | a->s6_addr32[2] | a->s6_addr32[3]) == 0; |
378 | } | 378 | } |
379 | 379 | ||
380 | static inline int ipv6_addr_loopback(const struct in6_addr *a) | 380 | static inline int ipv6_addr_loopback(const struct in6_addr *a) |
381 | { | 381 | { |
382 | return ((a->s6_addr32[0] | a->s6_addr32[1] | | 382 | return (a->s6_addr32[0] | a->s6_addr32[1] | |
383 | a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0); | 383 | a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0; |
384 | } | 384 | } |
385 | 385 | ||
386 | static inline int ipv6_addr_v4mapped(const struct in6_addr *a) | 386 | static inline int ipv6_addr_v4mapped(const struct in6_addr *a) |
387 | { | 387 | { |
388 | return ((a->s6_addr32[0] | a->s6_addr32[1] | | 388 | return (a->s6_addr32[0] | a->s6_addr32[1] | |
389 | (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0); | 389 | (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0; |
390 | } | 390 | } |
391 | 391 | ||
392 | /* | 392 | /* |
@@ -395,8 +395,7 @@ static inline int ipv6_addr_v4mapped(const struct in6_addr *a) | |||
395 | */ | 395 | */ |
396 | static inline int ipv6_addr_orchid(const struct in6_addr *a) | 396 | static inline int ipv6_addr_orchid(const struct in6_addr *a) |
397 | { | 397 | { |
398 | return ((a->s6_addr32[0] & htonl(0xfffffff0)) | 398 | return (a->s6_addr32[0] & htonl(0xfffffff0)) == htonl(0x20010010); |
399 | == htonl(0x20010010)); | ||
400 | } | 399 | } |
401 | 400 | ||
402 | static inline void ipv6_addr_set_v4mapped(const __be32 addr, | 401 | static inline void ipv6_addr_set_v4mapped(const __be32 addr, |
@@ -441,7 +440,7 @@ static inline int __ipv6_addr_diff(const void *token1, const void *token2, int a | |||
441 | * if returned value is greater than prefix length. | 440 | * if returned value is greater than prefix length. |
442 | * --ANK (980803) | 441 | * --ANK (980803) |
443 | */ | 442 | */ |
444 | return (addrlen << 5); | 443 | return addrlen << 5; |
445 | } | 444 | } |
446 | 445 | ||
447 | static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2) | 446 | static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_addr *a2) |
diff --git a/include/net/irda/irlan_common.h b/include/net/irda/irlan_common.h index 73cacb3ac16c..0af8b8dfbc22 100644 --- a/include/net/irda/irlan_common.h +++ b/include/net/irda/irlan_common.h | |||
@@ -171,7 +171,6 @@ struct irlan_cb { | |||
171 | int magic; | 171 | int magic; |
172 | struct list_head dev_list; | 172 | struct list_head dev_list; |
173 | struct net_device *dev; /* Ethernet device structure*/ | 173 | struct net_device *dev; /* Ethernet device structure*/ |
174 | struct net_device_stats stats; | ||
175 | 174 | ||
176 | __u32 saddr; /* Source device address */ | 175 | __u32 saddr; /* Source device address */ |
177 | __u32 daddr; /* Destination device address */ | 176 | __u32 daddr; /* Destination device address */ |
diff --git a/include/net/irda/irlan_event.h b/include/net/irda/irlan_event.h index 6d9539f05806..018b5a77e610 100644 --- a/include/net/irda/irlan_event.h +++ b/include/net/irda/irlan_event.h | |||
@@ -67,7 +67,7 @@ typedef enum { | |||
67 | IRLAN_WATCHDOG_TIMEOUT, | 67 | IRLAN_WATCHDOG_TIMEOUT, |
68 | } IRLAN_EVENT; | 68 | } IRLAN_EVENT; |
69 | 69 | ||
70 | extern char *irlan_state[]; | 70 | extern const char * const irlan_state[]; |
71 | 71 | ||
72 | void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, | 72 | void irlan_do_client_event(struct irlan_cb *self, IRLAN_EVENT event, |
73 | struct sk_buff *skb); | 73 | struct sk_buff *skb); |
diff --git a/include/net/irda/irlap.h b/include/net/irda/irlap.h index 9d0c78ea92f5..17fcd964f9d9 100644 --- a/include/net/irda/irlap.h +++ b/include/net/irda/irlap.h | |||
@@ -282,7 +282,7 @@ static inline int irlap_is_primary(struct irlap_cb *self) | |||
282 | default: | 282 | default: |
283 | ret = -1; | 283 | ret = -1; |
284 | } | 284 | } |
285 | return(ret); | 285 | return ret; |
286 | } | 286 | } |
287 | 287 | ||
288 | /* Clear a pending IrLAP disconnect. - Jean II */ | 288 | /* Clear a pending IrLAP disconnect. - Jean II */ |
diff --git a/include/net/irda/irlmp.h b/include/net/irda/irlmp.h index 3ffc1d0f93d6..fff11b7fe8a4 100644 --- a/include/net/irda/irlmp.h +++ b/include/net/irda/irlmp.h | |||
@@ -274,7 +274,7 @@ static inline int irlmp_lap_tx_queue_full(struct lsap_cb *self) | |||
274 | if (self->lap->irlap == NULL) | 274 | if (self->lap->irlap == NULL) |
275 | return 0; | 275 | return 0; |
276 | 276 | ||
277 | return(IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD); | 277 | return IRLAP_GET_TX_QUEUE_LEN(self->lap->irlap) >= LAP_HIGH_THRESHOLD; |
278 | } | 278 | } |
279 | 279 | ||
280 | /* After doing a irlmp_dup(), this get one of the two socket back into | 280 | /* After doing a irlmp_dup(), this get one of the two socket back into |
diff --git a/include/net/irda/irttp.h b/include/net/irda/irttp.h index 11aee7a2972a..af4b87721d13 100644 --- a/include/net/irda/irttp.h +++ b/include/net/irda/irttp.h | |||
@@ -204,7 +204,7 @@ static inline int irttp_is_primary(struct tsap_cb *self) | |||
204 | (self->lsap->lap == NULL) || | 204 | (self->lsap->lap == NULL) || |
205 | (self->lsap->lap->irlap == NULL)) | 205 | (self->lsap->lap->irlap == NULL)) |
206 | return -2; | 206 | return -2; |
207 | return(irlap_is_primary(self->lsap->lap->irlap)); | 207 | return irlap_is_primary(self->lsap->lap->irlap); |
208 | } | 208 | } |
209 | 209 | ||
210 | #endif /* IRTTP_H */ | 210 | #endif /* IRTTP_H */ |
diff --git a/include/net/mac80211.h b/include/net/mac80211.h index b0787a1dea90..9fdf982d1286 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h | |||
@@ -149,6 +149,7 @@ struct ieee80211_low_level_stats { | |||
149 | * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed. | 149 | * @BSS_CHANGED_ARP_FILTER: Hardware ARP filter address list or state changed. |
150 | * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note | 150 | * @BSS_CHANGED_QOS: QoS for this association was enabled/disabled. Note |
151 | * that it is only ever disabled for station mode. | 151 | * that it is only ever disabled for station mode. |
152 | * @BSS_CHANGED_IDLE: Idle changed for this BSS/interface. | ||
152 | */ | 153 | */ |
153 | enum ieee80211_bss_change { | 154 | enum ieee80211_bss_change { |
154 | BSS_CHANGED_ASSOC = 1<<0, | 155 | BSS_CHANGED_ASSOC = 1<<0, |
@@ -165,6 +166,7 @@ enum ieee80211_bss_change { | |||
165 | BSS_CHANGED_IBSS = 1<<11, | 166 | BSS_CHANGED_IBSS = 1<<11, |
166 | BSS_CHANGED_ARP_FILTER = 1<<12, | 167 | BSS_CHANGED_ARP_FILTER = 1<<12, |
167 | BSS_CHANGED_QOS = 1<<13, | 168 | BSS_CHANGED_QOS = 1<<13, |
169 | BSS_CHANGED_IDLE = 1<<14, | ||
168 | 170 | ||
169 | /* when adding here, make sure to change ieee80211_reconfig */ | 171 | /* when adding here, make sure to change ieee80211_reconfig */ |
170 | }; | 172 | }; |
@@ -223,6 +225,9 @@ enum ieee80211_bss_change { | |||
223 | * hardware must not perform any ARP filtering. Note, that the filter will | 225 | * hardware must not perform any ARP filtering. Note, that the filter will |
224 | * be enabled also in promiscuous mode. | 226 | * be enabled also in promiscuous mode. |
225 | * @qos: This is a QoS-enabled BSS. | 227 | * @qos: This is a QoS-enabled BSS. |
228 | * @idle: This interface is idle. There's also a global idle flag in the | ||
229 | * hardware config which may be more appropriate depending on what | ||
230 | * your driver/device needs to do. | ||
226 | */ | 231 | */ |
227 | struct ieee80211_bss_conf { | 232 | struct ieee80211_bss_conf { |
228 | const u8 *bssid; | 233 | const u8 *bssid; |
@@ -247,6 +252,7 @@ struct ieee80211_bss_conf { | |||
247 | u8 arp_addr_cnt; | 252 | u8 arp_addr_cnt; |
248 | bool arp_filter_enabled; | 253 | bool arp_filter_enabled; |
249 | bool qos; | 254 | bool qos; |
255 | bool idle; | ||
250 | }; | 256 | }; |
251 | 257 | ||
252 | /** | 258 | /** |
@@ -315,6 +321,9 @@ struct ieee80211_bss_conf { | |||
315 | * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame | 321 | * @IEEE80211_TX_CTL_LDPC: tells the driver to use LDPC for this frame |
316 | * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this | 322 | * @IEEE80211_TX_CTL_STBC: Enables Space-Time Block Coding (STBC) for this |
317 | * frame and selects the maximum number of streams that it can use. | 323 | * frame and selects the maximum number of streams that it can use. |
324 | * | ||
325 | * Note: If you have to add new flags to the enumeration, then don't | ||
326 | * forget to update %IEEE80211_TX_TEMPORARY_FLAGS when necessary. | ||
318 | */ | 327 | */ |
319 | enum mac80211_tx_control_flags { | 328 | enum mac80211_tx_control_flags { |
320 | IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), | 329 | IEEE80211_TX_CTL_REQ_TX_STATUS = BIT(0), |
@@ -344,6 +353,19 @@ enum mac80211_tx_control_flags { | |||
344 | 353 | ||
345 | #define IEEE80211_TX_CTL_STBC_SHIFT 23 | 354 | #define IEEE80211_TX_CTL_STBC_SHIFT 23 |
346 | 355 | ||
356 | /* | ||
357 | * This definition is used as a mask to clear all temporary flags, which are | ||
358 | * set by the tx handlers for each transmission attempt by the mac80211 stack. | ||
359 | */ | ||
360 | #define IEEE80211_TX_TEMPORARY_FLAGS (IEEE80211_TX_CTL_NO_ACK | \ | ||
361 | IEEE80211_TX_CTL_CLEAR_PS_FILT | IEEE80211_TX_CTL_FIRST_FRAGMENT | \ | ||
362 | IEEE80211_TX_CTL_SEND_AFTER_DTIM | IEEE80211_TX_CTL_AMPDU | \ | ||
363 | IEEE80211_TX_STAT_TX_FILTERED | IEEE80211_TX_STAT_ACK | \ | ||
364 | IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_STAT_AMPDU_NO_BACK | \ | ||
365 | IEEE80211_TX_CTL_RATE_CTRL_PROBE | IEEE80211_TX_CTL_PSPOLL_RESPONSE | \ | ||
366 | IEEE80211_TX_CTL_MORE_FRAMES | IEEE80211_TX_CTL_LDPC | \ | ||
367 | IEEE80211_TX_CTL_STBC) | ||
368 | |||
347 | /** | 369 | /** |
348 | * enum mac80211_rate_control_flags - per-rate flags set by the | 370 | * enum mac80211_rate_control_flags - per-rate flags set by the |
349 | * Rate Control algorithm. | 371 | * Rate Control algorithm. |
@@ -559,9 +581,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info) | |||
559 | * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index | 581 | * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index |
560 | * @RX_FLAG_40MHZ: HT40 (40 MHz) was used | 582 | * @RX_FLAG_40MHZ: HT40 (40 MHz) was used |
561 | * @RX_FLAG_SHORT_GI: Short guard interval was used | 583 | * @RX_FLAG_SHORT_GI: Short guard interval was used |
562 | * @RX_FLAG_INTERNAL_CMTR: set internally after frame was reported | ||
563 | * on cooked monitor to avoid double-reporting it for multiple | ||
564 | * virtual interfaces | ||
565 | */ | 584 | */ |
566 | enum mac80211_rx_flags { | 585 | enum mac80211_rx_flags { |
567 | RX_FLAG_MMIC_ERROR = 1<<0, | 586 | RX_FLAG_MMIC_ERROR = 1<<0, |
@@ -575,7 +594,6 @@ enum mac80211_rx_flags { | |||
575 | RX_FLAG_HT = 1<<9, | 594 | RX_FLAG_HT = 1<<9, |
576 | RX_FLAG_40MHZ = 1<<10, | 595 | RX_FLAG_40MHZ = 1<<10, |
577 | RX_FLAG_SHORT_GI = 1<<11, | 596 | RX_FLAG_SHORT_GI = 1<<11, |
578 | RX_FLAG_INTERNAL_CMTR = 1<<12, | ||
579 | }; | 597 | }; |
580 | 598 | ||
581 | /** | 599 | /** |
@@ -596,6 +614,7 @@ enum mac80211_rx_flags { | |||
596 | * @rate_idx: index of data rate into band's supported rates or MCS index if | 614 | * @rate_idx: index of data rate into band's supported rates or MCS index if |
597 | * HT rates are used (RX_FLAG_HT) | 615 | * HT rates are used (RX_FLAG_HT) |
598 | * @flag: %RX_FLAG_* | 616 | * @flag: %RX_FLAG_* |
617 | * @rx_flags: internal RX flags for mac80211 | ||
599 | */ | 618 | */ |
600 | struct ieee80211_rx_status { | 619 | struct ieee80211_rx_status { |
601 | u64 mactime; | 620 | u64 mactime; |
@@ -605,6 +624,7 @@ struct ieee80211_rx_status { | |||
605 | int antenna; | 624 | int antenna; |
606 | int rate_idx; | 625 | int rate_idx; |
607 | int flag; | 626 | int flag; |
627 | unsigned int rx_flags; | ||
608 | }; | 628 | }; |
609 | 629 | ||
610 | /** | 630 | /** |
@@ -763,6 +783,8 @@ struct ieee80211_channel_switch { | |||
763 | * @bss_conf: BSS configuration for this interface, either our own | 783 | * @bss_conf: BSS configuration for this interface, either our own |
764 | * or the BSS we're associated to | 784 | * or the BSS we're associated to |
765 | * @addr: address of this interface | 785 | * @addr: address of this interface |
786 | * @p2p: indicates whether this AP or STA interface is a p2p | ||
787 | * interface, i.e. a GO or p2p-sta respectively | ||
766 | * @drv_priv: data area for driver use, will always be aligned to | 788 | * @drv_priv: data area for driver use, will always be aligned to |
767 | * sizeof(void *). | 789 | * sizeof(void *). |
768 | */ | 790 | */ |
@@ -770,6 +792,7 @@ struct ieee80211_vif { | |||
770 | enum nl80211_iftype type; | 792 | enum nl80211_iftype type; |
771 | struct ieee80211_bss_conf bss_conf; | 793 | struct ieee80211_bss_conf bss_conf; |
772 | u8 addr[ETH_ALEN]; | 794 | u8 addr[ETH_ALEN]; |
795 | bool p2p; | ||
773 | /* must be last */ | 796 | /* must be last */ |
774 | u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); | 797 | u8 drv_priv[0] __attribute__((__aligned__(sizeof(void *)))); |
775 | }; | 798 | }; |
@@ -783,20 +806,6 @@ static inline bool ieee80211_vif_is_mesh(struct ieee80211_vif *vif) | |||
783 | } | 806 | } |
784 | 807 | ||
785 | /** | 808 | /** |
786 | * enum ieee80211_key_alg - key algorithm | ||
787 | * @ALG_WEP: WEP40 or WEP104 | ||
788 | * @ALG_TKIP: TKIP | ||
789 | * @ALG_CCMP: CCMP (AES) | ||
790 | * @ALG_AES_CMAC: AES-128-CMAC | ||
791 | */ | ||
792 | enum ieee80211_key_alg { | ||
793 | ALG_WEP, | ||
794 | ALG_TKIP, | ||
795 | ALG_CCMP, | ||
796 | ALG_AES_CMAC, | ||
797 | }; | ||
798 | |||
799 | /** | ||
800 | * enum ieee80211_key_flags - key flags | 809 | * enum ieee80211_key_flags - key flags |
801 | * | 810 | * |
802 | * These flags are used for communication about keys between the driver | 811 | * These flags are used for communication about keys between the driver |
@@ -833,7 +842,7 @@ enum ieee80211_key_flags { | |||
833 | * @hw_key_idx: To be set by the driver, this is the key index the driver | 842 | * @hw_key_idx: To be set by the driver, this is the key index the driver |
834 | * wants to be given when a frame is transmitted and needs to be | 843 | * wants to be given when a frame is transmitted and needs to be |
835 | * encrypted in hardware. | 844 | * encrypted in hardware. |
836 | * @alg: The key algorithm. | 845 | * @cipher: The key's cipher suite selector. |
837 | * @flags: key flags, see &enum ieee80211_key_flags. | 846 | * @flags: key flags, see &enum ieee80211_key_flags. |
838 | * @keyidx: the key index (0-3) | 847 | * @keyidx: the key index (0-3) |
839 | * @keylen: key material length | 848 | * @keylen: key material length |
@@ -846,7 +855,7 @@ enum ieee80211_key_flags { | |||
846 | * @iv_len: The IV length for this key type | 855 | * @iv_len: The IV length for this key type |
847 | */ | 856 | */ |
848 | struct ieee80211_key_conf { | 857 | struct ieee80211_key_conf { |
849 | enum ieee80211_key_alg alg; | 858 | u32 cipher; |
850 | u8 icv_len; | 859 | u8 icv_len; |
851 | u8 iv_len; | 860 | u8 iv_len; |
852 | u8 hw_key_idx; | 861 | u8 hw_key_idx; |
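[Editor's note] With enum ieee80211_key_alg removed, drivers see the raw cipher suite selector in key->cipher and would typically switch on the WLAN_CIPHER_SUITE_* constants from linux/ieee80211.h. A hedged sketch of the relevant part of a set_key() path; the demo_hw_* comments stand in for real hardware programming:

#include <linux/ieee80211.h>
#include <net/mac80211.h>

static int demo_program_key(struct ieee80211_key_conf *key)
{
	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		return -EOPNOTSUPP;	/* fall back to software WEP */
	case WLAN_CIPHER_SUITE_TKIP:
		/* demo_hw_set_tkip(key) would go here */
		return 0;
	case WLAN_CIPHER_SUITE_CCMP:
		/* demo_hw_set_ccmp(key) would go here */
		return 0;
	case WLAN_CIPHER_SUITE_AES_CMAC:
	default:
		return -EOPNOTSUPP;
	}
}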
@@ -1032,6 +1041,13 @@ enum ieee80211_tkip_key_type { | |||
1032 | * @IEEE80211_HW_NEED_DTIM_PERIOD: | 1041 | * @IEEE80211_HW_NEED_DTIM_PERIOD: |
1033 | * This device needs to know the DTIM period for the BSS before | 1042 | * This device needs to know the DTIM period for the BSS before |
1034 | * associating. | 1043 | * associating. |
1044 | * | ||
1045 | * @IEEE80211_HW_SUPPORTS_PER_STA_GTK: The device's crypto engine supports | ||
1046 | * per-station GTKs as used by IBSS RSN or during fast transition. If | ||
1047 | * the device doesn't support per-station GTKs, but can be asked not | ||
1048 | * to decrypt group addressed frames, then IBSS RSN support is still | ||
1049 | * possible but software crypto will be used. Advertise the wiphy flag | ||
1050 | * only in that case. | ||
1035 | */ | 1051 | */ |
1036 | enum ieee80211_hw_flags { | 1052 | enum ieee80211_hw_flags { |
1037 | IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, | 1053 | IEEE80211_HW_HAS_RATE_CONTROL = 1<<0, |
@@ -1055,6 +1071,7 @@ enum ieee80211_hw_flags { | |||
1055 | IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, | 1071 | IEEE80211_HW_REPORTS_TX_ACK_STATUS = 1<<18, |
1056 | IEEE80211_HW_CONNECTION_MONITOR = 1<<19, | 1072 | IEEE80211_HW_CONNECTION_MONITOR = 1<<19, |
1057 | IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20, | 1073 | IEEE80211_HW_SUPPORTS_CQM_RSSI = 1<<20, |
1074 | IEEE80211_HW_SUPPORTS_PER_STA_GTK = 1<<21, | ||
1058 | }; | 1075 | }; |
1059 | 1076 | ||
1060 | /** | 1077 | /** |
@@ -1100,8 +1117,15 @@ enum ieee80211_hw_flags { | |||
1100 | * @sta_data_size: size (in bytes) of the drv_priv data area | 1117 | * @sta_data_size: size (in bytes) of the drv_priv data area |
1101 | * within &struct ieee80211_sta. | 1118 | * within &struct ieee80211_sta. |
1102 | * | 1119 | * |
1103 | * @max_rates: maximum number of alternate rate retry stages | 1120 | * @max_rates: maximum number of alternate rate retry stages the hw |
1121 | * can handle. | ||
1122 | * @max_report_rates: maximum number of alternate rate retry stages | ||
1123 | * the hw can report back. | ||
1104 | * @max_rate_tries: maximum number of tries for each stage | 1124 | * @max_rate_tries: maximum number of tries for each stage |
1125 | * | ||
1126 | * @napi_weight: weight used for NAPI polling. You must specify an | ||
1127 | * appropriate value here if a napi_poll operation is provided | ||
1128 | * by your driver. | ||
1105 | */ | 1129 | */ |
1106 | struct ieee80211_hw { | 1130 | struct ieee80211_hw { |
1107 | struct ieee80211_conf conf; | 1131 | struct ieee80211_conf conf; |
@@ -1113,10 +1137,12 @@ struct ieee80211_hw { | |||
1113 | int channel_change_time; | 1137 | int channel_change_time; |
1114 | int vif_data_size; | 1138 | int vif_data_size; |
1115 | int sta_data_size; | 1139 | int sta_data_size; |
1140 | int napi_weight; | ||
1116 | u16 queues; | 1141 | u16 queues; |
1117 | u16 max_listen_interval; | 1142 | u16 max_listen_interval; |
1118 | s8 max_signal; | 1143 | s8 max_signal; |
1119 | u8 max_rates; | 1144 | u8 max_rates; |
1145 | u8 max_report_rates; | ||
1120 | u8 max_rate_tries; | 1146 | u8 max_rate_tries; |
1121 | }; | 1147 | }; |
1122 | 1148 | ||
@@ -1245,8 +1271,8 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, | |||
1245 | * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in | 1271 | * %IEEE80211_CONF_PS flag enabled means that the powersave mode defined in |
1246 | * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused | 1272 | * IEEE 802.11-2007 section 11.2 is enabled. This is not to be confused |
1247 | * with hardware wakeup and sleep states. Driver is responsible for waking | 1273 | * with hardware wakeup and sleep states. Driver is responsible for waking |
1248 | * up the hardware before issueing commands to the hardware and putting it | 1274 | * up the hardware before issuing commands to the hardware and putting it |
1249 | * back to sleep at approriate times. | 1275 | * back to sleep at appropriate times. |
1250 | * | 1276 | * |
1251 | * When PS is enabled, hardware needs to wakeup for beacons and receive the | 1277 | * When PS is enabled, hardware needs to wakeup for beacons and receive the |
1252 | * buffered multicast/broadcast frames after the beacon. Also it must be | 1278 | * buffered multicast/broadcast frames after the beacon. Also it must be |
@@ -1267,7 +1293,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, | |||
1267 | * there's data traffic and still saving significantly power in idle | 1293 | * there's data traffic and still saving significantly power in idle |
1268 | * periods. | 1294 | * periods. |
1269 | * | 1295 | * |
1270 | * Dynamic powersave is supported by simply mac80211 enabling and disabling | 1296 | * Dynamic powersave is simply supported by mac80211 enabling and disabling |
1271 | * PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS | 1297 | * PS based on traffic. Driver needs to only set %IEEE80211_HW_SUPPORTS_PS |
1272 | * flag and mac80211 will handle everything automatically. Additionally, | 1298 | * flag and mac80211 will handle everything automatically. Additionally, |
1273 | * hardware having support for the dynamic PS feature may set the | 1299 | * hardware having support for the dynamic PS feature may set the |
@@ -1452,12 +1478,14 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw, | |||
1452 | * honour this flag if possible. | 1478 | * honour this flag if possible. |
1453 | * | 1479 | * |
1454 | * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS | 1480 | * @FIF_CONTROL: pass control frames (except for PS Poll), if PROMISC_IN_BSS |
1455 | * is not set then only those addressed to this station. | 1481 | * is not set then only those addressed to this station. |
1456 | * | 1482 | * |
1457 | * @FIF_OTHER_BSS: pass frames destined to other BSSes | 1483 | * @FIF_OTHER_BSS: pass frames destined to other BSSes |
1458 | * | 1484 | * |
1459 | * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only | 1485 | * @FIF_PSPOLL: pass PS Poll frames, if PROMISC_IN_BSS is not set then only |
1460 | * those addressed to this station. | 1486 | * those addressed to this station. |
1487 | * | ||
1488 | * @FIF_PROBE_REQ: pass probe request frames | ||
1461 | */ | 1489 | */ |
1462 | enum ieee80211_filter_flags { | 1490 | enum ieee80211_filter_flags { |
1463 | FIF_PROMISC_IN_BSS = 1<<0, | 1491 | FIF_PROMISC_IN_BSS = 1<<0, |
@@ -1468,6 +1496,7 @@ enum ieee80211_filter_flags { | |||
1468 | FIF_CONTROL = 1<<5, | 1496 | FIF_CONTROL = 1<<5, |
1469 | FIF_OTHER_BSS = 1<<6, | 1497 | FIF_OTHER_BSS = 1<<6, |
1470 | FIF_PSPOLL = 1<<7, | 1498 | FIF_PSPOLL = 1<<7, |
1499 | FIF_PROBE_REQ = 1<<8, | ||
1471 | }; | 1500 | }; |
1472 | 1501 | ||
1473 | /** | 1502 | /** |
@@ -1540,6 +1569,12 @@ enum ieee80211_ampdu_mlme_action { | |||
1540 | * negative error code (which will be seen in userspace.) | 1569 | * negative error code (which will be seen in userspace.) |
1541 | * Must be implemented and can sleep. | 1570 | * Must be implemented and can sleep. |
1542 | * | 1571 | * |
1572 | * @change_interface: Called when a netdevice changes type. This callback | ||
1573 | * is optional, but only if it is supported can interface types be | ||
1574 | * switched while the interface is UP. The callback may sleep. | ||
1575 | * Note that while an interface is being switched, it will not be | ||
1576 | * found by the interface iteration callbacks. | ||
1577 | * | ||
1543 | * @remove_interface: Notifies a driver that an interface is going down. | 1578 | * @remove_interface: Notifies a driver that an interface is going down. |
1544 | * The @stop callback is called after this if it is the last interface | 1579 | * The @stop callback is called after this if it is the last interface |
1545 | * and no monitor interfaces are present. | 1580 | * and no monitor interfaces are present. |
@@ -1687,6 +1722,8 @@ enum ieee80211_ampdu_mlme_action { | |||
1687 | * switch operation for CSAs received from the AP may implement this | 1722 | * switch operation for CSAs received from the AP may implement this |
1688 | * callback. They must then call ieee80211_chswitch_done() to indicate | 1723 | * callback. They must then call ieee80211_chswitch_done() to indicate |
1689 | * completion of the channel switch. | 1724 | * completion of the channel switch. |
1725 | * | ||
1726 | * @napi_poll: Poll Rx queue for incoming data frames. | ||
1690 | */ | 1727 | */ |
1691 | struct ieee80211_ops { | 1728 | struct ieee80211_ops { |
1692 | int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); | 1729 | int (*tx)(struct ieee80211_hw *hw, struct sk_buff *skb); |
@@ -1694,6 +1731,9 @@ struct ieee80211_ops { | |||
1694 | void (*stop)(struct ieee80211_hw *hw); | 1731 | void (*stop)(struct ieee80211_hw *hw); |
1695 | int (*add_interface)(struct ieee80211_hw *hw, | 1732 | int (*add_interface)(struct ieee80211_hw *hw, |
1696 | struct ieee80211_vif *vif); | 1733 | struct ieee80211_vif *vif); |
1734 | int (*change_interface)(struct ieee80211_hw *hw, | ||
1735 | struct ieee80211_vif *vif, | ||
1736 | enum nl80211_iftype new_type, bool p2p); | ||
1697 | void (*remove_interface)(struct ieee80211_hw *hw, | 1737 | void (*remove_interface)(struct ieee80211_hw *hw, |
1698 | struct ieee80211_vif *vif); | 1738 | struct ieee80211_vif *vif); |
1699 | int (*config)(struct ieee80211_hw *hw, u32 changed); | 1739 | int (*config)(struct ieee80211_hw *hw, u32 changed); |
@@ -1752,6 +1792,7 @@ struct ieee80211_ops { | |||
1752 | void (*flush)(struct ieee80211_hw *hw, bool drop); | 1792 | void (*flush)(struct ieee80211_hw *hw, bool drop); |
1753 | void (*channel_switch)(struct ieee80211_hw *hw, | 1793 | void (*channel_switch)(struct ieee80211_hw *hw, |
1754 | struct ieee80211_channel_switch *ch_switch); | 1794 | struct ieee80211_channel_switch *ch_switch); |
1795 | int (*napi_poll)(struct ieee80211_hw *hw, int budget); | ||
1755 | }; | 1796 | }; |
1756 | 1797 | ||
1757 | /** | 1798 | /** |
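[Editor's note] Two optional callbacks are added to ieee80211_ops here: change_interface(), which lets mac80211 switch the type of an UP interface in place, and napi_poll(), which together with hw->napi_weight and the ieee80211_napi_schedule()/ieee80211_napi_complete() helpers declared further down moves RX processing into NAPI context. A sketch of how a driver might wire them up; every demo_* symbol is hypothetical:

#include <net/mac80211.h>

static int demo_change_interface(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 enum nl80211_iftype new_type, bool p2p)
{
	/* reprogram the device (filters, queues, firmware mode)
	 * for new_type/p2p here */
	return 0;
}

static int demo_napi_poll(struct ieee80211_hw *hw, int budget)
{
	int done = 0;

	/* hand up to 'budget' frames to ieee80211_rx() here,
	 * counting them in 'done' */

	if (done < budget)
		ieee80211_napi_complete(hw);	/* RX ring drained */
	return done;
}

static const struct ieee80211_ops demo_ops = {
	/* .tx, .start, .stop, .add_interface, ... omitted for brevity */
	.change_interface	= demo_change_interface,
	.napi_poll		= demo_napi_poll,
};

The driver's interrupt handler would call ieee80211_napi_schedule(hw) when RX work arrives, and hw->napi_weight must be set to a sensible value per the @napi_weight note above.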
@@ -1897,6 +1938,22 @@ void ieee80211_free_hw(struct ieee80211_hw *hw); | |||
1897 | */ | 1938 | */ |
1898 | void ieee80211_restart_hw(struct ieee80211_hw *hw); | 1939 | void ieee80211_restart_hw(struct ieee80211_hw *hw); |
1899 | 1940 | ||
1941 | /** ieee80211_napi_schedule - schedule NAPI poll | ||
1942 | * | ||
1943 | * Use this function to schedule NAPI polling on a device. | ||
1944 | * | ||
1945 | * @hw: the hardware to start polling | ||
1946 | */ | ||
1947 | void ieee80211_napi_schedule(struct ieee80211_hw *hw); | ||
1948 | |||
1949 | /** ieee80211_napi_complete - complete NAPI polling | ||
1950 | * | ||
1951 | * Use this function to finish NAPI polling on a device. | ||
1952 | * | ||
1953 | * @hw: the hardware to stop polling | ||
1954 | */ | ||
1955 | void ieee80211_napi_complete(struct ieee80211_hw *hw); | ||
1956 | |||
1900 | /** | 1957 | /** |
1901 | * ieee80211_rx - receive frame | 1958 | * ieee80211_rx - receive frame |
1902 | * | 1959 | * |
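
A sketch of how ieee80211_napi_schedule(), ieee80211_napi_complete() and the napi_poll op might cooperate in a driver RX path; the interrupt-masking and frame-fetch helpers are invented, only the mac80211 calls and prototypes come from this diff:

	#include <linux/interrupt.h>
	#include <net/mac80211.h>

	static irqreturn_t drv_isr(int irq, void *dev_id)
	{
		struct ieee80211_hw *hw = dev_id;

		drv_hw_mask_rx_irq(hw->priv);		/* invented helper */
		ieee80211_napi_schedule(hw);
		return IRQ_HANDLED;
	}

	static int drv_napi_poll(struct ieee80211_hw *hw, int budget)
	{
		struct sk_buff *skb;
		int done = 0;

		while (done < budget &&
		       (skb = drv_hw_get_rx_frame(hw->priv)) != NULL) {	/* invented */
			ieee80211_rx(hw, skb);
			done++;
		}

		if (done < budget) {
			/* no more work: stop polling, re-enable RX interrupts */
			ieee80211_napi_complete(hw);
			drv_hw_unmask_rx_irq(hw->priv);
		}
		return done;
	}
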
@@ -2252,7 +2309,8 @@ void ieee80211_wake_queues(struct ieee80211_hw *hw); | |||
2252 | * | 2309 | * |
2253 | * When hardware scan offload is used (i.e. the hw_scan() callback is | 2310 | * When hardware scan offload is used (i.e. the hw_scan() callback is |
2254 | * assigned) this function needs to be called by the driver to notify | 2311 | * assigned) this function needs to be called by the driver to notify |
2255 | * mac80211 that the scan finished. | 2312 | * mac80211 that the scan finished. This function can be called from |
2313 | * any context, including hardirq context. | ||
2256 | * | 2314 | * |
2257 | * @hw: the hardware that finished the scan | 2315 | * @hw: the hardware that finished the scan |
2258 | * @aborted: set to true if scan was aborted | 2316 | * @aborted: set to true if scan was aborted |
@@ -2267,6 +2325,7 @@ void ieee80211_scan_completed(struct ieee80211_hw *hw, bool aborted); | |||
2267 | * This function allows the iterator function to sleep; when the iterator | 2325 | * This function allows the iterator function to sleep; when the iterator |
2268 | * function is atomic, @ieee80211_iterate_active_interfaces_atomic can | 2326 | * function is atomic, @ieee80211_iterate_active_interfaces_atomic can |
2269 | * be used. | 2327 | * be used. |
2328 | * Does not iterate over a new interface during add_interface() | ||
2270 | * | 2329 | * |
2271 | * @hw: the hardware struct of which the interfaces should be iterated over | 2330 | * @hw: the hardware struct of which the interfaces should be iterated over |
2272 | * @iterator: the iterator function to call | 2331 | * @iterator: the iterator function to call |
@@ -2284,6 +2343,7 @@ void ieee80211_iterate_active_interfaces(struct ieee80211_hw *hw, | |||
2284 | * hardware that are currently active and calls the callback for them. | 2343 | * hardware that are currently active and calls the callback for them. |
2285 | * This function requires the iterator callback function to be atomic; | 2344 | * This function requires the iterator callback function to be atomic; |
2286 | * if that is not desired, use @ieee80211_iterate_active_interfaces instead. | 2345 | * if that is not desired, use @ieee80211_iterate_active_interfaces instead. |
2346 | * Does not iterate over a new interface during add_interface() | ||
2287 | * | 2347 | * |
2288 | * @hw: the hardware struct of which the interfaces should be iterated over | 2348 | * @hw: the hardware struct of which the interfaces should be iterated over |
2289 | * @iterator: the iterator function to call, cannot sleep | 2349 | * @iterator: the iterator function to call, cannot sleep |
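
For illustration, a minimal atomic iteration that counts the active interfaces; it assumes the usual mac80211 iterator prototype (void *data, u8 *mac, struct ieee80211_vif *vif) and, per the note added above, will not see an interface that is still inside add_interface():

	#include <net/mac80211.h>

	static void drv_count_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
	{
		int *count = data;

		(*count)++;		/* must not sleep in the _atomic variant */
	}

	static int drv_count_active_vifs(struct ieee80211_hw *hw)
	{
		int count = 0;

		ieee80211_iterate_active_interfaces_atomic(hw, drv_count_iter, &count);
		return count;
	}
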
@@ -2385,25 +2445,28 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif, | |||
2385 | const u8 *addr); | 2445 | const u8 *addr); |
2386 | 2446 | ||
2387 | /** | 2447 | /** |
2388 | * ieee80211_find_sta_by_hw - find a station on hardware | 2448 | * ieee80211_find_sta_by_ifaddr - find a station on hardware |
2389 | * | 2449 | * |
2390 | * @hw: pointer as obtained from ieee80211_alloc_hw() | 2450 | * @hw: pointer as obtained from ieee80211_alloc_hw() |
2391 | * @addr: station's address | 2451 | * @addr: remote station's address |
2452 | * @localaddr: local address (vif->sdata->vif.addr). Use NULL for 'any'. | ||
2392 | * | 2453 | * |
2393 | * This function must be called under RCU lock and the | 2454 | * This function must be called under RCU lock and the |
2394 | * resulting pointer is only valid under RCU lock as well. | 2455 | * resulting pointer is only valid under RCU lock as well. |
2395 | * | 2456 | * |
2396 | * NOTE: This function should not be used! When mac80211 is converted | 2457 | * NOTE: You may pass NULL for localaddr, but then you will just get |
2397 | * internally to properly keep track of stations on multiple | 2458 | * the first STA that matches the remote address 'addr'. |
2398 | * virtual interfaces, it will not always know which station to | 2459 | * A single remote address may be in use by multiple |
2399 | * return here since a single address might be used by multiple | 2460 | * logical stations (e.g. consider a station connecting to another |
2400 | * logical stations (e.g. consider a station connecting to another | 2461 | * BSSID on the same AP hardware without disconnecting first). |
2401 | * BSSID on the same AP hardware without disconnecting first). | 2462 | * In this case, the result of this method with localaddr NULL |
2463 | * is not reliable. | ||
2402 | * | 2464 | * |
2403 | * DO NOT USE THIS FUNCTION. | 2465 | * DO NOT USE THIS FUNCTION with localaddr NULL if at all possible. |
2404 | */ | 2466 | */ |
2405 | struct ieee80211_sta *ieee80211_find_sta_by_hw(struct ieee80211_hw *hw, | 2467 | struct ieee80211_sta *ieee80211_find_sta_by_ifaddr(struct ieee80211_hw *hw, |
2406 | const u8 *addr); | 2468 | const u8 *addr, |
2469 | const u8 *localaddr); | ||
2407 | 2470 | ||
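
A minimal usage sketch for the renamed lookup, showing the RCU discipline the comment requires and passing a local address so multi-BSS setups resolve unambiguously; drv_touch_sta() is invented:

	#include <net/mac80211.h>

	static void drv_note_peer(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
				  const u8 *peer_addr)
	{
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = ieee80211_find_sta_by_ifaddr(hw, peer_addr, vif->addr);
		if (sta)
			drv_touch_sta(sta);	/* must finish before rcu_read_unlock() */
		rcu_read_unlock();
	}
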
2408 | /** | 2471 | /** |
2409 | * ieee80211_sta_block_awake - block station from waking up | 2472 | * ieee80211_sta_block_awake - block station from waking up |
@@ -2442,7 +2505,7 @@ void ieee80211_sta_block_awake(struct ieee80211_hw *hw, | |||
2442 | * | 2505 | * |
2443 | * @vif: &struct ieee80211_vif pointer from the add_interface callback. | 2506 | * @vif: &struct ieee80211_vif pointer from the add_interface callback. |
2444 | * | 2507 | * |
2445 | * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING and | 2508 | * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER and |
2446 | * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the | 2509 | * %IEEE80211_CONF_PS is set, the driver needs to inform whenever the |
2447 | * hardware is not receiving beacons with this function. | 2510 | * hardware is not receiving beacons with this function. |
2448 | */ | 2511 | */ |
@@ -2453,7 +2516,7 @@ void ieee80211_beacon_loss(struct ieee80211_vif *vif); | |||
2453 | * | 2516 | * |
2454 | * @vif: &struct ieee80211_vif pointer from the add_interface callback. | 2517 | * @vif: &struct ieee80211_vif pointer from the add_interface callback. |
2455 | * | 2518 | * |
2456 | * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTERING, and | 2519 | * When beacon filtering is enabled with %IEEE80211_HW_BEACON_FILTER, and |
2457 | * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver | 2520 | * %IEEE80211_CONF_PS and %IEEE80211_HW_CONNECTION_MONITOR are set, the driver |
2458 | * needs to inform if the connection to the AP has been lost. | 2521 | * needs to inform if the connection to the AP has been lost. |
2459 | * | 2522 | * |
@@ -2518,6 +2581,34 @@ void ieee80211_cqm_rssi_notify(struct ieee80211_vif *vif, | |||
2518 | */ | 2581 | */ |
2519 | void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); | 2582 | void ieee80211_chswitch_done(struct ieee80211_vif *vif, bool success); |
2520 | 2583 | ||
2584 | /** | ||
2585 | * ieee80211_request_smps - request SM PS transition | ||
2586 | * @vif: &struct ieee80211_vif pointer from the add_interface callback. | ||
2587 | * @smps_mode: new SM PS mode | ||
2588 | * | ||
2589 | * This allows the driver to request an SM PS transition in managed | ||
2590 | * mode. This is useful when the driver has more information than | ||
2591 | * the stack about possible interference, for example from Bluetooth. | ||
2592 | */ | ||
2593 | void ieee80211_request_smps(struct ieee80211_vif *vif, | ||
2594 | enum ieee80211_smps_mode smps_mode); | ||
2595 | |||
2596 | /** | ||
2597 | * ieee80211_key_removed - disable hw acceleration for key | ||
2598 | * @key_conf: The key hw acceleration should be disabled for | ||
2599 | * | ||
2600 | * This allows drivers to indicate that the given key has been | ||
2601 | * removed from hardware acceleration, due to a new key that | ||
2602 | * was added. Don't use this if the key can continue to be used | ||
2603 | * for TX; if the key restriction is on RX only, it is permitted | ||
2604 | * to keep the key for TX only and not call this function. | ||
2605 | * | ||
2606 | * Due to locking constraints, it may only be called during | ||
2607 | * @set_key. This function must be allowed to sleep, and the | ||
2608 | * key it tries to disable may still be used until it returns. | ||
2609 | */ | ||
2610 | void ieee80211_key_removed(struct ieee80211_key_conf *key_conf); | ||
2611 | |||
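
As a sketch of the ieee80211_request_smps() use case mentioned above (Bluetooth coexistence), assuming the existing enum ieee80211_smps_mode values; the coex notification hook itself is invented:

	#include <net/mac80211.h>

	static void drv_bt_coex_event(struct ieee80211_vif *vif, bool bt_busy)
	{
		/* With heavy Bluetooth traffic, ask for dynamic SM PS so fewer
		 * RX chains stay powered; otherwise let mac80211 decide again.
		 */
		ieee80211_request_smps(vif, bt_busy ? IEEE80211_SMPS_DYNAMIC
						    : IEEE80211_SMPS_AUTOMATIC);
	}
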
2521 | /* Rate control API */ | 2612 | /* Rate control API */ |
2522 | 2613 | ||
2523 | /** | 2614 | /** |
@@ -2681,4 +2772,26 @@ conf_is_ht(struct ieee80211_conf *conf) | |||
2681 | return conf->channel_type != NL80211_CHAN_NO_HT; | 2772 | return conf->channel_type != NL80211_CHAN_NO_HT; |
2682 | } | 2773 | } |
2683 | 2774 | ||
2775 | static inline enum nl80211_iftype | ||
2776 | ieee80211_iftype_p2p(enum nl80211_iftype type, bool p2p) | ||
2777 | { | ||
2778 | if (p2p) { | ||
2779 | switch (type) { | ||
2780 | case NL80211_IFTYPE_STATION: | ||
2781 | return NL80211_IFTYPE_P2P_CLIENT; | ||
2782 | case NL80211_IFTYPE_AP: | ||
2783 | return NL80211_IFTYPE_P2P_GO; | ||
2784 | default: | ||
2785 | break; | ||
2786 | } | ||
2787 | } | ||
2788 | return type; | ||
2789 | } | ||
2790 | |||
2791 | static inline enum nl80211_iftype | ||
2792 | ieee80211_vif_type_p2p(struct ieee80211_vif *vif) | ||
2793 | { | ||
2794 | return ieee80211_iftype_p2p(vif->type, vif->p2p); | ||
2795 | } | ||
2796 | |||
2684 | #endif /* MAC80211_H */ | 2797 | #endif /* MAC80211_H */ |
diff --git a/include/net/neighbour.h b/include/net/neighbour.h index 242879b6c4df..55590ab16b3e 100644 --- a/include/net/neighbour.h +++ b/include/net/neighbour.h | |||
@@ -91,26 +91,28 @@ struct neigh_statistics { | |||
91 | #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) | 91 | #define NEIGH_CACHE_STAT_INC(tbl, field) this_cpu_inc((tbl)->stats->field) |
92 | 92 | ||
93 | struct neighbour { | 93 | struct neighbour { |
94 | struct neighbour *next; | 94 | struct neighbour __rcu *next; |
95 | struct neigh_table *tbl; | 95 | struct neigh_table *tbl; |
96 | struct neigh_parms *parms; | 96 | struct neigh_parms *parms; |
97 | struct net_device *dev; | ||
98 | unsigned long used; | ||
99 | unsigned long confirmed; | 97 | unsigned long confirmed; |
100 | unsigned long updated; | 98 | unsigned long updated; |
101 | __u8 flags; | 99 | __u8 flags; |
102 | __u8 nud_state; | 100 | __u8 nud_state; |
103 | __u8 type; | 101 | __u8 type; |
104 | __u8 dead; | 102 | __u8 dead; |
103 | atomic_t refcnt; | ||
104 | struct sk_buff_head arp_queue; | ||
105 | struct timer_list timer; | ||
106 | unsigned long used; | ||
105 | atomic_t probes; | 107 | atomic_t probes; |
106 | rwlock_t lock; | 108 | rwlock_t lock; |
109 | seqlock_t ha_lock; | ||
107 | unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; | 110 | unsigned char ha[ALIGN(MAX_ADDR_LEN, sizeof(unsigned long))]; |
108 | struct hh_cache *hh; | 111 | struct hh_cache *hh; |
109 | atomic_t refcnt; | ||
110 | int (*output)(struct sk_buff *skb); | 112 | int (*output)(struct sk_buff *skb); |
111 | struct sk_buff_head arp_queue; | ||
112 | struct timer_list timer; | ||
113 | const struct neigh_ops *ops; | 113 | const struct neigh_ops *ops; |
114 | struct rcu_head rcu; | ||
115 | struct net_device *dev; | ||
114 | u8 primary_key[0]; | 116 | u8 primary_key[0]; |
115 | }; | 117 | }; |
116 | 118 | ||
@@ -138,13 +140,22 @@ struct pneigh_entry { | |||
138 | * neighbour table manipulation | 140 | * neighbour table manipulation |
139 | */ | 141 | */ |
140 | 142 | ||
143 | struct neigh_hash_table { | ||
144 | struct neighbour __rcu **hash_buckets; | ||
145 | unsigned int hash_mask; | ||
146 | __u32 hash_rnd; | ||
147 | struct rcu_head rcu; | ||
148 | }; | ||
149 | |||
141 | 150 | ||
142 | struct neigh_table { | 151 | struct neigh_table { |
143 | struct neigh_table *next; | 152 | struct neigh_table *next; |
144 | int family; | 153 | int family; |
145 | int entry_size; | 154 | int entry_size; |
146 | int key_len; | 155 | int key_len; |
147 | __u32 (*hash)(const void *pkey, const struct net_device *); | 156 | __u32 (*hash)(const void *pkey, |
157 | const struct net_device *dev, | ||
158 | __u32 hash_rnd); | ||
148 | int (*constructor)(struct neighbour *); | 159 | int (*constructor)(struct neighbour *); |
149 | int (*pconstructor)(struct pneigh_entry *); | 160 | int (*pconstructor)(struct pneigh_entry *); |
150 | void (*pdestructor)(struct pneigh_entry *); | 161 | void (*pdestructor)(struct pneigh_entry *); |
@@ -163,11 +174,9 @@ struct neigh_table { | |||
163 | atomic_t entries; | 174 | atomic_t entries; |
164 | rwlock_t lock; | 175 | rwlock_t lock; |
165 | unsigned long last_rand; | 176 | unsigned long last_rand; |
166 | struct kmem_cache *kmem_cachep; | 177 | struct kmem_cache *kmem_cachep; |
167 | struct neigh_statistics __percpu *stats; | 178 | struct neigh_statistics __percpu *stats; |
168 | struct neighbour **hash_buckets; | 179 | struct neigh_hash_table __rcu *nht; |
169 | unsigned int hash_mask; | ||
170 | __u32 hash_rnd; | ||
171 | struct pneigh_entry **phash_buckets; | 180 | struct pneigh_entry **phash_buckets; |
172 | }; | 181 | }; |
173 | 182 | ||
@@ -237,6 +246,7 @@ extern void pneigh_for_each(struct neigh_table *tbl, void (*cb)(struct pneigh_en | |||
237 | struct neigh_seq_state { | 246 | struct neigh_seq_state { |
238 | struct seq_net_private p; | 247 | struct seq_net_private p; |
239 | struct neigh_table *tbl; | 248 | struct neigh_table *tbl; |
249 | struct neigh_hash_table *nht; | ||
240 | void *(*neigh_sub_iter)(struct neigh_seq_state *state, | 250 | void *(*neigh_sub_iter)(struct neigh_seq_state *state, |
241 | struct neighbour *n, loff_t *pos); | 251 | struct neighbour *n, loff_t *pos); |
242 | unsigned int bucket; | 252 | unsigned int bucket; |
@@ -293,7 +303,10 @@ static inline void neigh_confirm(struct neighbour *neigh) | |||
293 | 303 | ||
294 | static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) | 304 | static inline int neigh_event_send(struct neighbour *neigh, struct sk_buff *skb) |
295 | { | 305 | { |
296 | neigh->used = jiffies; | 306 | unsigned long now = ACCESS_ONCE(jiffies); |
307 | |||
308 | if (neigh->used != now) | ||
309 | neigh->used = now; | ||
297 | if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) | 310 | if (!(neigh->nud_state&(NUD_CONNECTED|NUD_DELAY|NUD_PROBE))) |
298 | return __neigh_event_send(neigh, skb); | 311 | return __neigh_event_send(neigh, skb); |
299 | return 0; | 312 | return 0; |
@@ -364,4 +377,14 @@ struct neighbour_cb { | |||
364 | 377 | ||
365 | #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) | 378 | #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) |
366 | 379 | ||
380 | static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, | ||
381 | const struct net_device *dev) | ||
382 | { | ||
383 | unsigned int seq; | ||
384 | |||
385 | do { | ||
386 | seq = read_seqbegin(&n->ha_lock); | ||
387 | memcpy(dst, n->ha, dev->addr_len); | ||
388 | } while (read_seqretry(&n->ha_lock, seq)); | ||
389 | } | ||
367 | #endif | 390 | #endif |
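
A short sketch of what the new helper is for: callers that used to memcpy() n->ha directly can take a torn-write-free copy under the ha_lock seqlock before building a header. dev_hard_header() is the regular netdevice helper; the wrapper itself is illustrative:

	#include <linux/netdevice.h>
	#include <net/neighbour.h>

	static int drv_fill_header(struct sk_buff *skb, struct neighbour *n,
				   struct net_device *dev)
	{
		char haddr[MAX_ADDR_LEN];

		neigh_ha_snapshot(haddr, n, dev);	/* consistent copy of n->ha */
		return dev_hard_header(skb, dev, ntohs(skb->protocol),
				       haddr, NULL, skb->len);
	}
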
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index bd10a7908993..65af9a07cf76 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -41,6 +41,8 @@ struct net { | |||
41 | * destroy on demand | 41 | * destroy on demand |
42 | */ | 42 | */ |
43 | #endif | 43 | #endif |
44 | spinlock_t rules_mod_lock; | ||
45 | |||
44 | struct list_head list; /* list of network namespaces */ | 46 | struct list_head list; /* list of network namespaces */ |
45 | struct list_head cleanup_list; /* namespaces on death row */ | 47 | struct list_head cleanup_list; /* namespaces on death row */ |
46 | struct list_head exit_list; /* Use only net_mutex */ | 48 | struct list_head exit_list; /* Use only net_mutex */ |
@@ -52,7 +54,8 @@ struct net { | |||
52 | struct ctl_table_set sysctls; | 54 | struct ctl_table_set sysctls; |
53 | #endif | 55 | #endif |
54 | 56 | ||
55 | struct net_device *loopback_dev; /* The loopback */ | 57 | struct sock *rtnl; /* rtnetlink socket */ |
58 | struct sock *genl_sock; | ||
56 | 59 | ||
57 | struct list_head dev_base_head; | 60 | struct list_head dev_base_head; |
58 | struct hlist_head *dev_name_head; | 61 | struct hlist_head *dev_name_head; |
@@ -60,11 +63,9 @@ struct net { | |||
60 | 63 | ||
61 | /* core fib_rules */ | 64 | /* core fib_rules */ |
62 | struct list_head rules_ops; | 65 | struct list_head rules_ops; |
63 | spinlock_t rules_mod_lock; | ||
64 | 66 | ||
65 | struct sock *rtnl; /* rtnetlink socket */ | ||
66 | struct sock *genl_sock; | ||
67 | 67 | ||
68 | struct net_device *loopback_dev; /* The loopback */ | ||
68 | struct netns_core core; | 69 | struct netns_core core; |
69 | struct netns_mib mib; | 70 | struct netns_mib mib; |
70 | struct netns_packet packet; | 71 | struct netns_packet packet; |
@@ -84,13 +85,15 @@ struct net { | |||
84 | struct sock *nfnl; | 85 | struct sock *nfnl; |
85 | struct sock *nfnl_stash; | 86 | struct sock *nfnl_stash; |
86 | #endif | 87 | #endif |
87 | #ifdef CONFIG_XFRM | ||
88 | struct netns_xfrm xfrm; | ||
89 | #endif | ||
90 | #ifdef CONFIG_WEXT_CORE | 88 | #ifdef CONFIG_WEXT_CORE |
91 | struct sk_buff_head wext_nlevents; | 89 | struct sk_buff_head wext_nlevents; |
92 | #endif | 90 | #endif |
93 | struct net_generic *gen; | 91 | struct net_generic *gen; |
92 | |||
93 | /* Note : following structs are cache line aligned */ | ||
94 | #ifdef CONFIG_XFRM | ||
95 | struct netns_xfrm xfrm; | ||
96 | #endif | ||
94 | }; | 97 | }; |
95 | 98 | ||
96 | 99 | ||
diff --git a/include/net/netfilter/ipv6/nf_defrag_ipv6.h b/include/net/netfilter/ipv6/nf_defrag_ipv6.h new file mode 100644 index 000000000000..94dd54d76b48 --- /dev/null +++ b/include/net/netfilter/ipv6/nf_defrag_ipv6.h | |||
@@ -0,0 +1,6 @@ | |||
1 | #ifndef _NF_DEFRAG_IPV6_H | ||
2 | #define _NF_DEFRAG_IPV6_H | ||
3 | |||
4 | extern void nf_defrag_ipv6_enable(void); | ||
5 | |||
6 | #endif /* _NF_DEFRAG_IPV6_H */ | ||
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h index e624dae54fa4..caf17db87dbc 100644 --- a/include/net/netfilter/nf_conntrack.h +++ b/include/net/netfilter/nf_conntrack.h | |||
@@ -75,7 +75,7 @@ struct nf_conntrack_helper; | |||
75 | /* nf_conn feature for connections that have a helper */ | 75 | /* nf_conn feature for connections that have a helper */ |
76 | struct nf_conn_help { | 76 | struct nf_conn_help { |
77 | /* Helper. if any */ | 77 | /* Helper. if any */ |
78 | struct nf_conntrack_helper *helper; | 78 | struct nf_conntrack_helper __rcu *helper; |
79 | 79 | ||
80 | union nf_conntrack_help help; | 80 | union nf_conntrack_help help; |
81 | 81 | ||
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h index 11e815084fcf..0f8a8c587532 100644 --- a/include/net/netfilter/nf_conntrack_expect.h +++ b/include/net/netfilter/nf_conntrack_expect.h | |||
@@ -67,9 +67,6 @@ struct nf_conntrack_expect_policy { | |||
67 | 67 | ||
68 | #define NF_CT_EXPECT_CLASS_DEFAULT 0 | 68 | #define NF_CT_EXPECT_CLASS_DEFAULT 0 |
69 | 69 | ||
70 | #define NF_CT_EXPECT_PERMANENT 0x1 | ||
71 | #define NF_CT_EXPECT_INACTIVE 0x2 | ||
72 | |||
73 | int nf_conntrack_expect_init(struct net *net); | 70 | int nf_conntrack_expect_init(struct net *net); |
74 | void nf_conntrack_expect_fini(struct net *net); | 71 | void nf_conntrack_expect_fini(struct net *net); |
75 | 72 | ||
@@ -85,9 +82,16 @@ struct nf_conntrack_expect * | |||
85 | nf_ct_find_expectation(struct net *net, u16 zone, | 82 | nf_ct_find_expectation(struct net *net, u16 zone, |
86 | const struct nf_conntrack_tuple *tuple); | 83 | const struct nf_conntrack_tuple *tuple); |
87 | 84 | ||
88 | void nf_ct_unlink_expect(struct nf_conntrack_expect *exp); | 85 | void nf_ct_unlink_expect_report(struct nf_conntrack_expect *exp, |
86 | u32 pid, int report); | ||
87 | static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp) | ||
88 | { | ||
89 | nf_ct_unlink_expect_report(exp, 0, 0); | ||
90 | } | ||
91 | |||
89 | void nf_ct_remove_expectations(struct nf_conn *ct); | 92 | void nf_ct_remove_expectations(struct nf_conn *ct); |
90 | void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); | 93 | void nf_ct_unexpect_related(struct nf_conntrack_expect *exp); |
94 | void nf_ct_remove_userspace_expectations(void); | ||
91 | 95 | ||
92 | /* Allocate space for an expectation: this is mandatory before calling | 96 | /* Allocate space for an expectation: this is mandatory before calling |
93 | nf_ct_expect_related. You will have to call put afterwards. */ | 97 | nf_ct_expect_related. You will have to call put afterwards. */ |
diff --git a/include/net/netfilter/nf_nat_protocol.h b/include/net/netfilter/nf_nat_protocol.h index df17bac46bf5..93cc90d28e66 100644 --- a/include/net/netfilter/nf_nat_protocol.h +++ b/include/net/netfilter/nf_nat_protocol.h | |||
@@ -45,9 +45,6 @@ struct nf_nat_protocol { | |||
45 | extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto); | 45 | extern int nf_nat_protocol_register(const struct nf_nat_protocol *proto); |
46 | extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto); | 46 | extern void nf_nat_protocol_unregister(const struct nf_nat_protocol *proto); |
47 | 47 | ||
48 | extern const struct nf_nat_protocol *nf_nat_proto_find_get(u_int8_t protocol); | ||
49 | extern void nf_nat_proto_put(const struct nf_nat_protocol *proto); | ||
50 | |||
51 | /* Built-in protocols. */ | 48 | /* Built-in protocols. */ |
52 | extern const struct nf_nat_protocol nf_nat_protocol_tcp; | 49 | extern const struct nf_nat_protocol nf_nat_protocol_tcp; |
53 | extern const struct nf_nat_protocol nf_nat_protocol_udp; | 50 | extern const struct nf_nat_protocol nf_nat_protocol_udp; |
diff --git a/include/net/netfilter/nf_tproxy_core.h b/include/net/netfilter/nf_tproxy_core.h index 208b46f4d6d2..cd85b3bc8327 100644 --- a/include/net/netfilter/nf_tproxy_core.h +++ b/include/net/netfilter/nf_tproxy_core.h | |||
@@ -5,15 +5,201 @@ | |||
5 | #include <linux/in.h> | 5 | #include <linux/in.h> |
6 | #include <linux/skbuff.h> | 6 | #include <linux/skbuff.h> |
7 | #include <net/sock.h> | 7 | #include <net/sock.h> |
8 | #include <net/inet_sock.h> | 8 | #include <net/inet_hashtables.h> |
9 | #include <net/inet6_hashtables.h> | ||
9 | #include <net/tcp.h> | 10 | #include <net/tcp.h> |
10 | 11 | ||
12 | #define NFT_LOOKUP_ANY 0 | ||
13 | #define NFT_LOOKUP_LISTENER 1 | ||
14 | #define NFT_LOOKUP_ESTABLISHED 2 | ||
15 | |||
11 | /* look up and get a reference to a matching socket */ | 16 | /* look up and get a reference to a matching socket */ |
12 | extern struct sock * | 17 | |
18 | |||
19 | /* This function is used by the 'TPROXY' target and the 'socket' | ||
20 | * match. The following lookups are supported: | ||
21 | * | ||
22 | * Explicit TProxy target rule | ||
23 | * =========================== | ||
24 | * | ||
25 | * This is used when the user wants to intercept a connection matching | ||
26 | * an explicit iptables rule. In this case the sockets are assumed | ||
27 | * matching in preference order: | ||
28 | * | ||
29 | * - match: if there's a fully established connection matching the | ||
30 | * _packet_ tuple, it is returned, assuming the redirection | ||
31 | * already took place and we process a packet belonging to an | ||
32 | * established connection | ||
33 | * | ||
34 | * - match: if there's a listening socket matching the redirection | ||
35 | * (e.g. on-port & on-ip of the connection), it is returned, | ||
36 | * regardless if it was bound to 0.0.0.0 or an explicit | ||
37 | * address. The reasoning is that if there's an explicit rule, it | ||
38 | * does not really matter if the listener is bound to an interface | ||
39 | * or to 0. The user already stated that he wants redirection | ||
40 | * (since he added the rule). | ||
41 | * | ||
42 | * "socket" match based redirection (no specific rule) | ||
43 | * =================================================== | ||
44 | * | ||
45 | * There are connections with dynamic endpoints (e.g. FTP data | ||
46 | * connection) that the user is unable to add explicit rules | ||
47 | * for. These are taken care of by a generic "socket" rule. It is | ||
48 | * assumed that the proxy application is trusted to open such | ||
49 | * connections without an explicit iptables rule (except of course the | ||
50 | * generic 'socket' rule). In this case the following sockets are | ||
51 | * matched in preference order: | ||
52 | * | ||
53 | * - match: if there's a fully established connection matching the | ||
54 | * _packet_ tuple | ||
55 | * | ||
56 | * - match: if there's a non-zero bound listener (possibly with a | ||
57 | * non-local address) We don't accept zero-bound listeners, since | ||
58 | * then local services could intercept traffic going through the | ||
59 | * box. | ||
60 | * | ||
61 | * Please note that there's an overlap between what a TPROXY target | ||
62 | * and a socket match will match. Normally if you have both rules the | ||
63 | * "socket" match will be the first one, effectively all packets | ||
64 | * belonging to established connections going through that one. | ||
65 | */ | ||
66 | static inline struct sock * | ||
13 | nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, | 67 | nf_tproxy_get_sock_v4(struct net *net, const u8 protocol, |
14 | const __be32 saddr, const __be32 daddr, | 68 | const __be32 saddr, const __be32 daddr, |
15 | const __be16 sport, const __be16 dport, | 69 | const __be16 sport, const __be16 dport, |
16 | const struct net_device *in, bool listening); | 70 | const struct net_device *in, int lookup_type) |
71 | { | ||
72 | struct sock *sk; | ||
73 | |||
74 | /* look up socket */ | ||
75 | switch (protocol) { | ||
76 | case IPPROTO_TCP: | ||
77 | switch (lookup_type) { | ||
78 | case NFT_LOOKUP_ANY: | ||
79 | sk = __inet_lookup(net, &tcp_hashinfo, | ||
80 | saddr, sport, daddr, dport, | ||
81 | in->ifindex); | ||
82 | break; | ||
83 | case NFT_LOOKUP_LISTENER: | ||
84 | sk = inet_lookup_listener(net, &tcp_hashinfo, | ||
85 | daddr, dport, | ||
86 | in->ifindex); | ||
87 | |||
88 | /* NOTE: we return listeners even if bound to | ||
89 | * 0.0.0.0, those are filtered out in | ||
90 | * xt_socket, since xt_TPROXY needs 0 bound | ||
91 | * listeners too */ | ||
92 | |||
93 | break; | ||
94 | case NFT_LOOKUP_ESTABLISHED: | ||
95 | sk = inet_lookup_established(net, &tcp_hashinfo, | ||
96 | saddr, sport, daddr, dport, | ||
97 | in->ifindex); | ||
98 | break; | ||
99 | default: | ||
100 | WARN_ON(1); | ||
101 | sk = NULL; | ||
102 | break; | ||
103 | } | ||
104 | break; | ||
105 | case IPPROTO_UDP: | ||
106 | sk = udp4_lib_lookup(net, saddr, sport, daddr, dport, | ||
107 | in->ifindex); | ||
108 | if (sk && lookup_type != NFT_LOOKUP_ANY) { | ||
109 | int connected = (sk->sk_state == TCP_ESTABLISHED); | ||
110 | int wildcard = (inet_sk(sk)->inet_rcv_saddr == 0); | ||
111 | |||
112 | /* NOTE: we return listeners even if bound to | ||
113 | * 0.0.0.0, those are filtered out in | ||
114 | * xt_socket, since xt_TPROXY needs 0 bound | ||
115 | * listeners too */ | ||
116 | if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || | ||
117 | (lookup_type == NFT_LOOKUP_LISTENER && connected)) { | ||
118 | sock_put(sk); | ||
119 | sk = NULL; | ||
120 | } | ||
121 | } | ||
122 | break; | ||
123 | default: | ||
124 | WARN_ON(1); | ||
125 | sk = NULL; | ||
126 | } | ||
127 | |||
128 | pr_debug("tproxy socket lookup: proto %u %08x:%u -> %08x:%u, lookup type: %d, sock %p\n", | ||
129 | protocol, ntohl(saddr), ntohs(sport), ntohl(daddr), ntohs(dport), lookup_type, sk); | ||
130 | |||
131 | return sk; | ||
132 | } | ||
133 | |||
134 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
135 | static inline struct sock * | ||
136 | nf_tproxy_get_sock_v6(struct net *net, const u8 protocol, | ||
137 | const struct in6_addr *saddr, const struct in6_addr *daddr, | ||
138 | const __be16 sport, const __be16 dport, | ||
139 | const struct net_device *in, int lookup_type) | ||
140 | { | ||
141 | struct sock *sk; | ||
142 | |||
143 | /* look up socket */ | ||
144 | switch (protocol) { | ||
145 | case IPPROTO_TCP: | ||
146 | switch (lookup_type) { | ||
147 | case NFT_LOOKUP_ANY: | ||
148 | sk = inet6_lookup(net, &tcp_hashinfo, | ||
149 | saddr, sport, daddr, dport, | ||
150 | in->ifindex); | ||
151 | break; | ||
152 | case NFT_LOOKUP_LISTENER: | ||
153 | sk = inet6_lookup_listener(net, &tcp_hashinfo, | ||
154 | daddr, ntohs(dport), | ||
155 | in->ifindex); | ||
156 | |||
157 | /* NOTE: we return listeners even if bound to | ||
158 | * 0.0.0.0, those are filtered out in | ||
159 | * xt_socket, since xt_TPROXY needs 0 bound | ||
160 | * listeners too */ | ||
161 | |||
162 | break; | ||
163 | case NFT_LOOKUP_ESTABLISHED: | ||
164 | sk = __inet6_lookup_established(net, &tcp_hashinfo, | ||
165 | saddr, sport, daddr, ntohs(dport), | ||
166 | in->ifindex); | ||
167 | break; | ||
168 | default: | ||
169 | WARN_ON(1); | ||
170 | sk = NULL; | ||
171 | break; | ||
172 | } | ||
173 | break; | ||
174 | case IPPROTO_UDP: | ||
175 | sk = udp6_lib_lookup(net, saddr, sport, daddr, dport, | ||
176 | in->ifindex); | ||
177 | if (sk && lookup_type != NFT_LOOKUP_ANY) { | ||
178 | int connected = (sk->sk_state == TCP_ESTABLISHED); | ||
179 | int wildcard = ipv6_addr_any(&inet6_sk(sk)->rcv_saddr); | ||
180 | |||
181 | /* NOTE: we return listeners even if bound to | ||
182 | * 0.0.0.0, those are filtered out in | ||
183 | * xt_socket, since xt_TPROXY needs 0 bound | ||
184 | * listeners too */ | ||
185 | if ((lookup_type == NFT_LOOKUP_ESTABLISHED && (!connected || wildcard)) || | ||
186 | (lookup_type == NFT_LOOKUP_LISTENER && connected)) { | ||
187 | sock_put(sk); | ||
188 | sk = NULL; | ||
189 | } | ||
190 | } | ||
191 | break; | ||
192 | default: | ||
193 | WARN_ON(1); | ||
194 | sk = NULL; | ||
195 | } | ||
196 | |||
197 | pr_debug("tproxy socket lookup: proto %u %pI6:%u -> %pI6:%u, lookup type: %d, sock %p\n", | ||
198 | protocol, saddr, ntohs(sport), daddr, ntohs(dport), lookup_type, sk); | ||
199 | |||
200 | return sk; | ||
201 | } | ||
202 | #endif | ||
17 | 203 | ||
18 | static inline void | 204 | static inline void |
19 | nf_tproxy_put_sock(struct sock *sk) | 205 | nf_tproxy_put_sock(struct sock *sk) |
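
To make the preference order described in the comment concrete, a rough sketch of a TPROXY-style caller: look for an established socket matching the packet tuple first, then fall back to a listener on the redirect address/port, and drop the reference with nf_tproxy_put_sock() when done. Everything except the lookup helpers and the NFT_LOOKUP_* constants is invented:

	#include <linux/ip.h>
	#include <net/netfilter/nf_tproxy_core.h>

	static struct sock *
	tproxy_pick_sock_v4(struct net *net, const struct iphdr *iph,
			    __be16 sport, __be16 dport,
			    __be32 laddr, __be16 lport,
			    const struct net_device *in)
	{
		struct sock *sk;

		/* established connection matching the packet tuple wins */
		sk = nf_tproxy_get_sock_v4(net, iph->protocol, iph->saddr, iph->daddr,
					   sport, dport, in, NFT_LOOKUP_ESTABLISHED);
		if (sk)
			return sk;	/* caller: nf_tproxy_put_sock(sk) when done */

		/* otherwise a listener on the redirect address/port */
		return nf_tproxy_get_sock_v4(net, iph->protocol, iph->saddr, laddr,
					     sport, lport, in, NFT_LOOKUP_LISTENER);
	}
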
diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h new file mode 100644 index 000000000000..0dfb34a5b53c --- /dev/null +++ b/include/net/netfilter/xt_log.h | |||
@@ -0,0 +1,54 @@ | |||
1 | #define S_SIZE (1024 - (sizeof(unsigned int) + 1)) | ||
2 | |||
3 | struct sbuff { | ||
4 | unsigned int count; | ||
5 | char buf[S_SIZE + 1]; | ||
6 | }; | ||
7 | static struct sbuff emergency, *emergency_ptr = &emergency; | ||
8 | |||
9 | static int sb_add(struct sbuff *m, const char *f, ...) | ||
10 | { | ||
11 | va_list args; | ||
12 | int len; | ||
13 | |||
14 | if (likely(m->count < S_SIZE)) { | ||
15 | va_start(args, f); | ||
16 | len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args); | ||
17 | va_end(args); | ||
18 | if (likely(m->count + len < S_SIZE)) { | ||
19 | m->count += len; | ||
20 | return 0; | ||
21 | } | ||
22 | } | ||
23 | m->count = S_SIZE; | ||
24 | printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n"); | ||
25 | return -1; | ||
26 | } | ||
27 | |||
28 | static struct sbuff *sb_open(void) | ||
29 | { | ||
30 | struct sbuff *m = kmalloc(sizeof(*m), GFP_ATOMIC); | ||
31 | |||
32 | if (unlikely(!m)) { | ||
33 | local_bh_disable(); | ||
34 | do { | ||
35 | m = xchg(&emergency_ptr, NULL); | ||
36 | } while (!m); | ||
37 | } | ||
38 | m->count = 0; | ||
39 | return m; | ||
40 | } | ||
41 | |||
42 | static void sb_close(struct sbuff *m) | ||
43 | { | ||
44 | m->buf[m->count] = 0; | ||
45 | printk("%s\n", m->buf); | ||
46 | |||
47 | if (likely(m != &emergency)) | ||
48 | kfree(m); | ||
49 | else { | ||
50 | xchg(&emergency_ptr, m); | ||
51 | local_bh_enable(); | ||
52 | } | ||
53 | } | ||
54 | |||
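
The sbuff helpers are meant to be used as a bracketed open/add/close sequence from a LOG-style target, so one rule produces a single printk line even when it is built from many fragments; sb_open() falls back to the static emergency buffer under memory pressure, which is why sb_close() must always run. A minimal, illustrative caller:

	#include <linux/skbuff.h>
	#include <net/netfilter/xt_log.h>

	static void log_packet(const struct sk_buff *skb,
			       const struct net_device *in,
			       const struct net_device *out)
	{
		struct sbuff *m = sb_open();

		sb_add(m, "IN=%s OUT=%s ", in ? in->name : "", out ? out->name : "");
		sb_add(m, "LEN=%u", skb->len);	/* sb_close() appends the newline */
		sb_close(m);			/* flush and release/restore buffer */
	}
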
diff --git a/include/net/netns/xfrm.h b/include/net/netns/xfrm.h index 74f119a2829a..748f91f87cd5 100644 --- a/include/net/netns/xfrm.h +++ b/include/net/netns/xfrm.h | |||
@@ -43,10 +43,6 @@ struct netns_xfrm { | |||
43 | unsigned int policy_count[XFRM_POLICY_MAX * 2]; | 43 | unsigned int policy_count[XFRM_POLICY_MAX * 2]; |
44 | struct work_struct policy_hash_work; | 44 | struct work_struct policy_hash_work; |
45 | 45 | ||
46 | struct dst_ops xfrm4_dst_ops; | ||
47 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
48 | struct dst_ops xfrm6_dst_ops; | ||
49 | #endif | ||
50 | 46 | ||
51 | struct sock *nlsk; | 47 | struct sock *nlsk; |
52 | struct sock *nlsk_stash; | 48 | struct sock *nlsk_stash; |
@@ -58,6 +54,11 @@ struct netns_xfrm { | |||
58 | #ifdef CONFIG_SYSCTL | 54 | #ifdef CONFIG_SYSCTL |
59 | struct ctl_table_header *sysctl_hdr; | 55 | struct ctl_table_header *sysctl_hdr; |
60 | #endif | 56 | #endif |
57 | |||
58 | struct dst_ops xfrm4_dst_ops; | ||
59 | #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) | ||
60 | struct dst_ops xfrm6_dst_ops; | ||
61 | #endif | ||
61 | }; | 62 | }; |
62 | 63 | ||
63 | #endif | 64 | #endif |
diff --git a/include/net/phonet/pep.h b/include/net/phonet/pep.h index 35672b1cf44a..b60b28c99e87 100644 --- a/include/net/phonet/pep.h +++ b/include/net/phonet/pep.h | |||
@@ -45,6 +45,10 @@ struct pep_sock { | |||
45 | u8 tx_fc; /* TX flow control */ | 45 | u8 tx_fc; /* TX flow control */ |
46 | u8 init_enable; /* auto-enable at creation */ | 46 | u8 init_enable; /* auto-enable at creation */ |
47 | u8 aligned; | 47 | u8 aligned; |
48 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
49 | u8 pipe_state; | ||
50 | struct sockaddr_pn remote_pep; | ||
51 | #endif | ||
48 | }; | 52 | }; |
49 | 53 | ||
50 | static inline struct pep_sock *pep_sk(struct sock *sk) | 54 | static inline struct pep_sock *pep_sk(struct sock *sk) |
@@ -77,6 +81,11 @@ static inline struct pnpipehdr *pnp_hdr(struct sk_buff *skb) | |||
77 | #define MAX_PNPIPE_HEADER (MAX_PHONET_HEADER + 4) | 81 | #define MAX_PNPIPE_HEADER (MAX_PHONET_HEADER + 4) |
78 | 82 | ||
79 | enum { | 83 | enum { |
84 | PNS_PIPE_CREATE_REQ = 0x00, | ||
85 | PNS_PIPE_CREATE_RESP, | ||
86 | PNS_PIPE_REMOVE_REQ, | ||
87 | PNS_PIPE_REMOVE_RESP, | ||
88 | |||
80 | PNS_PIPE_DATA = 0x20, | 89 | PNS_PIPE_DATA = 0x20, |
81 | PNS_PIPE_ALIGNED_DATA, | 90 | PNS_PIPE_ALIGNED_DATA, |
82 | 91 | ||
@@ -160,4 +169,21 @@ enum { | |||
160 | PEP_IND_READY, | 169 | PEP_IND_READY, |
161 | }; | 170 | }; |
162 | 171 | ||
172 | #ifdef CONFIG_PHONET_PIPECTRLR | ||
173 | #define PNS_PEP_CONNECT_UTID 0x02 | ||
174 | #define PNS_PIPE_CREATED_IND_UTID 0x04 | ||
175 | #define PNS_PIPE_ENABLE_UTID 0x0A | ||
176 | #define PNS_PIPE_ENABLED_IND_UTID 0x0C | ||
177 | #define PNS_PIPE_DISABLE_UTID 0x0F | ||
178 | #define PNS_PIPE_DISABLED_IND_UTID 0x11 | ||
179 | #define PNS_PEP_DISCONNECT_UTID 0x06 | ||
180 | |||
181 | /* Used for tracking state of a pipe */ | ||
182 | enum { | ||
183 | PIPE_IDLE, | ||
184 | PIPE_DISABLED, | ||
185 | PIPE_ENABLED, | ||
186 | }; | ||
187 | #endif /* CONFIG_PHONET_PIPECTRLR */ | ||
188 | |||
163 | #endif | 189 | #endif |
diff --git a/include/net/phonet/phonet.h b/include/net/phonet/phonet.h index 7b114079a51b..d5df797f9540 100644 --- a/include/net/phonet/phonet.h +++ b/include/net/phonet/phonet.h | |||
@@ -54,6 +54,11 @@ void pn_sock_hash(struct sock *sk); | |||
54 | void pn_sock_unhash(struct sock *sk); | 54 | void pn_sock_unhash(struct sock *sk); |
55 | int pn_sock_get_port(struct sock *sk, unsigned short sport); | 55 | int pn_sock_get_port(struct sock *sk, unsigned short sport); |
56 | 56 | ||
57 | struct sock *pn_find_sock_by_res(struct net *net, u8 res); | ||
58 | int pn_sock_bind_res(struct sock *sock, u8 res); | ||
59 | int pn_sock_unbind_res(struct sock *sk, u8 res); | ||
60 | void pn_sock_unbind_all_res(struct sock *sk); | ||
61 | |||
57 | int pn_skb_send(struct sock *sk, struct sk_buff *skb, | 62 | int pn_skb_send(struct sock *sk, struct sk_buff *skb, |
58 | const struct sockaddr_pn *target); | 63 | const struct sockaddr_pn *target); |
59 | 64 | ||
diff --git a/include/net/phonet/pn_dev.h b/include/net/phonet/pn_dev.h index 2d16783d5e20..13649eb57413 100644 --- a/include/net/phonet/pn_dev.h +++ b/include/net/phonet/pn_dev.h | |||
@@ -57,5 +57,6 @@ struct net_device *phonet_route_output(struct net *net, u8 daddr); | |||
57 | #define PN_NO_ADDR 0xff | 57 | #define PN_NO_ADDR 0xff |
58 | 58 | ||
59 | extern const struct file_operations pn_sock_seq_fops; | 59 | extern const struct file_operations pn_sock_seq_fops; |
60 | extern const struct file_operations pn_res_seq_fops; | ||
60 | 61 | ||
61 | #endif | 62 | #endif |
diff --git a/include/net/raw.h b/include/net/raw.h index 43c57502659b..42ce6fe7a2d5 100644 --- a/include/net/raw.h +++ b/include/net/raw.h | |||
@@ -45,7 +45,10 @@ struct raw_iter_state { | |||
45 | struct raw_hashinfo *h; | 45 | struct raw_hashinfo *h; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define raw_seq_private(seq) ((struct raw_iter_state *)(seq)->private) | 48 | static inline struct raw_iter_state *raw_seq_private(struct seq_file *seq) |
49 | { | ||
50 | return seq->private; | ||
51 | } | ||
49 | void *raw_seq_start(struct seq_file *seq, loff_t *pos); | 52 | void *raw_seq_start(struct seq_file *seq, loff_t *pos); |
50 | void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos); | 53 | void *raw_seq_next(struct seq_file *seq, void *v, loff_t *pos); |
51 | void raw_seq_stop(struct seq_file *seq, void *v); | 54 | void raw_seq_stop(struct seq_file *seq, void *v); |
diff --git a/include/net/route.h b/include/net/route.h index bd732d62e1c3..7e5e73bfa4de 100644 --- a/include/net/route.h +++ b/include/net/route.h | |||
@@ -199,6 +199,8 @@ static inline int ip_route_newports(struct rtable **rp, u8 protocol, | |||
199 | fl.fl_ip_sport = sport; | 199 | fl.fl_ip_sport = sport; |
200 | fl.fl_ip_dport = dport; | 200 | fl.fl_ip_dport = dport; |
201 | fl.proto = protocol; | 201 | fl.proto = protocol; |
202 | if (inet_sk(sk)->transparent) | ||
203 | fl.flags |= FLOWI_FLAG_ANYSRC; | ||
202 | ip_rt_put(*rp); | 204 | ip_rt_put(*rp); |
203 | *rp = NULL; | 205 | *rp = NULL; |
204 | security_sk_classify_flow(sk, &fl); | 206 | security_sk_classify_flow(sk, &fl); |
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h index af60fd050844..e013c68bfb00 100644 --- a/include/net/rtnetlink.h +++ b/include/net/rtnetlink.h | |||
@@ -79,7 +79,6 @@ struct rtnl_link_ops { | |||
79 | 79 | ||
80 | extern int __rtnl_link_register(struct rtnl_link_ops *ops); | 80 | extern int __rtnl_link_register(struct rtnl_link_ops *ops); |
81 | extern void __rtnl_link_unregister(struct rtnl_link_ops *ops); | 81 | extern void __rtnl_link_unregister(struct rtnl_link_ops *ops); |
82 | extern void rtnl_kill_links(struct net *net, struct rtnl_link_ops *ops); | ||
83 | 82 | ||
84 | extern int rtnl_link_register(struct rtnl_link_ops *ops); | 83 | extern int rtnl_link_register(struct rtnl_link_ops *ops); |
85 | extern void rtnl_link_unregister(struct rtnl_link_ops *ops); | 84 | extern void rtnl_link_unregister(struct rtnl_link_ops *ops); |
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h index 3c8728aaab4e..ea1f8a83160d 100644 --- a/include/net/sch_generic.h +++ b/include/net/sch_generic.h | |||
@@ -328,8 +328,7 @@ extern void qdisc_destroy(struct Qdisc *qdisc); | |||
328 | extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); | 328 | extern void qdisc_tree_decrease_qlen(struct Qdisc *qdisc, unsigned int n); |
329 | extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, | 329 | extern struct Qdisc *qdisc_alloc(struct netdev_queue *dev_queue, |
330 | struct Qdisc_ops *ops); | 330 | struct Qdisc_ops *ops); |
331 | extern struct Qdisc *qdisc_create_dflt(struct net_device *dev, | 331 | extern struct Qdisc *qdisc_create_dflt(struct netdev_queue *dev_queue, |
332 | struct netdev_queue *dev_queue, | ||
333 | struct Qdisc_ops *ops, u32 parentid); | 332 | struct Qdisc_ops *ops, u32 parentid); |
334 | extern void qdisc_calculate_pkt_len(struct sk_buff *skb, | 333 | extern void qdisc_calculate_pkt_len(struct sk_buff *skb, |
335 | struct qdisc_size_table *stab); | 334 | struct qdisc_size_table *stab); |
@@ -601,7 +600,7 @@ static inline u32 qdisc_l2t(struct qdisc_rate_table* rtab, unsigned int pktlen) | |||
601 | slot = 0; | 600 | slot = 0; |
602 | slot >>= rtab->rate.cell_log; | 601 | slot >>= rtab->rate.cell_log; |
603 | if (slot > 255) | 602 | if (slot > 255) |
604 | return (rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]); | 603 | return rtab->data[255]*(slot >> 8) + rtab->data[slot & 0xFF]; |
605 | return rtab->data[slot]; | 604 | return rtab->data[slot]; |
606 | } | 605 | } |
607 | 606 | ||
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h index 65946bc43d00..505845ddb0be 100644 --- a/include/net/sctp/sctp.h +++ b/include/net/sctp/sctp.h | |||
@@ -275,24 +275,35 @@ struct sctp_mib { | |||
275 | /* Print debugging messages. */ | 275 | /* Print debugging messages. */ |
276 | #if SCTP_DEBUG | 276 | #if SCTP_DEBUG |
277 | extern int sctp_debug_flag; | 277 | extern int sctp_debug_flag; |
278 | #define SCTP_DEBUG_PRINTK(whatever...) \ | 278 | #define SCTP_DEBUG_PRINTK(fmt, args...) \ |
279 | ((void) (sctp_debug_flag && printk(KERN_DEBUG whatever))) | 279 | do { \ |
280 | #define SCTP_DEBUG_PRINTK_IPADDR(lead, trail, leadparm, saddr, otherparms...) \ | 280 | if (sctp_debug_flag) \ |
281 | if (sctp_debug_flag) { \ | 281 | printk(KERN_DEBUG pr_fmt(fmt), ##args); \ |
282 | if (saddr->sa.sa_family == AF_INET6) { \ | 282 | } while (0) |
283 | printk(KERN_DEBUG \ | 283 | #define SCTP_DEBUG_PRINTK_CONT(fmt, args...) \ |
284 | lead "%pI6" trail, \ | 284 | do { \ |
285 | leadparm, \ | 285 | if (sctp_debug_flag) \ |
286 | &saddr->v6.sin6_addr, \ | 286 | pr_cont(fmt, ##args); \ |
287 | otherparms); \ | 287 | } while (0) |
288 | } else { \ | 288 | #define SCTP_DEBUG_PRINTK_IPADDR(fmt_lead, fmt_trail, \ |
289 | printk(KERN_DEBUG \ | 289 | args_lead, saddr, args_trail...) \ |
290 | lead "%pI4" trail, \ | 290 | do { \ |
291 | leadparm, \ | 291 | if (sctp_debug_flag) { \ |
292 | &saddr->v4.sin_addr.s_addr, \ | 292 | if (saddr->sa.sa_family == AF_INET6) { \ |
293 | otherparms); \ | 293 | printk(KERN_DEBUG \ |
294 | } \ | 294 | pr_fmt(fmt_lead "%pI6" fmt_trail), \ |
295 | } | 295 | args_lead, \ |
296 | &saddr->v6.sin6_addr, \ | ||
297 | args_trail); \ | ||
298 | } else { \ | ||
299 | printk(KERN_DEBUG \ | ||
300 | pr_fmt(fmt_lead "%pI4" fmt_trail), \ | ||
301 | args_lead, \ | ||
302 | &saddr->v4.sin_addr.s_addr, \ | ||
303 | args_trail); \ | ||
304 | } \ | ||
305 | } \ | ||
306 | } while (0) | ||
296 | #define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; } | 307 | #define SCTP_ENABLE_DEBUG { sctp_debug_flag = 1; } |
297 | #define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; } | 308 | #define SCTP_DISABLE_DEBUG { sctp_debug_flag = 0; } |
298 | 309 | ||
@@ -306,6 +317,7 @@ extern int sctp_debug_flag; | |||
306 | #else /* SCTP_DEBUG */ | 317 | #else /* SCTP_DEBUG */ |
307 | 318 | ||
308 | #define SCTP_DEBUG_PRINTK(whatever...) | 319 | #define SCTP_DEBUG_PRINTK(whatever...) |
320 | #define SCTP_DEBUG_PRINTK_CONT(fmt, args...) | ||
309 | #define SCTP_DEBUG_PRINTK_IPADDR(whatever...) | 321 | #define SCTP_DEBUG_PRINTK_IPADDR(whatever...) |
310 | #define SCTP_ENABLE_DEBUG | 322 | #define SCTP_ENABLE_DEBUG |
311 | #define SCTP_DISABLE_DEBUG | 323 | #define SCTP_DISABLE_DEBUG |
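
Usage-wise, the split into SCTP_DEBUG_PRINTK() and SCTP_DEBUG_PRINTK_CONT() means a multi-part debug line gets KERN_DEBUG and pr_fmt() prepended only on the first fragment, with plain continuations afterwards; a small illustrative helper (the transport field names are assumed from struct sctp_transport):

	#include <net/sctp/sctp.h>

	static void sctp_debug_cwnd(struct sctp_association *asoc,
				    struct sctp_transport *t)
	{
		SCTP_DEBUG_PRINTK("asoc %p, transport %p: cwnd %u", asoc, t, t->cwnd);
		SCTP_DEBUG_PRINTK_CONT(", ssthresh %u\n", t->ssthresh);
	}
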
@@ -393,7 +405,7 @@ static inline void sctp_v6_del_protocol(void) { return; } | |||
393 | /* Map an association to an assoc_id. */ | 405 | /* Map an association to an assoc_id. */ |
394 | static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) | 406 | static inline sctp_assoc_t sctp_assoc2id(const struct sctp_association *asoc) |
395 | { | 407 | { |
396 | return (asoc?asoc->assoc_id:0); | 408 | return asoc ? asoc->assoc_id : 0; |
397 | } | 409 | } |
398 | 410 | ||
399 | /* Look up the association by its id. */ | 411 | /* Look up the association by its id. */ |
@@ -461,7 +473,7 @@ static inline void sctp_skb_set_owner_r(struct sk_buff *skb, struct sock *sk) | |||
461 | /* Tests if the list has one and only one entry. */ | 473 | /* Tests if the list has one and only one entry. */ |
462 | static inline int sctp_list_single_entry(struct list_head *head) | 474 | static inline int sctp_list_single_entry(struct list_head *head) |
463 | { | 475 | { |
464 | return ((head->next != head) && (head->next == head->prev)); | 476 | return (head->next != head) && (head->next == head->prev); |
465 | } | 477 | } |
466 | 478 | ||
467 | /* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ | 479 | /* Generate a random jitter in the range of -50% ~ +50% of input RTO. */ |
@@ -619,13 +631,13 @@ static inline int sctp_sanity_check(void) | |||
619 | /* This is the hash function for the SCTP port hash table. */ | 631 | /* This is the hash function for the SCTP port hash table. */ |
620 | static inline int sctp_phashfn(__u16 lport) | 632 | static inline int sctp_phashfn(__u16 lport) |
621 | { | 633 | { |
622 | return (lport & (sctp_port_hashsize - 1)); | 634 | return lport & (sctp_port_hashsize - 1); |
623 | } | 635 | } |
624 | 636 | ||
625 | /* This is the hash function for the endpoint hash table. */ | 637 | /* This is the hash function for the endpoint hash table. */ |
626 | static inline int sctp_ep_hashfn(__u16 lport) | 638 | static inline int sctp_ep_hashfn(__u16 lport) |
627 | { | 639 | { |
628 | return (lport & (sctp_ep_hashsize - 1)); | 640 | return lport & (sctp_ep_hashsize - 1); |
629 | } | 641 | } |
630 | 642 | ||
631 | /* This is the hash function for the association hash table. */ | 643 | /* This is the hash function for the association hash table. */ |
@@ -633,7 +645,7 @@ static inline int sctp_assoc_hashfn(__u16 lport, __u16 rport) | |||
633 | { | 645 | { |
634 | int h = (lport << 16) + rport; | 646 | int h = (lport << 16) + rport; |
635 | h ^= h>>8; | 647 | h ^= h>>8; |
636 | return (h & (sctp_assoc_hashsize - 1)); | 648 | return h & (sctp_assoc_hashsize - 1); |
637 | } | 649 | } |
638 | 650 | ||
639 | /* This is the hash function for the association hash table. This is | 651 | /* This is the hash function for the association hash table. This is |
@@ -644,7 +656,7 @@ static inline int sctp_vtag_hashfn(__u16 lport, __u16 rport, __u32 vtag) | |||
644 | { | 656 | { |
645 | int h = (lport << 16) + rport; | 657 | int h = (lport << 16) + rport; |
646 | h ^= vtag; | 658 | h ^= vtag; |
647 | return (h & (sctp_assoc_hashsize-1)); | 659 | return h & (sctp_assoc_hashsize - 1); |
648 | } | 660 | } |
649 | 661 | ||
650 | #define sctp_for_each_hentry(epb, node, head) \ | 662 | #define sctp_for_each_hentry(epb, node, head) \ |
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h index 4088c89a9055..9352d12f02de 100644 --- a/include/net/sctp/sm.h +++ b/include/net/sctp/sm.h | |||
@@ -345,12 +345,12 @@ enum { | |||
345 | 345 | ||
346 | static inline int TSN_lt(__u32 s, __u32 t) | 346 | static inline int TSN_lt(__u32 s, __u32 t) |
347 | { | 347 | { |
348 | return (((s) - (t)) & TSN_SIGN_BIT); | 348 | return ((s) - (t)) & TSN_SIGN_BIT; |
349 | } | 349 | } |
350 | 350 | ||
351 | static inline int TSN_lte(__u32 s, __u32 t) | 351 | static inline int TSN_lte(__u32 s, __u32 t) |
352 | { | 352 | { |
353 | return (((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT)); | 353 | return ((s) == (t)) || (((s) - (t)) & TSN_SIGN_BIT); |
354 | } | 354 | } |
355 | 355 | ||
356 | /* Compare two SSNs */ | 356 | /* Compare two SSNs */ |
@@ -369,12 +369,12 @@ enum { | |||
369 | 369 | ||
370 | static inline int SSN_lt(__u16 s, __u16 t) | 370 | static inline int SSN_lt(__u16 s, __u16 t) |
371 | { | 371 | { |
372 | return (((s) - (t)) & SSN_SIGN_BIT); | 372 | return ((s) - (t)) & SSN_SIGN_BIT; |
373 | } | 373 | } |
374 | 374 | ||
375 | static inline int SSN_lte(__u16 s, __u16 t) | 375 | static inline int SSN_lte(__u16 s, __u16 t) |
376 | { | 376 | { |
377 | return (((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT)); | 377 | return ((s) == (t)) || (((s) - (t)) & SSN_SIGN_BIT); |
378 | } | 378 | } |
379 | 379 | ||
380 | /* | 380 | /* |
@@ -388,7 +388,7 @@ enum { | |||
388 | 388 | ||
389 | static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t) | 389 | static inline int ADDIP_SERIAL_gte(__u16 s, __u16 t) |
390 | { | 390 | { |
391 | return (((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT)); | 391 | return ((s) == (t)) || (((t) - (s)) & ADDIP_SERIAL_SIGN_BIT); |
392 | } | 392 | } |
393 | 393 | ||
394 | /* Check VTAG of the packet matches the sender's own tag. */ | 394 | /* Check VTAG of the packet matches the sender's own tag. */ |
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index f9e7473613bd..69fef4fb79c0 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h | |||
@@ -847,7 +847,7 @@ void sctp_packet_free(struct sctp_packet *); | |||
847 | 847 | ||
848 | static inline int sctp_packet_empty(struct sctp_packet *packet) | 848 | static inline int sctp_packet_empty(struct sctp_packet *packet) |
849 | { | 849 | { |
850 | return (packet->size == packet->overhead); | 850 | return packet->size == packet->overhead; |
851 | } | 851 | } |
852 | 852 | ||
853 | /* This represents a remote transport address. | 853 | /* This represents a remote transport address. |
diff --git a/include/net/sctp/tsnmap.h b/include/net/sctp/tsnmap.h index 4aabc5a96cf6..e7728bc14ccf 100644 --- a/include/net/sctp/tsnmap.h +++ b/include/net/sctp/tsnmap.h | |||
@@ -157,7 +157,7 @@ __u16 sctp_tsnmap_pending(struct sctp_tsnmap *map); | |||
157 | /* Is there a gap in the TSN map? */ | 157 | /* Is there a gap in the TSN map? */ |
158 | static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map) | 158 | static inline int sctp_tsnmap_has_gap(const struct sctp_tsnmap *map) |
159 | { | 159 | { |
160 | return (map->cumulative_tsn_ack_point != map->max_tsn_seen); | 160 | return map->cumulative_tsn_ack_point != map->max_tsn_seen; |
161 | } | 161 | } |
162 | 162 | ||
163 | /* Mark a duplicate TSN. Note: limit the storage of duplicate TSN | 163 | /* Mark a duplicate TSN. Note: limit the storage of duplicate TSN |
diff --git a/include/net/sock.h b/include/net/sock.h index ac53bfbdfe16..73a4f9702a65 100644 --- a/include/net/sock.h +++ b/include/net/sock.h | |||
@@ -752,6 +752,7 @@ struct proto { | |||
752 | /* Keeping track of sk's, looking them up, and port selection methods. */ | 752 | /* Keeping track of sk's, looking them up, and port selection methods. */ |
753 | void (*hash)(struct sock *sk); | 753 | void (*hash)(struct sock *sk); |
754 | void (*unhash)(struct sock *sk); | 754 | void (*unhash)(struct sock *sk); |
755 | void (*rehash)(struct sock *sk); | ||
755 | int (*get_port)(struct sock *sk, unsigned short snum); | 756 | int (*get_port)(struct sock *sk, unsigned short snum); |
756 | 757 | ||
757 | /* Keeping track of sockets in use */ | 758 | /* Keeping track of sockets in use */ |
@@ -1557,7 +1558,11 @@ static inline void sk_wake_async(struct sock *sk, int how, int band) | |||
1557 | } | 1558 | } |
1558 | 1559 | ||
1559 | #define SOCK_MIN_SNDBUF 2048 | 1560 | #define SOCK_MIN_SNDBUF 2048 |
1560 | #define SOCK_MIN_RCVBUF 256 | 1561 | /* |
1562 | * Since sk_rmem_alloc sums skb->truesize, even a small frame might need | ||
1563 | * sizeof(sk_buff) + MTU + padding, unless the net driver performs copybreak | ||
1564 | */ | ||
1565 | #define SOCK_MIN_RCVBUF (2048 + sizeof(struct sk_buff)) | ||
1561 | 1566 | ||
1562 | static inline void sk_stream_moderate_sndbuf(struct sock *sk) | 1567 | static inline void sk_stream_moderate_sndbuf(struct sock *sk) |
1563 | { | 1568 | { |
@@ -1669,17 +1674,13 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, | |||
1669 | 1674 | ||
1670 | /** | 1675 | /** |
1671 | * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped | 1676 | * sock_tx_timestamp - checks whether the outgoing packet is to be time stamped |
1672 | * @msg: outgoing packet | ||
1673 | * @sk: socket sending this packet | 1677 | * @sk: socket sending this packet |
1674 | * @shtx: filled with instructions for time stamping | 1678 | * @tx_flags: filled with instructions for time stamping |
1675 | * | 1679 | * |
1676 | * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if | 1680 | * Currently only depends on SOCK_TIMESTAMPING* flags. Returns error code if |
1677 | * parameters are invalid. | 1681 | * parameters are invalid. |
1678 | */ | 1682 | */ |
1679 | extern int sock_tx_timestamp(struct msghdr *msg, | 1683 | extern int sock_tx_timestamp(struct sock *sk, __u8 *tx_flags); |
1680 | struct sock *sk, | ||
1681 | union skb_shared_tx *shtx); | ||
1682 | |||
1683 | 1684 | ||
1684 | /** | 1685 | /** |
1685 | * sk_eat_skb - Release a skb if it is no longer needed | 1686 | * sk_eat_skb - Release a skb if it is no longer needed |
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h new file mode 100644 index 000000000000..9e8710be7a04 --- /dev/null +++ b/include/net/tc_act/tc_csum.h | |||
@@ -0,0 +1,15 @@ | |||
1 | #ifndef __NET_TC_CSUM_H | ||
2 | #define __NET_TC_CSUM_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | #include <net/act_api.h> | ||
6 | |||
7 | struct tcf_csum { | ||
8 | struct tcf_common common; | ||
9 | |||
10 | u32 update_flags; | ||
11 | }; | ||
12 | #define to_tcf_csum(pc) \ | ||
13 | container_of(pc,struct tcf_csum,common) | ||
14 | |||
15 | #endif /* __NET_TC_CSUM_H */ | ||
diff --git a/include/net/tcp.h b/include/net/tcp.h index df6a2eb20193..4fee0424af7e 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h | |||
@@ -268,11 +268,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3) | |||
268 | return seq3 - seq2 >= seq1 - seq2; | 268 | return seq3 - seq2 >= seq1 - seq2; |
269 | } | 269 | } |
270 | 270 | ||
271 | static inline int tcp_too_many_orphans(struct sock *sk, int num) | 271 | static inline bool tcp_too_many_orphans(struct sock *sk, int shift) |
272 | { | 272 | { |
273 | return (num > sysctl_tcp_max_orphans) || | 273 | struct percpu_counter *ocp = sk->sk_prot->orphan_count; |
274 | (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && | 274 | int orphans = percpu_counter_read_positive(ocp); |
275 | atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]); | 275 | |
276 | if (orphans << shift > sysctl_tcp_max_orphans) { | ||
277 | orphans = percpu_counter_sum_positive(ocp); | ||
278 | if (orphans << shift > sysctl_tcp_max_orphans) | ||
279 | return true; | ||
280 | } | ||
281 | |||
282 | if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && | ||
283 | atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) | ||
284 | return true; | ||
285 | return false; | ||
276 | } | 286 | } |
277 | 287 | ||
278 | /* syncookies: remember time of last synqueue overflow */ | 288 | /* syncookies: remember time of last synqueue overflow */ |
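
The rewritten orphan check above follows a cheap-then-exact pattern: percpu_counter_read_positive() is an approximate, lock-free read, and the expensive percpu_counter_sum_positive() is only paid when the approximation already crosses the limit. The same pattern in isolation (counter and limit are generic placeholders):

	#include <linux/types.h>
	#include <linux/percpu_counter.h>

	static bool over_limit(struct percpu_counter *counter, s64 limit)
	{
		/* fast path: per-cpu deltas may make this read slightly stale */
		if (percpu_counter_read_positive(counter) <= limit)
			return false;
		/* slow path: fold all per-cpu deltas before deciding */
		return percpu_counter_sum_positive(counter) > limit;
	}
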
@@ -336,8 +346,6 @@ static inline void tcp_dec_quickack_mode(struct sock *sk, | |||
336 | } | 346 | } |
337 | } | 347 | } |
338 | 348 | ||
339 | extern void tcp_enter_quickack_mode(struct sock *sk); | ||
340 | |||
341 | #define TCP_ECN_OK 1 | 349 | #define TCP_ECN_OK 1 |
342 | #define TCP_ECN_QUEUE_CWR 2 | 350 | #define TCP_ECN_QUEUE_CWR 2 |
343 | #define TCP_ECN_DEMAND_CWR 4 | 351 | #define TCP_ECN_DEMAND_CWR 4 |
@@ -465,8 +473,22 @@ extern unsigned int tcp_current_mss(struct sock *sk); | |||
465 | /* Bound MSS / TSO packet size with the half of the window */ | 473 | /* Bound MSS / TSO packet size with the half of the window */ |
466 | static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) | 474 | static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize) |
467 | { | 475 | { |
468 | if (tp->max_window && pktsize > (tp->max_window >> 1)) | 476 | int cutoff; |
469 | return max(tp->max_window >> 1, 68U - tp->tcp_header_len); | 477 | |
478 | /* When peer uses tiny windows, there is no use in packetizing | ||
479 | * to sub-MSS pieces for the sake of SWS or making sure there | ||
480 | * are enough packets in the pipe for fast recovery. | ||
481 | * | ||
482 | * On the other hand, for extremely large MSS devices, handling | ||
483 | * smaller than MSS windows in this way does make sense. | ||
484 | */ | ||
485 | if (tp->max_window >= 512) | ||
486 | cutoff = (tp->max_window >> 1); | ||
487 | else | ||
488 | cutoff = tp->max_window; | ||
489 | |||
490 | if (cutoff && pktsize > cutoff) | ||
491 | return max_t(int, cutoff, 68U - tp->tcp_header_len); | ||
470 | else | 492 | else |
471 | return pktsize; | 493 | return pktsize; |
472 | } | 494 | } |
@@ -779,6 +801,15 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk) | |||
779 | /* Use define here intentionally to get WARN_ON location shown at the caller */ | 801 | /* Use define here intentionally to get WARN_ON location shown at the caller */ |
780 | #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) | 802 | #define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out) |
781 | 803 | ||
804 | /* | ||
805 | * Convert RFC 3390 larger initial window into an equivalent number of packets. | ||
806 | * This is based on the numbers specified in RFC 5681, 3.1. | ||
807 | */ | ||
808 | static inline u32 rfc3390_bytes_to_packets(const u32 smss) | ||
809 | { | ||
810 | return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3); | ||
811 | } | ||
812 | |||
782 | extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); | 813 | extern void tcp_enter_cwr(struct sock *sk, const int set_ssthresh); |
783 | extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); | 814 | extern __u32 tcp_init_cwnd(struct tcp_sock *tp, struct dst_entry *dst); |
784 | 815 | ||
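Editor's note: RFC 5681, section 3.1 sets the initial window to roughly min(4*SMSS, max(2*SMSS, 4380 bytes)); the new helper above collapses that rule into a segment count. A small self-contained check, with sample MSS values chosen only for illustration:

#include <assert.h>
#include <stdio.h>

static unsigned int rfc3390_bytes_to_packets(unsigned int smss)
{
	return smss <= 1095 ? 4 : (smss > 2190 ? 2 : 3);
}

int main(void)
{
	assert(rfc3390_bytes_to_packets(536)  == 4);	/* 4 * 536  <= 4380 */
	assert(rfc3390_bytes_to_packets(1460) == 3);	/* 3 * 1460 ~= 4380 */
	assert(rfc3390_bytes_to_packets(9000) == 2);	/* 2 * 9000 >  4380 */
	printf("ok\n");
	return 0;
}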
diff --git a/include/net/tipc/tipc.h b/include/net/tipc/tipc.h index 15af6dca0b49..1e0645e1eed2 100644 --- a/include/net/tipc/tipc.h +++ b/include/net/tipc/tipc.h | |||
@@ -50,8 +50,6 @@ | |||
50 | * TIPC operating mode routines | 50 | * TIPC operating mode routines |
51 | */ | 51 | */ |
52 | 52 | ||
53 | u32 tipc_get_addr(void); | ||
54 | |||
55 | #define TIPC_NOT_RUNNING 0 | 53 | #define TIPC_NOT_RUNNING 0 |
56 | #define TIPC_NODE_MODE 1 | 54 | #define TIPC_NODE_MODE 1 |
57 | #define TIPC_NET_MODE 2 | 55 | #define TIPC_NET_MODE 2 |
@@ -62,8 +60,6 @@ int tipc_attach(unsigned int *userref, tipc_mode_event, void *usr_handle); | |||
62 | 60 | ||
63 | void tipc_detach(unsigned int userref); | 61 | void tipc_detach(unsigned int userref); |
64 | 62 | ||
65 | int tipc_get_mode(void); | ||
66 | |||
67 | /* | 63 | /* |
68 | * TIPC port manipulation routines | 64 | * TIPC port manipulation routines |
69 | */ | 65 | */ |
@@ -153,12 +149,6 @@ int tipc_disconnect(u32 portref); | |||
153 | 149 | ||
154 | int tipc_shutdown(u32 ref); | 150 | int tipc_shutdown(u32 ref); |
155 | 151 | ||
156 | int tipc_isconnected(u32 portref, int *isconnected); | ||
157 | |||
158 | int tipc_peer(u32 portref, struct tipc_portid *peer); | ||
159 | |||
160 | int tipc_ref_valid(u32 portref); | ||
161 | |||
162 | /* | 152 | /* |
163 | * TIPC messaging routines | 153 | * TIPC messaging routines |
164 | */ | 154 | */ |
@@ -170,38 +160,12 @@ int tipc_send(u32 portref, | |||
170 | unsigned int num_sect, | 160 | unsigned int num_sect, |
171 | struct iovec const *msg_sect); | 161 | struct iovec const *msg_sect); |
172 | 162 | ||
173 | int tipc_send_buf(u32 portref, | ||
174 | struct sk_buff *buf, | ||
175 | unsigned int dsz); | ||
176 | |||
177 | int tipc_send2name(u32 portref, | 163 | int tipc_send2name(u32 portref, |
178 | struct tipc_name const *name, | 164 | struct tipc_name const *name, |
179 | u32 domain, | 165 | u32 domain, |
180 | unsigned int num_sect, | 166 | unsigned int num_sect, |
181 | struct iovec const *msg_sect); | 167 | struct iovec const *msg_sect); |
182 | 168 | ||
183 | int tipc_send_buf2name(u32 portref, | ||
184 | struct tipc_name const *name, | ||
185 | u32 domain, | ||
186 | struct sk_buff *buf, | ||
187 | unsigned int dsz); | ||
188 | |||
189 | int tipc_forward2name(u32 portref, | ||
190 | struct tipc_name const *name, | ||
191 | u32 domain, | ||
192 | unsigned int section_count, | ||
193 | struct iovec const *msg_sect, | ||
194 | struct tipc_portid const *origin, | ||
195 | unsigned int importance); | ||
196 | |||
197 | int tipc_forward_buf2name(u32 portref, | ||
198 | struct tipc_name const *name, | ||
199 | u32 domain, | ||
200 | struct sk_buff *buf, | ||
201 | unsigned int dsz, | ||
202 | struct tipc_portid const *orig, | ||
203 | unsigned int importance); | ||
204 | |||
205 | int tipc_send2port(u32 portref, | 169 | int tipc_send2port(u32 portref, |
206 | struct tipc_portid const *dest, | 170 | struct tipc_portid const *dest, |
207 | unsigned int num_sect, | 171 | unsigned int num_sect, |
@@ -212,46 +176,11 @@ int tipc_send_buf2port(u32 portref, | |||
212 | struct sk_buff *buf, | 176 | struct sk_buff *buf, |
213 | unsigned int dsz); | 177 | unsigned int dsz); |
214 | 178 | ||
215 | int tipc_forward2port(u32 portref, | ||
216 | struct tipc_portid const *dest, | ||
217 | unsigned int num_sect, | ||
218 | struct iovec const *msg_sect, | ||
219 | struct tipc_portid const *origin, | ||
220 | unsigned int importance); | ||
221 | |||
222 | int tipc_forward_buf2port(u32 portref, | ||
223 | struct tipc_portid const *dest, | ||
224 | struct sk_buff *buf, | ||
225 | unsigned int dsz, | ||
226 | struct tipc_portid const *orig, | ||
227 | unsigned int importance); | ||
228 | |||
229 | int tipc_multicast(u32 portref, | 179 | int tipc_multicast(u32 portref, |
230 | struct tipc_name_seq const *seq, | 180 | struct tipc_name_seq const *seq, |
231 | u32 domain, /* currently unused */ | 181 | u32 domain, /* currently unused */ |
232 | unsigned int section_count, | 182 | unsigned int section_count, |
233 | struct iovec const *msg); | 183 | struct iovec const *msg); |
234 | |||
235 | #if 0 | ||
236 | int tipc_multicast_buf(u32 portref, | ||
237 | struct tipc_name_seq const *seq, | ||
238 | u32 domain, | ||
239 | void *buf, | ||
240 | unsigned int size); | ||
241 | #endif | ||
242 | |||
243 | /* | ||
244 | * TIPC subscription routines | ||
245 | */ | ||
246 | |||
247 | int tipc_ispublished(struct tipc_name const *name); | ||
248 | |||
249 | /* | ||
250 | * Get number of available nodes within specified domain (excluding own node) | ||
251 | */ | ||
252 | |||
253 | unsigned int tipc_available_nodes(const u32 domain); | ||
254 | |||
255 | #endif | 184 | #endif |
256 | 185 | ||
257 | #endif | 186 | #endif |
diff --git a/include/net/tipc/tipc_msg.h b/include/net/tipc/tipc_msg.h index 2e159a812f83..ffe50b4e7b93 100644 --- a/include/net/tipc/tipc_msg.h +++ b/include/net/tipc/tipc_msg.h | |||
@@ -107,7 +107,7 @@ static inline u32 msg_hdr_sz(struct tipc_msg *m) | |||
107 | 107 | ||
108 | static inline int msg_short(struct tipc_msg *m) | 108 | static inline int msg_short(struct tipc_msg *m) |
109 | { | 109 | { |
110 | return (msg_hdr_sz(m) == 24); | 110 | return msg_hdr_sz(m) == 24; |
111 | } | 111 | } |
112 | 112 | ||
113 | static inline u32 msg_size(struct tipc_msg *m) | 113 | static inline u32 msg_size(struct tipc_msg *m) |
@@ -117,7 +117,7 @@ static inline u32 msg_size(struct tipc_msg *m) | |||
117 | 117 | ||
118 | static inline u32 msg_data_sz(struct tipc_msg *m) | 118 | static inline u32 msg_data_sz(struct tipc_msg *m) |
119 | { | 119 | { |
120 | return (msg_size(m) - msg_hdr_sz(m)); | 120 | return msg_size(m) - msg_hdr_sz(m); |
121 | } | 121 | } |
122 | 122 | ||
123 | static inline unchar *msg_data(struct tipc_msg *m) | 123 | static inline unchar *msg_data(struct tipc_msg *m) |
@@ -132,17 +132,17 @@ static inline u32 msg_type(struct tipc_msg *m) | |||
132 | 132 | ||
133 | static inline u32 msg_named(struct tipc_msg *m) | 133 | static inline u32 msg_named(struct tipc_msg *m) |
134 | { | 134 | { |
135 | return (msg_type(m) == TIPC_NAMED_MSG); | 135 | return msg_type(m) == TIPC_NAMED_MSG; |
136 | } | 136 | } |
137 | 137 | ||
138 | static inline u32 msg_mcast(struct tipc_msg *m) | 138 | static inline u32 msg_mcast(struct tipc_msg *m) |
139 | { | 139 | { |
140 | return (msg_type(m) == TIPC_MCAST_MSG); | 140 | return msg_type(m) == TIPC_MCAST_MSG; |
141 | } | 141 | } |
142 | 142 | ||
143 | static inline u32 msg_connected(struct tipc_msg *m) | 143 | static inline u32 msg_connected(struct tipc_msg *m) |
144 | { | 144 | { |
145 | return (msg_type(m) == TIPC_CONN_MSG); | 145 | return msg_type(m) == TIPC_CONN_MSG; |
146 | } | 146 | } |
147 | 147 | ||
148 | static inline u32 msg_errcode(struct tipc_msg *m) | 148 | static inline u32 msg_errcode(struct tipc_msg *m) |
diff --git a/include/net/tipc/tipc_port.h b/include/net/tipc/tipc_port.h index c54917cbfa48..1893aaf49426 100644 --- a/include/net/tipc/tipc_port.h +++ b/include/net/tipc/tipc_port.h | |||
@@ -88,8 +88,6 @@ void tipc_acknowledge(u32 port_ref,u32 ack); | |||
88 | 88 | ||
89 | struct tipc_port *tipc_get_port(const u32 ref); | 89 | struct tipc_port *tipc_get_port(const u32 ref); |
90 | 90 | ||
91 | void *tipc_get_handle(const u32 ref); | ||
92 | |||
93 | /* | 91 | /* |
94 | * The following routines require that the port be locked on entry | 92 | * The following routines require that the port be locked on entry |
95 | */ | 93 | */ |
diff --git a/include/net/udp.h b/include/net/udp.h index 7abdf305da50..200b82848c9a 100644 --- a/include/net/udp.h +++ b/include/net/udp.h | |||
@@ -151,6 +151,7 @@ static inline void udp_lib_hash(struct sock *sk) | |||
151 | } | 151 | } |
152 | 152 | ||
153 | extern void udp_lib_unhash(struct sock *sk); | 153 | extern void udp_lib_unhash(struct sock *sk); |
154 | extern void udp_lib_rehash(struct sock *sk, u16 new_hash); | ||
154 | 155 | ||
155 | static inline void udp_lib_close(struct sock *sk, long timeout) | 156 | static inline void udp_lib_close(struct sock *sk, long timeout) |
156 | { | 157 | { |
@@ -182,6 +183,9 @@ extern int udp_lib_setsockopt(struct sock *sk, int level, int optname, | |||
182 | extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, | 183 | extern struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport, |
183 | __be32 daddr, __be16 dport, | 184 | __be32 daddr, __be16 dport, |
184 | int dif); | 185 | int dif); |
186 | extern struct sock *udp6_lib_lookup(struct net *net, const struct in6_addr *saddr, __be16 sport, | ||
187 | const struct in6_addr *daddr, __be16 dport, | ||
188 | int dif); | ||
185 | 189 | ||
186 | /* | 190 | /* |
187 | * SNMP statistics for UDP and UDP-Lite | 191 | * SNMP statistics for UDP and UDP-Lite |
diff --git a/include/net/xfrm.h b/include/net/xfrm.h index fc8f36dd0f5c..f28d7c9b9f8d 100644 --- a/include/net/xfrm.h +++ b/include/net/xfrm.h | |||
@@ -298,8 +298,8 @@ struct xfrm_state_afinfo { | |||
298 | const struct xfrm_type *type_map[IPPROTO_MAX]; | 298 | const struct xfrm_type *type_map[IPPROTO_MAX]; |
299 | struct xfrm_mode *mode_map[XFRM_MODE_MAX]; | 299 | struct xfrm_mode *mode_map[XFRM_MODE_MAX]; |
300 | int (*init_flags)(struct xfrm_state *x); | 300 | int (*init_flags)(struct xfrm_state *x); |
301 | void (*init_tempsel)(struct xfrm_state *x, struct flowi *fl, | 301 | void (*init_tempsel)(struct xfrm_selector *sel, struct flowi *fl); |
302 | struct xfrm_tmpl *tmpl, | 302 | void (*init_temprop)(struct xfrm_state *x, struct xfrm_tmpl *tmpl, |
303 | xfrm_address_t *daddr, xfrm_address_t *saddr); | 303 | xfrm_address_t *daddr, xfrm_address_t *saddr); |
304 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); | 304 | int (*tmpl_sort)(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n); |
305 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); | 305 | int (*state_sort)(struct xfrm_state **dst, struct xfrm_state **src, int n); |
@@ -1419,7 +1419,6 @@ extern int xfrm6_input_addr(struct sk_buff *skb, xfrm_address_t *daddr, | |||
1419 | extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); | 1419 | extern int xfrm6_tunnel_register(struct xfrm6_tunnel *handler, unsigned short family); |
1420 | extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family); | 1420 | extern int xfrm6_tunnel_deregister(struct xfrm6_tunnel *handler, unsigned short family); |
1421 | extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); | 1421 | extern __be32 xfrm6_tunnel_alloc_spi(struct net *net, xfrm_address_t *saddr); |
1422 | extern void xfrm6_tunnel_free_spi(struct net *net, xfrm_address_t *saddr); | ||
1423 | extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr); | 1422 | extern __be32 xfrm6_tunnel_spi_lookup(struct net *net, xfrm_address_t *saddr); |
1424 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); | 1423 | extern int xfrm6_extract_output(struct xfrm_state *x, struct sk_buff *skb); |
1425 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); | 1424 | extern int xfrm6_prepare_output(struct xfrm_state *x, struct sk_buff *skb); |
@@ -1466,8 +1465,6 @@ struct xfrm_state *xfrm_find_acq(struct net *net, struct xfrm_mark *mark, | |||
1466 | xfrm_address_t *saddr, int create, | 1465 | xfrm_address_t *saddr, int create, |
1467 | unsigned short family); | 1466 | unsigned short family); |
1468 | extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); | 1467 | extern int xfrm_sk_policy_insert(struct sock *sk, int dir, struct xfrm_policy *pol); |
1469 | extern int xfrm_bundle_ok(struct xfrm_policy *pol, struct xfrm_dst *xdst, | ||
1470 | struct flowi *fl, int family, int strict); | ||
1471 | 1468 | ||
1472 | #ifdef CONFIG_XFRM_MIGRATE | 1469 | #ifdef CONFIG_XFRM_MIGRATE |
1473 | extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type, | 1470 | extern int km_migrate(struct xfrm_selector *sel, u8 dir, u8 type, |
diff --git a/include/pcmcia/cs.h b/include/pcmcia/cs.h deleted file mode 100644 index 68d8bde7e8d6..000000000000 --- a/include/pcmcia/cs.h +++ /dev/null | |||
@@ -1,95 +0,0 @@ | |||
1 | /* | ||
2 | * cs.h | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * The initial developer of the original code is David A. Hinds | ||
9 | * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds | ||
10 | * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. | ||
11 | * | ||
12 | * (C) 1999 David A. Hinds | ||
13 | */ | ||
14 | |||
15 | #ifndef _LINUX_CS_H | ||
16 | #define _LINUX_CS_H | ||
17 | |||
18 | #ifdef __KERNEL__ | ||
19 | #include <linux/interrupt.h> | ||
20 | #endif | ||
21 | |||
22 | /* ModifyConfiguration */ | ||
23 | typedef struct modconf_t { | ||
24 | u_int Attributes; | ||
25 | u_int Vcc, Vpp1, Vpp2; | ||
26 | } modconf_t; | ||
27 | |||
28 | /* Attributes for ModifyConfiguration */ | ||
29 | #define CONF_IRQ_CHANGE_VALID 0x0100 | ||
30 | #define CONF_VCC_CHANGE_VALID 0x0200 | ||
31 | #define CONF_VPP1_CHANGE_VALID 0x0400 | ||
32 | #define CONF_VPP2_CHANGE_VALID 0x0800 | ||
33 | #define CONF_IO_CHANGE_WIDTH 0x1000 | ||
34 | |||
35 | /* For RequestConfiguration */ | ||
36 | typedef struct config_req_t { | ||
37 | u_int Attributes; | ||
38 | u_int Vpp; /* both Vpp1 and Vpp2 */ | ||
39 | u_int IntType; | ||
40 | u_int ConfigBase; | ||
41 | u_char Status, Pin, Copy, ExtStatus; | ||
42 | u_char ConfigIndex; | ||
43 | u_int Present; | ||
44 | } config_req_t; | ||
45 | |||
46 | /* Attributes for RequestConfiguration */ | ||
47 | #define CONF_ENABLE_IRQ 0x01 | ||
48 | #define CONF_ENABLE_DMA 0x02 | ||
49 | #define CONF_ENABLE_SPKR 0x04 | ||
50 | #define CONF_ENABLE_PULSE_IRQ 0x08 | ||
51 | #define CONF_VALID_CLIENT 0x100 | ||
52 | |||
53 | /* IntType field */ | ||
54 | #define INT_MEMORY 0x01 | ||
55 | #define INT_MEMORY_AND_IO 0x02 | ||
56 | #define INT_CARDBUS 0x04 | ||
57 | #define INT_ZOOMED_VIDEO 0x08 | ||
58 | |||
59 | /* Configuration registers present */ | ||
60 | #define PRESENT_OPTION 0x001 | ||
61 | #define PRESENT_STATUS 0x002 | ||
62 | #define PRESENT_PIN_REPLACE 0x004 | ||
63 | #define PRESENT_COPY 0x008 | ||
64 | #define PRESENT_EXT_STATUS 0x010 | ||
65 | #define PRESENT_IOBASE_0 0x020 | ||
66 | #define PRESENT_IOBASE_1 0x040 | ||
67 | #define PRESENT_IOBASE_2 0x080 | ||
68 | #define PRESENT_IOBASE_3 0x100 | ||
69 | #define PRESENT_IOSIZE 0x200 | ||
70 | |||
71 | /* For RequestWindow */ | ||
72 | typedef struct win_req_t { | ||
73 | u_int Attributes; | ||
74 | u_long Base; | ||
75 | u_int Size; | ||
76 | u_int AccessSpeed; | ||
77 | } win_req_t; | ||
78 | |||
79 | /* Attributes for RequestWindow */ | ||
80 | #define WIN_MEMORY_TYPE_CM 0x00 /* default */ | ||
81 | #define WIN_MEMORY_TYPE_AM 0x20 /* MAP_ATTRIB */ | ||
82 | #define WIN_DATA_WIDTH_8 0x00 /* default */ | ||
83 | #define WIN_DATA_WIDTH_16 0x02 /* MAP_16BIT */ | ||
84 | #define WIN_ENABLE 0x01 /* MAP_ACTIVE */ | ||
85 | #define WIN_USE_WAIT 0x40 /* MAP_USE_WAIT */ | ||
86 | |||
87 | #define WIN_FLAGS_MAP 0x63 /* MAP_ATTRIB | MAP_16BIT | MAP_ACTIVE | | ||
88 | MAP_USE_WAIT */ | ||
89 | #define WIN_FLAGS_REQ 0x1c /* mapping to socket->win[i]: | ||
90 | 0x04 -> 0 | ||
91 | 0x08 -> 1 | ||
92 | 0x0c -> 2 | ||
93 | 0x10 -> 3 */ | ||
94 | |||
95 | #endif /* _LINUX_CS_H */ | ||
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h index 70c58ed2278c..8479b66c067b 100644 --- a/include/pcmcia/ds.h +++ b/include/pcmcia/ds.h | |||
@@ -24,9 +24,11 @@ | |||
24 | 24 | ||
25 | #ifdef __KERNEL__ | 25 | #ifdef __KERNEL__ |
26 | #include <linux/device.h> | 26 | #include <linux/device.h> |
27 | #include <linux/interrupt.h> | ||
27 | #include <pcmcia/ss.h> | 28 | #include <pcmcia/ss.h> |
28 | #include <asm/atomic.h> | 29 | #include <asm/atomic.h> |
29 | 30 | ||
31 | |||
30 | /* | 32 | /* |
31 | * PCMCIA device drivers (16-bit cards only; 32-bit cards require CardBus | 33 | * PCMCIA device drivers (16-bit cards only; 32-bit cards require CardBus |
32 | * a.k.a. PCI drivers | 34 | * a.k.a. PCI drivers |
@@ -36,8 +38,6 @@ struct pcmcia_device; | |||
36 | struct config_t; | 38 | struct config_t; |
37 | struct net_device; | 39 | struct net_device; |
38 | 40 | ||
39 | typedef struct resource *window_handle_t; | ||
40 | |||
41 | /* dynamic device IDs for PCMCIA device drivers. See | 41 | /* dynamic device IDs for PCMCIA device drivers. See |
42 | * Documentation/pcmcia/driver.txt for details. | 42 | * Documentation/pcmcia/driver.txt for details. |
43 | */ | 43 | */ |
@@ -47,6 +47,8 @@ struct pcmcia_dynids { | |||
47 | }; | 47 | }; |
48 | 48 | ||
49 | struct pcmcia_driver { | 49 | struct pcmcia_driver { |
50 | const char *name; | ||
51 | |||
50 | int (*probe) (struct pcmcia_device *dev); | 52 | int (*probe) (struct pcmcia_device *dev); |
51 | void (*remove) (struct pcmcia_device *dev); | 53 | void (*remove) (struct pcmcia_device *dev); |
52 | 54 | ||
@@ -90,15 +92,17 @@ struct pcmcia_device { | |||
90 | 92 | ||
91 | struct list_head socket_device_list; | 93 | struct list_head socket_device_list; |
92 | 94 | ||
93 | /* deprecated, will be cleaned up soon */ | ||
94 | config_req_t conf; | ||
95 | window_handle_t win; | ||
96 | |||
97 | /* device setup */ | 95 | /* device setup */ |
98 | unsigned int irq; | 96 | unsigned int irq; |
99 | struct resource *resource[PCMCIA_NUM_RESOURCES]; | 97 | struct resource *resource[PCMCIA_NUM_RESOURCES]; |
98 | resource_size_t card_addr; /* for the 1st IOMEM resource */ | ||
99 | unsigned int vpp; | ||
100 | 100 | ||
101 | unsigned int io_lines; /* number of I/O lines */ | 101 | unsigned int config_flags; /* CONF_ENABLE_ flags below */ |
102 | unsigned int config_base; | ||
103 | unsigned int config_index; | ||
104 | unsigned int config_regs; /* PRESENT_ flags below */ | ||
105 | unsigned int io_lines; /* number of I/O lines */ | ||
102 | 106 | ||
103 | /* Is the device suspended? */ | 107 | /* Is the device suspended? */ |
104 | u16 suspended:1; | 108 | u16 suspended:1; |
@@ -174,9 +178,6 @@ int pcmcia_parse_tuple(tuple_t *tuple, cisparse_t *parse); | |||
174 | /* loop CIS entries for valid configuration */ | 178 | /* loop CIS entries for valid configuration */ |
175 | int pcmcia_loop_config(struct pcmcia_device *p_dev, | 179 | int pcmcia_loop_config(struct pcmcia_device *p_dev, |
176 | int (*conf_check) (struct pcmcia_device *p_dev, | 180 | int (*conf_check) (struct pcmcia_device *p_dev, |
177 | cistpl_cftable_entry_t *cf, | ||
178 | cistpl_cftable_entry_t *dflt, | ||
179 | unsigned int vcc, | ||
180 | void *priv_data), | 181 | void *priv_data), |
181 | void *priv_data); | 182 | void *priv_data); |
182 | 183 | ||
@@ -206,16 +207,17 @@ pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev, | |||
206 | int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev, | 207 | int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev, |
207 | irq_handler_t handler); | 208 | irq_handler_t handler); |
208 | 209 | ||
209 | int pcmcia_request_configuration(struct pcmcia_device *p_dev, | 210 | int pcmcia_enable_device(struct pcmcia_device *p_dev); |
210 | config_req_t *req); | ||
211 | 211 | ||
212 | int pcmcia_request_window(struct pcmcia_device *p_dev, win_req_t *req, | 212 | int pcmcia_request_window(struct pcmcia_device *p_dev, struct resource *res, |
213 | window_handle_t *wh); | 213 | unsigned int speed); |
214 | int pcmcia_release_window(struct pcmcia_device *p_dev, window_handle_t win); | 214 | int pcmcia_release_window(struct pcmcia_device *p_dev, struct resource *res); |
215 | int pcmcia_map_mem_page(struct pcmcia_device *p_dev, window_handle_t win, | 215 | int pcmcia_map_mem_page(struct pcmcia_device *p_dev, struct resource *res, |
216 | unsigned int offset); | 216 | unsigned int offset); |
217 | 217 | ||
218 | int pcmcia_modify_configuration(struct pcmcia_device *p_dev, modconf_t *mod); | 218 | int pcmcia_fixup_vpp(struct pcmcia_device *p_dev, unsigned char new_vpp); |
219 | int pcmcia_fixup_iowidth(struct pcmcia_device *p_dev); | ||
220 | |||
219 | void pcmcia_disable_device(struct pcmcia_device *p_dev); | 221 | void pcmcia_disable_device(struct pcmcia_device *p_dev); |
220 | 222 | ||
221 | /* IO ports */ | 223 | /* IO ports */ |
@@ -224,15 +226,48 @@ void pcmcia_disable_device(struct pcmcia_device *p_dev); | |||
224 | #define IO_DATA_PATH_WIDTH_16 0x08 | 226 | #define IO_DATA_PATH_WIDTH_16 0x08 |
225 | #define IO_DATA_PATH_WIDTH_AUTO 0x10 | 227 | #define IO_DATA_PATH_WIDTH_AUTO 0x10 |
226 | 228 | ||
227 | /* convert flag found in cfgtable to data path width parameter */ | 229 | /* IO memory */ |
228 | static inline int pcmcia_io_cfg_data_width(unsigned int flags) | 230 | #define WIN_MEMORY_TYPE_CM 0x00 /* default */ |
229 | { | 231 | #define WIN_MEMORY_TYPE_AM 0x20 /* MAP_ATTRIB */ |
230 | if (!(flags & CISTPL_IO_8BIT)) | 232 | #define WIN_DATA_WIDTH_8 0x00 /* default */ |
231 | return IO_DATA_PATH_WIDTH_16; | 233 | #define WIN_DATA_WIDTH_16 0x02 /* MAP_16BIT */ |
232 | if (!(flags & CISTPL_IO_16BIT)) | 234 | #define WIN_ENABLE 0x01 /* MAP_ACTIVE */ |
233 | return IO_DATA_PATH_WIDTH_8; | 235 | #define WIN_USE_WAIT 0x40 /* MAP_USE_WAIT */ |
234 | return IO_DATA_PATH_WIDTH_AUTO; | 236 | |
235 | } | 237 | #define WIN_FLAGS_MAP 0x63 /* MAP_ATTRIB | MAP_16BIT | MAP_ACTIVE | |
238 | MAP_USE_WAIT */ | ||
239 | #define WIN_FLAGS_REQ 0x1c /* mapping to socket->win[i]: | ||
240 | 0x04 -> 0 | ||
241 | 0x08 -> 1 | ||
242 | 0x0c -> 2 | ||
243 | 0x10 -> 3 */ | ||
244 | |||
245 | /* config_reg{ister}s present for this PCMCIA device */ | ||
246 | #define PRESENT_OPTION 0x001 | ||
247 | #define PRESENT_STATUS 0x002 | ||
248 | #define PRESENT_PIN_REPLACE 0x004 | ||
249 | #define PRESENT_COPY 0x008 | ||
250 | #define PRESENT_EXT_STATUS 0x010 | ||
251 | #define PRESENT_IOBASE_0 0x020 | ||
252 | #define PRESENT_IOBASE_1 0x040 | ||
253 | #define PRESENT_IOBASE_2 0x080 | ||
254 | #define PRESENT_IOBASE_3 0x100 | ||
255 | #define PRESENT_IOSIZE 0x200 | ||
256 | |||
257 | /* flags to be passed to pcmcia_enable_device() */ | ||
258 | #define CONF_ENABLE_IRQ 0x0001 | ||
259 | #define CONF_ENABLE_SPKR 0x0002 | ||
260 | #define CONF_ENABLE_PULSE_IRQ 0x0004 | ||
261 | #define CONF_ENABLE_ESR 0x0008 | ||
262 | #define CONF_ENABLE_IOCARD 0x0010 /* auto-enabled if IO resources or IRQ | ||
263 | * (CONF_ENABLE_IRQ) in use */ | ||
264 | |||
265 | /* flags used by pcmcia_loop_config() autoconfiguration */ | ||
266 | #define CONF_AUTO_CHECK_VCC 0x0100 /* check for matching Vcc? */ | ||
267 | #define CONF_AUTO_SET_VPP 0x0200 /* set Vpp? */ | ||
268 | #define CONF_AUTO_AUDIO 0x0400 /* enable audio line? */ | ||
269 | #define CONF_AUTO_SET_IO 0x0800 /* set ->resource[0,1] */ | ||
270 | #define CONF_AUTO_SET_IOMEM 0x1000 /* set ->resource[2] */ | ||
236 | 271 | ||
237 | #endif /* __KERNEL__ */ | 272 | #endif /* __KERNEL__ */ |
238 | 273 | ||
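Editor's note: the ds.h changes above replace the cistpl-based pcmcia_loop_config() callback and pcmcia_request_configuration() with CONF_AUTO_* hints in config_flags plus pcmcia_enable_device(). The fragment below is only a hedged sketch of what a converted 16-bit driver probe might look like; the driver names, the size check and the single-argument pcmcia_request_io() call are assumptions for illustration, not taken from this patch.

#include <linux/errno.h>
#include <linux/ioport.h>
#include <pcmcia/ds.h>

static int example_cs_check(struct pcmcia_device *p_dev, void *priv_data)
{
	/* With CONF_AUTO_SET_IO the core pre-fills p_dev->resource[0]. */
	if (resource_size(p_dev->resource[0]) < 16)
		return -ENODEV;
	return pcmcia_request_io(p_dev);	/* assumed helper: claim this candidate */
}

static int example_cs_probe(struct pcmcia_device *p_dev)
{
	int ret;

	p_dev->config_flags |= CONF_AUTO_SET_IO;	/* let the core pick I/O */

	ret = pcmcia_loop_config(p_dev, example_cs_check, NULL);
	if (ret)
		return ret;
	return pcmcia_enable_device(p_dev);	/* replaces pcmcia_request_configuration() */
}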
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h index 626b63c33d9e..731cde010f42 100644 --- a/include/pcmcia/ss.h +++ b/include/pcmcia/ss.h | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/sched.h> /* task_struct, completion */ | 19 | #include <linux/sched.h> /* task_struct, completion */ |
20 | #include <linux/mutex.h> | 20 | #include <linux/mutex.h> |
21 | 21 | ||
22 | #include <pcmcia/cs.h> | ||
23 | #ifdef CONFIG_CARDBUS | 22 | #ifdef CONFIG_CARDBUS |
24 | #include <linux/pci.h> | 23 | #include <linux/pci.h> |
25 | #endif | 24 | #endif |
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h index d06e13be717b..3dec1949f69c 100644 --- a/include/scsi/libsas.h +++ b/include/scsi/libsas.h | |||
@@ -205,6 +205,7 @@ struct domain_device { | |||
205 | }; | 205 | }; |
206 | 206 | ||
207 | void *lldd_dev; | 207 | void *lldd_dev; |
208 | int gone; | ||
208 | }; | 209 | }; |
209 | 210 | ||
210 | struct sas_discovery_event { | 211 | struct sas_discovery_event { |
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h index 8fcb6e0e9e72..216af8538cc9 100644 --- a/include/scsi/scsi.h +++ b/include/scsi/scsi.h | |||
@@ -32,6 +32,12 @@ struct scsi_cmnd; | |||
32 | #endif | 32 | #endif |
33 | 33 | ||
34 | /* | 34 | /* |
35 | * DIX-capable adapters effectively support infinite chaining for the | ||
36 | * protection information scatterlist | ||
37 | */ | ||
38 | #define SCSI_MAX_PROT_SG_SEGMENTS 0xFFFF | ||
39 | |||
40 | /* | ||
35 | * Special value for scanning to specify scanning or rescanning of all | 41 | * Special value for scanning to specify scanning or rescanning of all |
36 | * possible channels, (target) ids, or luns on a given shost. | 42 | * possible channels, (target) ids, or luns on a given shost. |
37 | */ | 43 | */ |
@@ -67,6 +73,7 @@ struct scsi_cmnd; | |||
67 | #define SEND_DIAGNOSTIC 0x1d | 73 | #define SEND_DIAGNOSTIC 0x1d |
68 | #define ALLOW_MEDIUM_REMOVAL 0x1e | 74 | #define ALLOW_MEDIUM_REMOVAL 0x1e |
69 | 75 | ||
76 | #define READ_FORMAT_CAPACITIES 0x23 | ||
70 | #define SET_WINDOW 0x24 | 77 | #define SET_WINDOW 0x24 |
71 | #define READ_CAPACITY 0x25 | 78 | #define READ_CAPACITY 0x25 |
72 | #define READ_10 0x28 | 79 | #define READ_10 0x28 |
@@ -96,6 +103,7 @@ struct scsi_cmnd; | |||
96 | #define WRITE_SAME 0x41 | 103 | #define WRITE_SAME 0x41 |
97 | #define UNMAP 0x42 | 104 | #define UNMAP 0x42 |
98 | #define READ_TOC 0x43 | 105 | #define READ_TOC 0x43 |
106 | #define READ_HEADER 0x44 | ||
99 | #define LOG_SELECT 0x4c | 107 | #define LOG_SELECT 0x4c |
100 | #define LOG_SENSE 0x4d | 108 | #define LOG_SENSE 0x4d |
101 | #define XDWRITEREAD_10 0x53 | 109 | #define XDWRITEREAD_10 0x53 |
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index 50cb34ffef11..85867dcde335 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h | |||
@@ -148,6 +148,8 @@ struct scsi_device { | |||
148 | unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ | 148 | unsigned retry_hwerror:1; /* Retry HARDWARE_ERROR */ |
149 | unsigned last_sector_bug:1; /* do not use multisector accesses on | 149 | unsigned last_sector_bug:1; /* do not use multisector accesses on |
150 | SD_LAST_BUGGY_SECTORS */ | 150 | SD_LAST_BUGGY_SECTORS */ |
151 | unsigned no_read_disc_info:1; /* Avoid READ_DISC_INFO cmds */ | ||
152 | unsigned no_read_capacity_16:1; /* Avoid READ_CAPACITY_16 cmds */ | ||
151 | unsigned is_visible:1; /* is the device visible in sysfs */ | 153 | unsigned is_visible:1; /* is the device visible in sysfs */ |
152 | 154 | ||
153 | DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ | 155 | DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */ |
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h index b7bdecb7b76e..d0a6a845f204 100644 --- a/include/scsi/scsi_host.h +++ b/include/scsi/scsi_host.h | |||
@@ -388,6 +388,7 @@ struct scsi_host_template { | |||
388 | * of scatter-gather. | 388 | * of scatter-gather. |
389 | */ | 389 | */ |
390 | unsigned short sg_tablesize; | 390 | unsigned short sg_tablesize; |
391 | unsigned short sg_prot_tablesize; | ||
391 | 392 | ||
392 | /* | 393 | /* |
393 | * Set this if the host adapter has limitations beside segment count. | 394 | * Set this if the host adapter has limitations beside segment count. |
@@ -599,6 +600,7 @@ struct Scsi_Host { | |||
599 | int can_queue; | 600 | int can_queue; |
600 | short cmd_per_lun; | 601 | short cmd_per_lun; |
601 | short unsigned int sg_tablesize; | 602 | short unsigned int sg_tablesize; |
603 | short unsigned int sg_prot_tablesize; | ||
602 | short unsigned int max_sectors; | 604 | short unsigned int max_sectors; |
603 | unsigned long dma_boundary; | 605 | unsigned long dma_boundary; |
604 | /* | 606 | /* |
@@ -823,6 +825,11 @@ static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost) | |||
823 | return shost->prot_capabilities; | 825 | return shost->prot_capabilities; |
824 | } | 826 | } |
825 | 827 | ||
828 | static inline int scsi_host_prot_dma(struct Scsi_Host *shost) | ||
829 | { | ||
830 | return shost->prot_capabilities >= SHOST_DIX_TYPE0_PROTECTION; | ||
831 | } | ||
832 | |||
826 | static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) | 833 | static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type) |
827 | { | 834 | { |
828 | static unsigned char cap[] = { 0, | 835 | static unsigned char cap[] = { 0, |
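Editor's note: a hedged sketch of how a DIX-capable low-level driver could advertise the new protection-scatterlist limit added above; the template name, module field and data segment count are illustrative only.

#include <linux/module.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>

static struct scsi_host_template example_dix_sht = {
	.module			= THIS_MODULE,
	.name			= "example_dix_hba",
	.sg_tablesize		= 128,				/* data segments */
	.sg_prot_tablesize	= SCSI_MAX_PROT_SG_SEGMENTS,	/* PI segments   */
	/* ... queuecommand and the other mandatory methods elided ... */
};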
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h index 17231385cb37..d6e7994aa634 100644 --- a/include/scsi/scsi_tcq.h +++ b/include/scsi/scsi_tcq.h | |||
@@ -97,13 +97,9 @@ static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) | |||
97 | static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg) | 97 | static inline int scsi_populate_tag_msg(struct scsi_cmnd *cmd, char *msg) |
98 | { | 98 | { |
99 | struct request *req = cmd->request; | 99 | struct request *req = cmd->request; |
100 | struct scsi_device *sdev = cmd->device; | ||
101 | 100 | ||
102 | if (blk_rq_tagged(req)) { | 101 | if (blk_rq_tagged(req)) { |
103 | if (sdev->ordered_tags && req->cmd_flags & REQ_HARDBARRIER) | 102 | *msg++ = MSG_SIMPLE_TAG; |
104 | *msg++ = MSG_ORDERED_TAG; | ||
105 | else | ||
106 | *msg++ = MSG_SIMPLE_TAG; | ||
107 | *msg++ = req->tag; | 103 | *msg++ = req->tag; |
108 | return 2; | 104 | return 2; |
109 | } | 105 | } |
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h index 87d81b3ce564..59816fe31e68 100644 --- a/include/scsi/scsi_transport_fc.h +++ b/include/scsi/scsi_transport_fc.h | |||
@@ -496,6 +496,7 @@ struct fc_host_attrs { | |||
496 | u64 fabric_name; | 496 | u64 fabric_name; |
497 | char symbolic_name[FC_SYMBOLIC_NAME_SIZE]; | 497 | char symbolic_name[FC_SYMBOLIC_NAME_SIZE]; |
498 | char system_hostname[FC_SYMBOLIC_NAME_SIZE]; | 498 | char system_hostname[FC_SYMBOLIC_NAME_SIZE]; |
499 | u32 dev_loss_tmo; | ||
499 | 500 | ||
500 | /* Private (Transport-managed) Attributes */ | 501 | /* Private (Transport-managed) Attributes */ |
501 | enum fc_tgtid_binding_type tgtid_bind_type; | 502 | enum fc_tgtid_binding_type tgtid_bind_type; |
@@ -580,6 +581,8 @@ struct fc_host_attrs { | |||
580 | (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name) | 581 | (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q_name) |
581 | #define fc_host_devloss_work_q(x) \ | 582 | #define fc_host_devloss_work_q(x) \ |
582 | (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) | 583 | (((struct fc_host_attrs *)(x)->shost_data)->devloss_work_q) |
584 | #define fc_host_dev_loss_tmo(x) \ | ||
585 | (((struct fc_host_attrs *)(x)->shost_data)->dev_loss_tmo) | ||
583 | 586 | ||
584 | 587 | ||
585 | struct fc_bsg_buffer { | 588 | struct fc_bsg_buffer { |
diff --git a/include/sound/core.h b/include/sound/core.h index 89e0ac17f44a..df26ebbfa9c6 100644 --- a/include/sound/core.h +++ b/include/sound/core.h | |||
@@ -133,9 +133,7 @@ struct snd_card { | |||
133 | int free_on_last_close; /* free in context of file_release */ | 133 | int free_on_last_close; /* free in context of file_release */ |
134 | wait_queue_head_t shutdown_sleep; | 134 | wait_queue_head_t shutdown_sleep; |
135 | struct device *dev; /* device assigned to this card */ | 135 | struct device *dev; /* device assigned to this card */ |
136 | #ifndef CONFIG_SYSFS_DEPRECATED | ||
137 | struct device *card_dev; /* cardX object for sysfs */ | 136 | struct device *card_dev; /* cardX object for sysfs */ |
138 | #endif | ||
139 | 137 | ||
140 | #ifdef CONFIG_PM | 138 | #ifdef CONFIG_PM |
141 | unsigned int power_state; /* power state */ | 139 | unsigned int power_state; /* power state */ |
@@ -196,11 +194,7 @@ struct snd_minor { | |||
196 | /* return a device pointer linked to each sound device as a parent */ | 194 | /* return a device pointer linked to each sound device as a parent */ |
197 | static inline struct device *snd_card_get_device_link(struct snd_card *card) | 195 | static inline struct device *snd_card_get_device_link(struct snd_card *card) |
198 | { | 196 | { |
199 | #ifdef CONFIG_SYSFS_DEPRECATED | ||
200 | return card ? card->dev : NULL; | ||
201 | #else | ||
202 | return card ? card->card_dev : NULL; | 197 | return card ? card->card_dev : NULL; |
203 | #endif | ||
204 | } | 198 | } |
205 | 199 | ||
206 | /* sound.c */ | 200 | /* sound.c */ |
diff --git a/include/sound/emu10k1.h b/include/sound/emu10k1.h index 6a664c3f7c1e..7dc97d12253c 100644 --- a/include/sound/emu10k1.h +++ b/include/sound/emu10k1.h | |||
@@ -1707,6 +1707,7 @@ struct snd_emu10k1 { | |||
1707 | unsigned int card_type; /* EMU10K1_CARD_* */ | 1707 | unsigned int card_type; /* EMU10K1_CARD_* */ |
1708 | unsigned int ecard_ctrl; /* ecard control bits */ | 1708 | unsigned int ecard_ctrl; /* ecard control bits */ |
1709 | unsigned long dma_mask; /* PCI DMA mask */ | 1709 | unsigned long dma_mask; /* PCI DMA mask */ |
1710 | unsigned int delay_pcm_irq; /* in samples */ | ||
1710 | int max_cache_pages; /* max memory size / PAGE_SIZE */ | 1711 | int max_cache_pages; /* max memory size / PAGE_SIZE */ |
1711 | struct snd_dma_buffer silent_page; /* silent page */ | 1712 | struct snd_dma_buffer silent_page; /* silent page */ |
1712 | struct snd_dma_buffer ptb_pages; /* page table pages */ | 1713 | struct snd_dma_buffer ptb_pages; /* page table pages */ |
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h index 0e4cfb694fe7..6fa7cbab7d93 100644 --- a/include/trace/events/irq.h +++ b/include/trace/events/irq.h | |||
@@ -5,7 +5,9 @@ | |||
5 | #define _TRACE_IRQ_H | 5 | #define _TRACE_IRQ_H |
6 | 6 | ||
7 | #include <linux/tracepoint.h> | 7 | #include <linux/tracepoint.h> |
8 | #include <linux/interrupt.h> | 8 | |
9 | struct irqaction; | ||
10 | struct softirq_action; | ||
9 | 11 | ||
10 | #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq } | 12 | #define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq } |
11 | #define show_softirq_name(val) \ | 13 | #define show_softirq_name(val) \ |
@@ -93,7 +95,10 @@ DECLARE_EVENT_CLASS(softirq, | |||
93 | ), | 95 | ), |
94 | 96 | ||
95 | TP_fast_assign( | 97 | TP_fast_assign( |
96 | __entry->vec = (int)(h - vec); | 98 | if (vec) |
99 | __entry->vec = (int)(h - vec); | ||
100 | else | ||
101 | __entry->vec = (int)(long)h; | ||
97 | ), | 102 | ), |
98 | 103 | ||
99 | TP_printk("vec=%d [action=%s]", __entry->vec, | 104 | TP_printk("vec=%d [action=%s]", __entry->vec, |
@@ -136,6 +141,23 @@ DEFINE_EVENT(softirq, softirq_exit, | |||
136 | TP_ARGS(h, vec) | 141 | TP_ARGS(h, vec) |
137 | ); | 142 | ); |
138 | 143 | ||
144 | /** | ||
145 | * softirq_raise - called immediately when a softirq is raised | ||
146 | * @h: pointer to struct softirq_action | ||
147 | * @vec: pointer to first struct softirq_action in softirq_vec array | ||
148 | * | ||
149 | * @h carries the softirq vector number being raised rather than a real | ||
150 | * softirq_action pointer, and @vec is NULL to indicate that. When used | ||
151 | * in combination with the softirq_entry tracepoint we can determine the | ||
152 | * softirq raise latency. | ||
153 | */ | ||
154 | DEFINE_EVENT(softirq, softirq_raise, | ||
155 | |||
156 | TP_PROTO(struct softirq_action *h, struct softirq_action *vec), | ||
157 | |||
158 | TP_ARGS(h, vec) | ||
159 | ); | ||
160 | |||
139 | #endif /* _TRACE_IRQ_H */ | 161 | #endif /* _TRACE_IRQ_H */ |
140 | 162 | ||
141 | /* This part must be outside protection */ | 163 | /* This part must be outside protection */ |
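Editor's note: per the kernel-doc added above, the raise-side caller is expected to pass the vector number cast into @h and NULL for @vec. A hedged sketch of such a call site; the wrapper name is invented and the real hook sits in kernel/softirq.c.

#include <trace/events/irq.h>

static inline void example_trace_softirq_raise(unsigned int nr)
{
	/* @h carries the raw vector number, @vec is NULL (see comment above). */
	trace_softirq_raise((struct softirq_action *)(unsigned long)nr, NULL);
}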
diff --git a/include/trace/events/napi.h b/include/trace/events/napi.h index 188deca2f3c7..8fe1e93f531d 100644 --- a/include/trace/events/napi.h +++ b/include/trace/events/napi.h | |||
@@ -6,10 +6,31 @@ | |||
6 | 6 | ||
7 | #include <linux/netdevice.h> | 7 | #include <linux/netdevice.h> |
8 | #include <linux/tracepoint.h> | 8 | #include <linux/tracepoint.h> |
9 | #include <linux/ftrace.h> | ||
10 | |||
11 | #define NO_DEV "(no_device)" | ||
12 | |||
13 | TRACE_EVENT(napi_poll, | ||
9 | 14 | ||
10 | DECLARE_TRACE(napi_poll, | ||
11 | TP_PROTO(struct napi_struct *napi), | 15 | TP_PROTO(struct napi_struct *napi), |
12 | TP_ARGS(napi)); | 16 | |
17 | TP_ARGS(napi), | ||
18 | |||
19 | TP_STRUCT__entry( | ||
20 | __field( struct napi_struct *, napi) | ||
21 | __string( dev_name, napi->dev ? napi->dev->name : NO_DEV) | ||
22 | ), | ||
23 | |||
24 | TP_fast_assign( | ||
25 | __entry->napi = napi; | ||
26 | __assign_str(dev_name, napi->dev ? napi->dev->name : NO_DEV); | ||
27 | ), | ||
28 | |||
29 | TP_printk("napi poll on napi struct %p for device %s", | ||
30 | __entry->napi, __get_str(dev_name)) | ||
31 | ); | ||
32 | |||
33 | #undef NO_DEV | ||
13 | 34 | ||
14 | #endif /* _TRACE_NAPI_H_ */ | 35 | #endif /* _TRACE_NAPI_H_ */ |
15 | 36 | ||
diff --git a/include/trace/events/net.h b/include/trace/events/net.h new file mode 100644 index 000000000000..5f247f5ffc56 --- /dev/null +++ b/include/trace/events/net.h | |||
@@ -0,0 +1,82 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM net | ||
3 | |||
4 | #if !defined(_TRACE_NET_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_NET_H | ||
6 | |||
7 | #include <linux/skbuff.h> | ||
8 | #include <linux/netdevice.h> | ||
9 | #include <linux/ip.h> | ||
10 | #include <linux/tracepoint.h> | ||
11 | |||
12 | TRACE_EVENT(net_dev_xmit, | ||
13 | |||
14 | TP_PROTO(struct sk_buff *skb, | ||
15 | int rc), | ||
16 | |||
17 | TP_ARGS(skb, rc), | ||
18 | |||
19 | TP_STRUCT__entry( | ||
20 | __field( void *, skbaddr ) | ||
21 | __field( unsigned int, len ) | ||
22 | __field( int, rc ) | ||
23 | __string( name, skb->dev->name ) | ||
24 | ), | ||
25 | |||
26 | TP_fast_assign( | ||
27 | __entry->skbaddr = skb; | ||
28 | __entry->len = skb->len; | ||
29 | __entry->rc = rc; | ||
30 | __assign_str(name, skb->dev->name); | ||
31 | ), | ||
32 | |||
33 | TP_printk("dev=%s skbaddr=%p len=%u rc=%d", | ||
34 | __get_str(name), __entry->skbaddr, __entry->len, __entry->rc) | ||
35 | ); | ||
36 | |||
37 | DECLARE_EVENT_CLASS(net_dev_template, | ||
38 | |||
39 | TP_PROTO(struct sk_buff *skb), | ||
40 | |||
41 | TP_ARGS(skb), | ||
42 | |||
43 | TP_STRUCT__entry( | ||
44 | __field( void *, skbaddr ) | ||
45 | __field( unsigned int, len ) | ||
46 | __string( name, skb->dev->name ) | ||
47 | ), | ||
48 | |||
49 | TP_fast_assign( | ||
50 | __entry->skbaddr = skb; | ||
51 | __entry->len = skb->len; | ||
52 | __assign_str(name, skb->dev->name); | ||
53 | ), | ||
54 | |||
55 | TP_printk("dev=%s skbaddr=%p len=%u", | ||
56 | __get_str(name), __entry->skbaddr, __entry->len) | ||
57 | ) | ||
58 | |||
59 | DEFINE_EVENT(net_dev_template, net_dev_queue, | ||
60 | |||
61 | TP_PROTO(struct sk_buff *skb), | ||
62 | |||
63 | TP_ARGS(skb) | ||
64 | ); | ||
65 | |||
66 | DEFINE_EVENT(net_dev_template, netif_receive_skb, | ||
67 | |||
68 | TP_PROTO(struct sk_buff *skb), | ||
69 | |||
70 | TP_ARGS(skb) | ||
71 | ); | ||
72 | |||
73 | DEFINE_EVENT(net_dev_template, netif_rx, | ||
74 | |||
75 | TP_PROTO(struct sk_buff *skb), | ||
76 | |||
77 | TP_ARGS(skb) | ||
78 | ); | ||
79 | #endif /* _TRACE_NET_H */ | ||
80 | |||
81 | /* This part must be outside protection */ | ||
82 | #include <trace/define_trace.h> | ||
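Editor's note: the TRACE_EVENT()/DEFINE_EVENT() declarations above generate trace_net_dev_xmit(), trace_net_dev_queue(), trace_netif_rx() and trace_netif_receive_skb() hooks, assuming the accompanying net/core changes in this series define them. The transmit-side sketch below is illustrative only; the function is invented and not part of the patch.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <trace/events/net.h>

static netdev_tx_t example_start_xmit(struct sk_buff *skb,
				      struct net_device *dev)
{
	netdev_tx_t rc = NETDEV_TX_OK;

	/* ... hand the skb to the hardware here ... */
	trace_net_dev_xmit(skb, rc);	/* records dev name, skb address, len, rc */
	return rc;
}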
diff --git a/include/trace/events/power.h b/include/trace/events/power.h index 35a2a6e7bf1e..286784d69b8f 100644 --- a/include/trace/events/power.h +++ b/include/trace/events/power.h | |||
@@ -10,12 +10,17 @@ | |||
10 | #ifndef _TRACE_POWER_ENUM_ | 10 | #ifndef _TRACE_POWER_ENUM_ |
11 | #define _TRACE_POWER_ENUM_ | 11 | #define _TRACE_POWER_ENUM_ |
12 | enum { | 12 | enum { |
13 | POWER_NONE = 0, | 13 | POWER_NONE = 0, |
14 | POWER_CSTATE = 1, | 14 | POWER_CSTATE = 1, /* C-State */ |
15 | POWER_PSTATE = 2, | 15 | POWER_PSTATE = 2, /* Frequency change or DVFS */ |
16 | POWER_SSTATE = 3, /* Suspend */ | ||
16 | }; | 17 | }; |
17 | #endif | 18 | #endif |
18 | 19 | ||
20 | /* | ||
21 | * The power events are used for cpuidle & suspend (power_start, power_end) | ||
22 | * and for cpufreq (power_frequency) | ||
23 | */ | ||
19 | DECLARE_EVENT_CLASS(power, | 24 | DECLARE_EVENT_CLASS(power, |
20 | 25 | ||
21 | TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), | 26 | TP_PROTO(unsigned int type, unsigned int state, unsigned int cpu_id), |
@@ -70,6 +75,85 @@ TRACE_EVENT(power_end, | |||
70 | 75 | ||
71 | ); | 76 | ); |
72 | 77 | ||
78 | /* | ||
79 | * The clock events are used for clock enable/disable and for | ||
80 | * clock rate change | ||
81 | */ | ||
82 | DECLARE_EVENT_CLASS(clock, | ||
83 | |||
84 | TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), | ||
85 | |||
86 | TP_ARGS(name, state, cpu_id), | ||
87 | |||
88 | TP_STRUCT__entry( | ||
89 | __string( name, name ) | ||
90 | __field( u64, state ) | ||
91 | __field( u64, cpu_id ) | ||
92 | ), | ||
93 | |||
94 | TP_fast_assign( | ||
95 | __assign_str(name, name); | ||
96 | __entry->state = state; | ||
97 | __entry->cpu_id = cpu_id; | ||
98 | ), | ||
99 | |||
100 | TP_printk("%s state=%lu cpu_id=%lu", __get_str(name), | ||
101 | (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) | ||
102 | ); | ||
103 | |||
104 | DEFINE_EVENT(clock, clock_enable, | ||
105 | |||
106 | TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), | ||
107 | |||
108 | TP_ARGS(name, state, cpu_id) | ||
109 | ); | ||
110 | |||
111 | DEFINE_EVENT(clock, clock_disable, | ||
112 | |||
113 | TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), | ||
114 | |||
115 | TP_ARGS(name, state, cpu_id) | ||
116 | ); | ||
117 | |||
118 | DEFINE_EVENT(clock, clock_set_rate, | ||
119 | |||
120 | TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), | ||
121 | |||
122 | TP_ARGS(name, state, cpu_id) | ||
123 | ); | ||
124 | |||
125 | /* | ||
126 | * The power domain events are used for power domain transitions | ||
127 | */ | ||
128 | DECLARE_EVENT_CLASS(power_domain, | ||
129 | |||
130 | TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), | ||
131 | |||
132 | TP_ARGS(name, state, cpu_id), | ||
133 | |||
134 | TP_STRUCT__entry( | ||
135 | __string( name, name ) | ||
136 | __field( u64, state ) | ||
137 | __field( u64, cpu_id ) | ||
138 | ), | ||
139 | |||
140 | TP_fast_assign( | ||
141 | __assign_str(name, name); | ||
142 | __entry->state = state; | ||
143 | __entry->cpu_id = cpu_id; | ||
144 | ), | ||
145 | |||
146 | TP_printk("%s state=%lu cpu_id=%lu", __get_str(name), | ||
147 | (unsigned long)__entry->state, (unsigned long)__entry->cpu_id) | ||
148 | ); | ||
149 | |||
150 | DEFINE_EVENT(power_domain, power_domain_target, | ||
151 | |||
152 | TP_PROTO(const char *name, unsigned int state, unsigned int cpu_id), | ||
153 | |||
154 | TP_ARGS(name, state, cpu_id) | ||
155 | ); | ||
156 | |||
73 | #endif /* _TRACE_POWER_H */ | 157 | #endif /* _TRACE_POWER_H */ |
74 | 158 | ||
75 | /* This part must be outside protection */ | 159 | /* This part must be outside protection */ |
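Editor's note: a hedged sketch of how a platform clock implementation might emit the new clock_set_rate event; struct example_clk and its field are invented for the example, and the rate is reported through the class's generic 'state' argument.

#include <linux/smp.h>
#include <trace/events/power.h>

struct example_clk {
	const char *name;
};

static int example_clk_set_rate(struct example_clk *clk, unsigned int rate)
{
	trace_clock_set_rate(clk->name, rate, raw_smp_processor_id());
	/* ... program the divider/PLL here ... */
	return 0;
}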
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index 9208c92aeab5..f6334782a593 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h | |||
@@ -362,6 +362,35 @@ TRACE_EVENT(sched_stat_runtime, | |||
362 | (unsigned long long)__entry->vruntime) | 362 | (unsigned long long)__entry->vruntime) |
363 | ); | 363 | ); |
364 | 364 | ||
365 | /* | ||
366 | * Tracepoint for showing priority inheritance modifying a task's | ||
367 | * priority. | ||
368 | */ | ||
369 | TRACE_EVENT(sched_pi_setprio, | ||
370 | |||
371 | TP_PROTO(struct task_struct *tsk, int newprio), | ||
372 | |||
373 | TP_ARGS(tsk, newprio), | ||
374 | |||
375 | TP_STRUCT__entry( | ||
376 | __array( char, comm, TASK_COMM_LEN ) | ||
377 | __field( pid_t, pid ) | ||
378 | __field( int, oldprio ) | ||
379 | __field( int, newprio ) | ||
380 | ), | ||
381 | |||
382 | TP_fast_assign( | ||
383 | memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN); | ||
384 | __entry->pid = tsk->pid; | ||
385 | __entry->oldprio = tsk->prio; | ||
386 | __entry->newprio = newprio; | ||
387 | ), | ||
388 | |||
389 | TP_printk("comm=%s pid=%d oldprio=%d newprio=%d", | ||
390 | __entry->comm, __entry->pid, | ||
391 | __entry->oldprio, __entry->newprio) | ||
392 | ); | ||
393 | |||
365 | #endif /* _TRACE_SCHED_H */ | 394 | #endif /* _TRACE_SCHED_H */ |
366 | 395 | ||
367 | /* This part must be outside protection */ | 396 | /* This part must be outside protection */ |
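Editor's note: the obvious consumer of the new tracepoint is the priority-inheritance boost/deboost path. The sketch below is illustrative only (the wrapper is invented); it shows that the old priority is read from the task itself while the new one is passed in.

#include <linux/sched.h>
#include <trace/events/sched.h>

static void example_setprio(struct task_struct *p, int newprio)
{
	trace_sched_pi_setprio(p, newprio);	/* oldprio is taken from p->prio */
	/* ... dequeue, update p->prio, requeue ... */
}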
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h index 4b2be6dc76f0..75ce9d500d8e 100644 --- a/include/trace/events/skb.h +++ b/include/trace/events/skb.h | |||
@@ -35,6 +35,23 @@ TRACE_EVENT(kfree_skb, | |||
35 | __entry->skbaddr, __entry->protocol, __entry->location) | 35 | __entry->skbaddr, __entry->protocol, __entry->location) |
36 | ); | 36 | ); |
37 | 37 | ||
38 | TRACE_EVENT(consume_skb, | ||
39 | |||
40 | TP_PROTO(struct sk_buff *skb), | ||
41 | |||
42 | TP_ARGS(skb), | ||
43 | |||
44 | TP_STRUCT__entry( | ||
45 | __field( void *, skbaddr ) | ||
46 | ), | ||
47 | |||
48 | TP_fast_assign( | ||
49 | __entry->skbaddr = skb; | ||
50 | ), | ||
51 | |||
52 | TP_printk("skbaddr=%p", __entry->skbaddr) | ||
53 | ); | ||
54 | |||
38 | TRACE_EVENT(skb_copy_datagram_iovec, | 55 | TRACE_EVENT(skb_copy_datagram_iovec, |
39 | 56 | ||
40 | TP_PROTO(const struct sk_buff *skb, int len), | 57 | TP_PROTO(const struct sk_buff *skb, int len), |
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h index c624126a9c8a..425bcfe56c62 100644 --- a/include/trace/events/timer.h +++ b/include/trace/events/timer.h | |||
@@ -81,14 +81,16 @@ TRACE_EVENT(timer_expire_entry, | |||
81 | TP_STRUCT__entry( | 81 | TP_STRUCT__entry( |
82 | __field( void *, timer ) | 82 | __field( void *, timer ) |
83 | __field( unsigned long, now ) | 83 | __field( unsigned long, now ) |
84 | __field( void *, function) | ||
84 | ), | 85 | ), |
85 | 86 | ||
86 | TP_fast_assign( | 87 | TP_fast_assign( |
87 | __entry->timer = timer; | 88 | __entry->timer = timer; |
88 | __entry->now = jiffies; | 89 | __entry->now = jiffies; |
90 | __entry->function = timer->function; | ||
89 | ), | 91 | ), |
90 | 92 | ||
91 | TP_printk("timer=%p now=%lu", __entry->timer, __entry->now) | 93 | TP_printk("timer=%p function=%pf now=%lu", __entry->timer, __entry->function, __entry->now) |
92 | ); | 94 | ); |
93 | 95 | ||
94 | /** | 96 | /** |
@@ -200,14 +202,16 @@ TRACE_EVENT(hrtimer_expire_entry, | |||
200 | TP_STRUCT__entry( | 202 | TP_STRUCT__entry( |
201 | __field( void *, hrtimer ) | 203 | __field( void *, hrtimer ) |
202 | __field( s64, now ) | 204 | __field( s64, now ) |
205 | __field( void *, function) | ||
203 | ), | 206 | ), |
204 | 207 | ||
205 | TP_fast_assign( | 208 | TP_fast_assign( |
206 | __entry->hrtimer = hrtimer; | 209 | __entry->hrtimer = hrtimer; |
207 | __entry->now = now->tv64; | 210 | __entry->now = now->tv64; |
211 | __entry->function = hrtimer->function; | ||
208 | ), | 212 | ), |
209 | 213 | ||
210 | TP_printk("hrtimer=%p now=%llu", __entry->hrtimer, | 214 | TP_printk("hrtimer=%p function=%pf now=%llu", __entry->hrtimer, __entry->function, |
211 | (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) | 215 | (unsigned long long)ktime_to_ns((ktime_t) { .tv64 = __entry->now })) |
212 | ); | 216 | ); |
213 | 217 | ||
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h new file mode 100644 index 000000000000..7d497291c85d --- /dev/null +++ b/include/trace/events/workqueue.h | |||
@@ -0,0 +1,121 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM workqueue | ||
3 | |||
4 | #if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_WORKQUEUE_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | #include <linux/workqueue.h> | ||
9 | |||
10 | DECLARE_EVENT_CLASS(workqueue_work, | ||
11 | |||
12 | TP_PROTO(struct work_struct *work), | ||
13 | |||
14 | TP_ARGS(work), | ||
15 | |||
16 | TP_STRUCT__entry( | ||
17 | __field( void *, work ) | ||
18 | ), | ||
19 | |||
20 | TP_fast_assign( | ||
21 | __entry->work = work; | ||
22 | ), | ||
23 | |||
24 | TP_printk("work struct %p", __entry->work) | ||
25 | ); | ||
26 | |||
27 | /** | ||
28 | * workqueue_queue_work - called when a work gets queued | ||
29 | * @req_cpu: the requested cpu | ||
30 | * @cwq: pointer to struct cpu_workqueue_struct | ||
31 | * @work: pointer to struct work_struct | ||
32 | * | ||
33 | * This event occurs when a work is queued immediately or once a | ||
34 | * delayed work is actually queued on a workqueue (i.e. once the delay | ||
35 | * has been reached). | ||
36 | */ | ||
37 | TRACE_EVENT(workqueue_queue_work, | ||
38 | |||
39 | TP_PROTO(unsigned int req_cpu, struct cpu_workqueue_struct *cwq, | ||
40 | struct work_struct *work), | ||
41 | |||
42 | TP_ARGS(req_cpu, cwq, work), | ||
43 | |||
44 | TP_STRUCT__entry( | ||
45 | __field( void *, work ) | ||
46 | __field( void *, function) | ||
47 | __field( void *, workqueue) | ||
48 | __field( unsigned int, req_cpu ) | ||
49 | __field( unsigned int, cpu ) | ||
50 | ), | ||
51 | |||
52 | TP_fast_assign( | ||
53 | __entry->work = work; | ||
54 | __entry->function = work->func; | ||
55 | __entry->workqueue = cwq->wq; | ||
56 | __entry->req_cpu = req_cpu; | ||
57 | __entry->cpu = cwq->gcwq->cpu; | ||
58 | ), | ||
59 | |||
60 | TP_printk("work struct=%p function=%pf workqueue=%p req_cpu=%u cpu=%u", | ||
61 | __entry->work, __entry->function, __entry->workqueue, | ||
62 | __entry->req_cpu, __entry->cpu) | ||
63 | ); | ||
64 | |||
65 | /** | ||
66 | * workqueue_activate_work - called when a work gets activated | ||
67 | * @work: pointer to struct work_struct | ||
68 | * | ||
69 | * This event occurs when a queued work is put on the active queue, | ||
70 | * which happens immediately after queueing unless @max_active limit | ||
71 | * is reached. | ||
72 | */ | ||
73 | DEFINE_EVENT(workqueue_work, workqueue_activate_work, | ||
74 | |||
75 | TP_PROTO(struct work_struct *work), | ||
76 | |||
77 | TP_ARGS(work) | ||
78 | ); | ||
79 | |||
80 | /** | ||
81 | * workqueue_execute_start - called immediately before the workqueue callback | ||
82 | * @work: pointer to struct work_struct | ||
83 | * | ||
84 | * Allows tracking of workqueue execution. | ||
85 | */ | ||
86 | TRACE_EVENT(workqueue_execute_start, | ||
87 | |||
88 | TP_PROTO(struct work_struct *work), | ||
89 | |||
90 | TP_ARGS(work), | ||
91 | |||
92 | TP_STRUCT__entry( | ||
93 | __field( void *, work ) | ||
94 | __field( void *, function) | ||
95 | ), | ||
96 | |||
97 | TP_fast_assign( | ||
98 | __entry->work = work; | ||
99 | __entry->function = work->func; | ||
100 | ), | ||
101 | |||
102 | TP_printk("work struct %p: function %pf", __entry->work, __entry->function) | ||
103 | ); | ||
104 | |||
105 | /** | ||
106 | * workqueue_execute_end - called immediately after the workqueue callback | ||
107 | * @work: pointer to struct work_struct | ||
108 | * | ||
109 | * Allows tracking of workqueue execution. | ||
110 | */ | ||
111 | DEFINE_EVENT(workqueue_work, workqueue_execute_end, | ||
112 | |||
113 | TP_PROTO(struct work_struct *work), | ||
114 | |||
115 | TP_ARGS(work) | ||
116 | ); | ||
117 | |||
118 | #endif /* _TRACE_WORKQUEUE_H */ | ||
119 | |||
120 | /* This part must be outside protection */ | ||
121 | #include <trace/define_trace.h> | ||
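Editor's note: pairing workqueue_execute_start with workqueue_execute_end gives per-work-item runtime, and workqueue_queue_work to workqueue_execute_start gives queueing latency. A hedged sketch of the instrumentation around the callback; the surrounding function is simplified and invented, while the real calls sit in kernel/workqueue.c.

#include <linux/workqueue.h>
#include <trace/events/workqueue.h>

static void example_run_work(struct work_struct *work)
{
	work_func_t f = work->func;

	trace_workqueue_execute_start(work);
	f(work);				/* the driver's callback        */
	trace_workqueue_execute_end(work);	/* emitted right after it returns */
}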
diff --git a/include/video/vga.h b/include/video/vga.h index b49a5120ca2d..2b8691f7d256 100644 --- a/include/video/vga.h +++ b/include/video/vga.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright history from vga16fb.c: | 6 | * Copyright history from vga16fb.c: |
7 | * Copyright 1999 Ben Pfaff and Petr Vandrovec | 7 | * Copyright 1999 Ben Pfaff and Petr Vandrovec |
8 | * Based on VGA info at http://www.goodnet.com/~tinara/FreeVGA/home.htm | 8 | * Based on VGA info at http://www.osdever.net/FreeVGA/home.htm |
9 | * Based on VESA framebuffer (c) 1998 Gerd Knorr | 9 | * Based on VESA framebuffer (c) 1998 Gerd Knorr |
10 | * | 10 | * |
11 | * This file is subject to the terms and conditions of the GNU General | 11 | * This file is subject to the terms and conditions of the GNU General |
diff --git a/include/xen/platform_pci.h b/include/xen/platform_pci.h index ce9d671c636c..a785a3b0c8c7 100644 --- a/include/xen/platform_pci.h +++ b/include/xen/platform_pci.h | |||
@@ -16,11 +16,15 @@ | |||
16 | #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ | 16 | #define XEN_IOPORT_PROTOVER (XEN_IOPORT_BASE + 2) /* 1 byte access (R) */ |
17 | #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ | 17 | #define XEN_IOPORT_PRODNUM (XEN_IOPORT_BASE + 2) /* 2 byte access (W) */ |
18 | 18 | ||
19 | #define XEN_UNPLUG_ALL_IDE_DISKS 1 | 19 | #define XEN_UNPLUG_ALL_IDE_DISKS (1<<0) |
20 | #define XEN_UNPLUG_ALL_NICS 2 | 20 | #define XEN_UNPLUG_ALL_NICS (1<<1) |
21 | #define XEN_UNPLUG_AUX_IDE_DISKS 4 | 21 | #define XEN_UNPLUG_AUX_IDE_DISKS (1<<2) |
22 | #define XEN_UNPLUG_ALL 7 | 22 | #define XEN_UNPLUG_ALL (XEN_UNPLUG_ALL_IDE_DISKS|\ |
23 | #define XEN_UNPLUG_IGNORE 8 | 23 | XEN_UNPLUG_ALL_NICS|\ |
24 | XEN_UNPLUG_AUX_IDE_DISKS) | ||
25 | |||
26 | #define XEN_UNPLUG_UNNECESSARY (1<<16) | ||
27 | #define XEN_UNPLUG_NEVER (1<<17) | ||
24 | 28 | ||
25 | static inline int xen_must_unplug_nics(void) { | 29 | static inline int xen_must_unplug_nics(void) { |
26 | #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ | 30 | #if (defined(CONFIG_XEN_NETDEV_FRONTEND) || \ |