diff options
Diffstat (limited to 'include/linux')
319 files changed, 11197 insertions, 4807 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index e6b98a32495f..64e10746f282 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
@@ -56,6 +56,8 @@ static inline acpi_handle acpi_device_handle(struct acpi_device *adev) | |||
56 | #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ | 56 | #define ACPI_COMPANION_SET(dev, adev) set_primary_fwnode(dev, (adev) ? \ |
57 | acpi_fwnode_handle(adev) : NULL) | 57 | acpi_fwnode_handle(adev) : NULL) |
58 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) | 58 | #define ACPI_HANDLE(dev) acpi_device_handle(ACPI_COMPANION(dev)) |
59 | #define ACPI_HANDLE_FWNODE(fwnode) \ | ||
60 | acpi_device_handle(to_acpi_device_node(fwnode)) | ||
59 | 61 | ||
60 | static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) | 62 | static inline struct fwnode_handle *acpi_alloc_fwnode_static(void) |
61 | { | 63 | { |
@@ -585,6 +587,7 @@ extern int acpi_nvs_for_each_region(int (*func)(__u64, __u64, void *), | |||
585 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, | 587 | const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids, |
586 | const struct device *dev); | 588 | const struct device *dev); |
587 | 589 | ||
590 | void *acpi_get_match_data(const struct device *dev); | ||
588 | extern bool acpi_driver_match_device(struct device *dev, | 591 | extern bool acpi_driver_match_device(struct device *dev, |
589 | const struct device_driver *drv); | 592 | const struct device_driver *drv); |
590 | int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); | 593 | int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); |
@@ -627,6 +630,7 @@ int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count) | |||
627 | #define ACPI_COMPANION(dev) (NULL) | 630 | #define ACPI_COMPANION(dev) (NULL) |
628 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) | 631 | #define ACPI_COMPANION_SET(dev, adev) do { } while (0) |
629 | #define ACPI_HANDLE(dev) (NULL) | 632 | #define ACPI_HANDLE(dev) (NULL) |
633 | #define ACPI_HANDLE_FWNODE(fwnode) (NULL) | ||
630 | #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), | 634 | #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0), |
631 | 635 | ||
632 | struct fwnode_handle; | 636 | struct fwnode_handle; |
@@ -762,6 +766,11 @@ static inline const struct acpi_device_id *acpi_match_device( | |||
762 | return NULL; | 766 | return NULL; |
763 | } | 767 | } |
764 | 768 | ||
769 | static inline void *acpi_get_match_data(const struct device *dev) | ||
770 | { | ||
771 | return NULL; | ||
772 | } | ||
773 | |||
765 | static inline bool acpi_driver_match_device(struct device *dev, | 774 | static inline bool acpi_driver_match_device(struct device *dev, |
766 | const struct device_driver *drv) | 775 | const struct device_driver *drv) |
767 | { | 776 | { |
@@ -985,6 +994,11 @@ struct acpi_gpio_mapping { | |||
985 | const char *name; | 994 | const char *name; |
986 | const struct acpi_gpio_params *data; | 995 | const struct acpi_gpio_params *data; |
987 | unsigned int size; | 996 | unsigned int size; |
997 | |||
998 | /* Ignore IoRestriction field */ | ||
999 | #define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) | ||
1000 | |||
1001 | unsigned int quirks; | ||
988 | }; | 1002 | }; |
989 | 1003 | ||
990 | #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) | 1004 | #if defined(CONFIG_ACPI) && defined(CONFIG_GPIOLIB) |
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h index 304511267c82..2b709416de05 100644 --- a/include/linux/arch_topology.h +++ b/include/linux/arch_topology.h | |||
@@ -27,7 +27,7 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity); | |||
27 | DECLARE_PER_CPU(unsigned long, freq_scale); | 27 | DECLARE_PER_CPU(unsigned long, freq_scale); |
28 | 28 | ||
29 | static inline | 29 | static inline |
30 | unsigned long topology_get_freq_scale(struct sched_domain *sd, int cpu) | 30 | unsigned long topology_get_freq_scale(int cpu) |
31 | { | 31 | { |
32 | return per_cpu(freq_scale, cpu); | 32 | return per_cpu(freq_scale, cpu); |
33 | } | 33 | } |
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h index 4c5bca38c653..a031897fca76 100644 --- a/include/linux/arm-smccc.h +++ b/include/linux/arm-smccc.h | |||
@@ -14,14 +14,16 @@ | |||
14 | #ifndef __LINUX_ARM_SMCCC_H | 14 | #ifndef __LINUX_ARM_SMCCC_H |
15 | #define __LINUX_ARM_SMCCC_H | 15 | #define __LINUX_ARM_SMCCC_H |
16 | 16 | ||
17 | #include <uapi/linux/const.h> | ||
18 | |||
17 | /* | 19 | /* |
18 | * This file provides common defines for ARM SMC Calling Convention as | 20 | * This file provides common defines for ARM SMC Calling Convention as |
19 | * specified in | 21 | * specified in |
20 | * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html | 22 | * http://infocenter.arm.com/help/topic/com.arm.doc.den0028a/index.html |
21 | */ | 23 | */ |
22 | 24 | ||
23 | #define ARM_SMCCC_STD_CALL 0 | 25 | #define ARM_SMCCC_STD_CALL _AC(0,U) |
24 | #define ARM_SMCCC_FAST_CALL 1 | 26 | #define ARM_SMCCC_FAST_CALL _AC(1,U) |
25 | #define ARM_SMCCC_TYPE_SHIFT 31 | 27 | #define ARM_SMCCC_TYPE_SHIFT 31 |
26 | 28 | ||
27 | #define ARM_SMCCC_SMC_32 0 | 29 | #define ARM_SMCCC_SMC_32 0 |
@@ -60,6 +62,24 @@ | |||
60 | #define ARM_SMCCC_QUIRK_NONE 0 | 62 | #define ARM_SMCCC_QUIRK_NONE 0 |
61 | #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ | 63 | #define ARM_SMCCC_QUIRK_QCOM_A6 1 /* Save/restore register a6 */ |
62 | 64 | ||
65 | #define ARM_SMCCC_VERSION_1_0 0x10000 | ||
66 | #define ARM_SMCCC_VERSION_1_1 0x10001 | ||
67 | |||
68 | #define ARM_SMCCC_VERSION_FUNC_ID \ | ||
69 | ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ | ||
70 | ARM_SMCCC_SMC_32, \ | ||
71 | 0, 0) | ||
72 | |||
73 | #define ARM_SMCCC_ARCH_FEATURES_FUNC_ID \ | ||
74 | ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ | ||
75 | ARM_SMCCC_SMC_32, \ | ||
76 | 0, 1) | ||
77 | |||
78 | #define ARM_SMCCC_ARCH_WORKAROUND_1 \ | ||
79 | ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \ | ||
80 | ARM_SMCCC_SMC_32, \ | ||
81 | 0, 0x8000) | ||
82 | |||
63 | #ifndef __ASSEMBLY__ | 83 | #ifndef __ASSEMBLY__ |
64 | 84 | ||
65 | #include <linux/linkage.h> | 85 | #include <linux/linkage.h> |
@@ -130,5 +150,146 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1, | |||
130 | 150 | ||
131 | #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) | 151 | #define arm_smccc_hvc_quirk(...) __arm_smccc_hvc(__VA_ARGS__) |
132 | 152 | ||
153 | /* SMCCC v1.1 implementation madness follows */ | ||
154 | #ifdef CONFIG_ARM64 | ||
155 | |||
156 | #define SMCCC_SMC_INST "smc #0" | ||
157 | #define SMCCC_HVC_INST "hvc #0" | ||
158 | |||
159 | #elif defined(CONFIG_ARM) | ||
160 | #include <asm/opcodes-sec.h> | ||
161 | #include <asm/opcodes-virt.h> | ||
162 | |||
163 | #define SMCCC_SMC_INST __SMC(0) | ||
164 | #define SMCCC_HVC_INST __HVC(0) | ||
165 | |||
166 | #endif | ||
167 | |||
168 | #define ___count_args(_0, _1, _2, _3, _4, _5, _6, _7, _8, x, ...) x | ||
169 | |||
170 | #define __count_args(...) \ | ||
171 | ___count_args(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0) | ||
172 | |||
173 | #define __constraint_write_0 \ | ||
174 | "+r" (r0), "=&r" (r1), "=&r" (r2), "=&r" (r3) | ||
175 | #define __constraint_write_1 \ | ||
176 | "+r" (r0), "+r" (r1), "=&r" (r2), "=&r" (r3) | ||
177 | #define __constraint_write_2 \ | ||
178 | "+r" (r0), "+r" (r1), "+r" (r2), "=&r" (r3) | ||
179 | #define __constraint_write_3 \ | ||
180 | "+r" (r0), "+r" (r1), "+r" (r2), "+r" (r3) | ||
181 | #define __constraint_write_4 __constraint_write_3 | ||
182 | #define __constraint_write_5 __constraint_write_4 | ||
183 | #define __constraint_write_6 __constraint_write_5 | ||
184 | #define __constraint_write_7 __constraint_write_6 | ||
185 | |||
186 | #define __constraint_read_0 | ||
187 | #define __constraint_read_1 | ||
188 | #define __constraint_read_2 | ||
189 | #define __constraint_read_3 | ||
190 | #define __constraint_read_4 "r" (r4) | ||
191 | #define __constraint_read_5 __constraint_read_4, "r" (r5) | ||
192 | #define __constraint_read_6 __constraint_read_5, "r" (r6) | ||
193 | #define __constraint_read_7 __constraint_read_6, "r" (r7) | ||
194 | |||
195 | #define __declare_arg_0(a0, res) \ | ||
196 | struct arm_smccc_res *___res = res; \ | ||
197 | register u32 r0 asm("r0") = a0; \ | ||
198 | register unsigned long r1 asm("r1"); \ | ||
199 | register unsigned long r2 asm("r2"); \ | ||
200 | register unsigned long r3 asm("r3") | ||
201 | |||
202 | #define __declare_arg_1(a0, a1, res) \ | ||
203 | struct arm_smccc_res *___res = res; \ | ||
204 | register u32 r0 asm("r0") = a0; \ | ||
205 | register typeof(a1) r1 asm("r1") = a1; \ | ||
206 | register unsigned long r2 asm("r2"); \ | ||
207 | register unsigned long r3 asm("r3") | ||
208 | |||
209 | #define __declare_arg_2(a0, a1, a2, res) \ | ||
210 | struct arm_smccc_res *___res = res; \ | ||
211 | register u32 r0 asm("r0") = a0; \ | ||
212 | register typeof(a1) r1 asm("r1") = a1; \ | ||
213 | register typeof(a2) r2 asm("r2") = a2; \ | ||
214 | register unsigned long r3 asm("r3") | ||
215 | |||
216 | #define __declare_arg_3(a0, a1, a2, a3, res) \ | ||
217 | struct arm_smccc_res *___res = res; \ | ||
218 | register u32 r0 asm("r0") = a0; \ | ||
219 | register typeof(a1) r1 asm("r1") = a1; \ | ||
220 | register typeof(a2) r2 asm("r2") = a2; \ | ||
221 | register typeof(a3) r3 asm("r3") = a3 | ||
222 | |||
223 | #define __declare_arg_4(a0, a1, a2, a3, a4, res) \ | ||
224 | __declare_arg_3(a0, a1, a2, a3, res); \ | ||
225 | register typeof(a4) r4 asm("r4") = a4 | ||
226 | |||
227 | #define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \ | ||
228 | __declare_arg_4(a0, a1, a2, a3, a4, res); \ | ||
229 | register typeof(a5) r5 asm("r5") = a5 | ||
230 | |||
231 | #define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \ | ||
232 | __declare_arg_5(a0, a1, a2, a3, a4, a5, res); \ | ||
233 | register typeof(a6) r6 asm("r6") = a6 | ||
234 | |||
235 | #define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \ | ||
236 | __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \ | ||
237 | register typeof(a7) r7 asm("r7") = a7 | ||
238 | |||
239 | #define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__) | ||
240 | #define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__) | ||
241 | |||
242 | #define ___constraints(count) \ | ||
243 | : __constraint_write_ ## count \ | ||
244 | : __constraint_read_ ## count \ | ||
245 | : "memory" | ||
246 | #define __constraints(count) ___constraints(count) | ||
247 | |||
248 | /* | ||
249 | * We have an output list that is not necessarily used, and GCC feels | ||
250 | * entitled to optimise the whole sequence away. "volatile" is what | ||
251 | * makes it stick. | ||
252 | */ | ||
253 | #define __arm_smccc_1_1(inst, ...) \ | ||
254 | do { \ | ||
255 | __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \ | ||
256 | asm volatile(inst "\n" \ | ||
257 | __constraints(__count_args(__VA_ARGS__))); \ | ||
258 | if (___res) \ | ||
259 | *___res = (typeof(*___res)){r0, r1, r2, r3}; \ | ||
260 | } while (0) | ||
261 | |||
262 | /* | ||
263 | * arm_smccc_1_1_smc() - make an SMCCC v1.1 compliant SMC call | ||
264 | * | ||
265 | * This is a variadic macro taking one to eight source arguments, and | ||
266 | * an optional return structure. | ||
267 | * | ||
268 | * @a0-a7: arguments passed in registers 0 to 7 | ||
269 | * @res: result values from registers 0 to 3 | ||
270 | * | ||
271 | * This macro is used to make SMC calls following SMC Calling Convention v1.1. | ||
272 | * The content of the supplied param are copied to registers 0 to 7 prior | ||
273 | * to the SMC instruction. The return values are updated with the content | ||
274 | * from register 0 to 3 on return from the SMC instruction if not NULL. | ||
275 | */ | ||
276 | #define arm_smccc_1_1_smc(...) __arm_smccc_1_1(SMCCC_SMC_INST, __VA_ARGS__) | ||
277 | |||
278 | /* | ||
279 | * arm_smccc_1_1_hvc() - make an SMCCC v1.1 compliant HVC call | ||
280 | * | ||
281 | * This is a variadic macro taking one to eight source arguments, and | ||
282 | * an optional return structure. | ||
283 | * | ||
284 | * @a0-a7: arguments passed in registers 0 to 7 | ||
285 | * @res: result values from registers 0 to 3 | ||
286 | * | ||
287 | * This macro is used to make HVC calls following SMC Calling Convention v1.1. | ||
288 | * The content of the supplied param are copied to registers 0 to 7 prior | ||
289 | * to the HVC instruction. The return values are updated with the content | ||
290 | * from register 0 to 3 on return from the HVC instruction if not NULL. | ||
291 | */ | ||
292 | #define arm_smccc_1_1_hvc(...) __arm_smccc_1_1(SMCCC_HVC_INST, __VA_ARGS__) | ||
293 | |||
133 | #endif /*__ASSEMBLY__*/ | 294 | #endif /*__ASSEMBLY__*/ |
134 | #endif /*__LINUX_ARM_SMCCC_H*/ | 295 | #endif /*__LINUX_ARM_SMCCC_H*/ |
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h new file mode 100644 index 000000000000..942afbd544b7 --- /dev/null +++ b/include/linux/arm_sdei.h | |||
@@ -0,0 +1,79 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright (C) 2017 Arm Ltd. | ||
3 | #ifndef __LINUX_ARM_SDEI_H | ||
4 | #define __LINUX_ARM_SDEI_H | ||
5 | |||
6 | #include <uapi/linux/arm_sdei.h> | ||
7 | |||
8 | enum sdei_conduit_types { | ||
9 | CONDUIT_INVALID = 0, | ||
10 | CONDUIT_SMC, | ||
11 | CONDUIT_HVC, | ||
12 | }; | ||
13 | |||
14 | #include <asm/sdei.h> | ||
15 | |||
16 | /* Arch code should override this to set the entry point from firmware... */ | ||
17 | #ifndef sdei_arch_get_entry_point | ||
18 | #define sdei_arch_get_entry_point(conduit) (0) | ||
19 | #endif | ||
20 | |||
21 | /* | ||
22 | * When an event occurs sdei_event_handler() will call a user-provided callback | ||
23 | * like this in NMI context on the CPU that received the event. | ||
24 | */ | ||
25 | typedef int (sdei_event_callback)(u32 event, struct pt_regs *regs, void *arg); | ||
26 | |||
27 | /* | ||
28 | * Register your callback to claim an event. The event must be described | ||
29 | * by firmware. | ||
30 | */ | ||
31 | int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg); | ||
32 | |||
33 | /* | ||
34 | * Calls to sdei_event_unregister() may return EINPROGRESS. Keep calling | ||
35 | * it until it succeeds. | ||
36 | */ | ||
37 | int sdei_event_unregister(u32 event_num); | ||
38 | |||
39 | int sdei_event_enable(u32 event_num); | ||
40 | int sdei_event_disable(u32 event_num); | ||
41 | |||
42 | #ifdef CONFIG_ARM_SDE_INTERFACE | ||
43 | /* For use by arch code when CPU hotplug notifiers are not appropriate. */ | ||
44 | int sdei_mask_local_cpu(void); | ||
45 | int sdei_unmask_local_cpu(void); | ||
46 | #else | ||
47 | static inline int sdei_mask_local_cpu(void) { return 0; } | ||
48 | static inline int sdei_unmask_local_cpu(void) { return 0; } | ||
49 | #endif /* CONFIG_ARM_SDE_INTERFACE */ | ||
50 | |||
51 | |||
52 | /* | ||
53 | * This struct represents an event that has been registered. The driver | ||
54 | * maintains a list of all events, and which ones are registered. (Private | ||
55 | * events have one entry in the list, but are registered on each CPU). | ||
56 | * A pointer to this struct is passed to firmware, and back to the event | ||
57 | * handler. The event handler can then use this to invoke the registered | ||
58 | * callback, without having to walk the list. | ||
59 | * | ||
60 | * For CPU private events, this structure is per-cpu. | ||
61 | */ | ||
62 | struct sdei_registered_event { | ||
63 | /* For use by arch code: */ | ||
64 | struct pt_regs interrupted_regs; | ||
65 | |||
66 | sdei_event_callback *callback; | ||
67 | void *callback_arg; | ||
68 | u32 event_num; | ||
69 | u8 priority; | ||
70 | }; | ||
71 | |||
72 | /* The arch code entry point should then call this when an event arrives. */ | ||
73 | int notrace sdei_event_handler(struct pt_regs *regs, | ||
74 | struct sdei_registered_event *arg); | ||
75 | |||
76 | /* arch code may use this to retrieve the extra registers. */ | ||
77 | int sdei_api_event_context(u32 query, u64 *result); | ||
78 | |||
79 | #endif /* __LINUX_ARM_SDEI_H */ | ||
diff --git a/include/linux/ata.h b/include/linux/ata.h index c7a353825450..40d150ad7e07 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h | |||
@@ -448,6 +448,8 @@ enum { | |||
448 | ATA_SET_MAX_LOCK = 0x02, | 448 | ATA_SET_MAX_LOCK = 0x02, |
449 | ATA_SET_MAX_UNLOCK = 0x03, | 449 | ATA_SET_MAX_UNLOCK = 0x03, |
450 | ATA_SET_MAX_FREEZE_LOCK = 0x04, | 450 | ATA_SET_MAX_FREEZE_LOCK = 0x04, |
451 | ATA_SET_MAX_PASSWD_DMA = 0x05, | ||
452 | ATA_SET_MAX_UNLOCK_DMA = 0x06, | ||
451 | 453 | ||
452 | /* feature values for DEVICE CONFIGURATION OVERLAY */ | 454 | /* feature values for DEVICE CONFIGURATION OVERLAY */ |
453 | ATA_DCO_RESTORE = 0xC0, | 455 | ATA_DCO_RESTORE = 0xC0, |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index e54e7e0033eb..3e4ce54d84ab 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
@@ -332,7 +332,7 @@ static inline bool inode_to_wb_is_valid(struct inode *inode) | |||
332 | * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the | 332 | * holding either @inode->i_lock, @inode->i_mapping->tree_lock, or the |
333 | * associated wb's list_lock. | 333 | * associated wb's list_lock. |
334 | */ | 334 | */ |
335 | static inline struct bdi_writeback *inode_to_wb(struct inode *inode) | 335 | static inline struct bdi_writeback *inode_to_wb(const struct inode *inode) |
336 | { | 336 | { |
337 | #ifdef CONFIG_LOCKDEP | 337 | #ifdef CONFIG_LOCKDEP |
338 | WARN_ON_ONCE(debug_locks && | 338 | WARN_ON_ONCE(debug_locks && |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 23d29b39f71e..d0eb659fa733 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
@@ -300,6 +300,29 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) | |||
300 | bv->bv_len = iter.bi_bvec_done; | 300 | bv->bv_len = iter.bi_bvec_done; |
301 | } | 301 | } |
302 | 302 | ||
303 | static inline unsigned bio_pages_all(struct bio *bio) | ||
304 | { | ||
305 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); | ||
306 | return bio->bi_vcnt; | ||
307 | } | ||
308 | |||
309 | static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) | ||
310 | { | ||
311 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); | ||
312 | return bio->bi_io_vec; | ||
313 | } | ||
314 | |||
315 | static inline struct page *bio_first_page_all(struct bio *bio) | ||
316 | { | ||
317 | return bio_first_bvec_all(bio)->bv_page; | ||
318 | } | ||
319 | |||
320 | static inline struct bio_vec *bio_last_bvec_all(struct bio *bio) | ||
321 | { | ||
322 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); | ||
323 | return &bio->bi_io_vec[bio->bi_vcnt - 1]; | ||
324 | } | ||
325 | |||
303 | enum bip_flags { | 326 | enum bip_flags { |
304 | BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ | 327 | BIP_BLOCK_INTEGRITY = 1 << 0, /* block layer owns integrity data */ |
305 | BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ | 328 | BIP_MAPPED_INTEGRITY = 1 << 1, /* ref tag has been remapped */ |
@@ -477,7 +500,6 @@ static inline void bio_flush_dcache_pages(struct bio *bi) | |||
477 | #endif | 500 | #endif |
478 | 501 | ||
479 | extern void bio_copy_data(struct bio *dst, struct bio *src); | 502 | extern void bio_copy_data(struct bio *dst, struct bio *src); |
480 | extern int bio_alloc_pages(struct bio *bio, gfp_t gfp); | ||
481 | extern void bio_free_pages(struct bio *bio); | 503 | extern void bio_free_pages(struct bio *bio); |
482 | 504 | ||
483 | extern struct bio *bio_copy_user_iov(struct request_queue *, | 505 | extern struct bio *bio_copy_user_iov(struct request_queue *, |
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h index 1030651f8309..cf2588d81148 100644 --- a/include/linux/bitfield.h +++ b/include/linux/bitfield.h | |||
@@ -16,6 +16,7 @@ | |||
16 | #define _LINUX_BITFIELD_H | 16 | #define _LINUX_BITFIELD_H |
17 | 17 | ||
18 | #include <linux/build_bug.h> | 18 | #include <linux/build_bug.h> |
19 | #include <asm/byteorder.h> | ||
19 | 20 | ||
20 | /* | 21 | /* |
21 | * Bitfield access macros | 22 | * Bitfield access macros |
@@ -103,4 +104,49 @@ | |||
103 | (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ | 104 | (typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \ |
104 | }) | 105 | }) |
105 | 106 | ||
107 | extern void __compiletime_warning("value doesn't fit into mask") | ||
108 | __field_overflow(void); | ||
109 | extern void __compiletime_error("bad bitfield mask") | ||
110 | __bad_mask(void); | ||
111 | static __always_inline u64 field_multiplier(u64 field) | ||
112 | { | ||
113 | if ((field | (field - 1)) & ((field | (field - 1)) + 1)) | ||
114 | __bad_mask(); | ||
115 | return field & -field; | ||
116 | } | ||
117 | static __always_inline u64 field_mask(u64 field) | ||
118 | { | ||
119 | return field / field_multiplier(field); | ||
120 | } | ||
121 | #define ____MAKE_OP(type,base,to,from) \ | ||
122 | static __always_inline __##type type##_encode_bits(base v, base field) \ | ||
123 | { \ | ||
124 | if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \ | ||
125 | __field_overflow(); \ | ||
126 | return to((v & field_mask(field)) * field_multiplier(field)); \ | ||
127 | } \ | ||
128 | static __always_inline __##type type##_replace_bits(__##type old, \ | ||
129 | base val, base field) \ | ||
130 | { \ | ||
131 | return (old & ~to(field)) | type##_encode_bits(val, field); \ | ||
132 | } \ | ||
133 | static __always_inline void type##p_replace_bits(__##type *p, \ | ||
134 | base val, base field) \ | ||
135 | { \ | ||
136 | *p = (*p & ~to(field)) | type##_encode_bits(val, field); \ | ||
137 | } \ | ||
138 | static __always_inline base type##_get_bits(__##type v, base field) \ | ||
139 | { \ | ||
140 | return (from(v) & field)/field_multiplier(field); \ | ||
141 | } | ||
142 | #define __MAKE_OP(size) \ | ||
143 | ____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \ | ||
144 | ____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \ | ||
145 | ____MAKE_OP(u##size,u##size,,) | ||
146 | __MAKE_OP(16) | ||
147 | __MAKE_OP(32) | ||
148 | __MAKE_OP(64) | ||
149 | #undef __MAKE_OP | ||
150 | #undef ____MAKE_OP | ||
151 | |||
106 | #endif | 152 | #endif |
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h index 3489253e38fc..5f11fbdc27f8 100644 --- a/include/linux/bitmap.h +++ b/include/linux/bitmap.h | |||
@@ -64,9 +64,14 @@ | |||
64 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region | 64 | * bitmap_find_free_region(bitmap, bits, order) Find and allocate bit region |
65 | * bitmap_release_region(bitmap, pos, order) Free specified bit region | 65 | * bitmap_release_region(bitmap, pos, order) Free specified bit region |
66 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region | 66 | * bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region |
67 | * bitmap_from_u32array(dst, nbits, buf, nwords) *dst = *buf (nwords 32b words) | 67 | * bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst |
68 | * bitmap_to_u32array(buf, nwords, src, nbits) *buf = *dst (nwords 32b words) | 68 | * bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst |
69 | * | 69 | * |
70 | * Note, bitmap_zero() and bitmap_fill() operate over the region of | ||
71 | * unsigned longs, that is, bits behind bitmap till the unsigned long | ||
72 | * boundary will be zeroed or filled as well. Consider to use | ||
73 | * bitmap_clear() or bitmap_set() to make explicit zeroing or filling | ||
74 | * respectively. | ||
70 | */ | 75 | */ |
71 | 76 | ||
72 | /** | 77 | /** |
@@ -83,8 +88,12 @@ | |||
83 | * test_and_change_bit(bit, addr) Change bit and return old value | 88 | * test_and_change_bit(bit, addr) Change bit and return old value |
84 | * find_first_zero_bit(addr, nbits) Position first zero bit in *addr | 89 | * find_first_zero_bit(addr, nbits) Position first zero bit in *addr |
85 | * find_first_bit(addr, nbits) Position first set bit in *addr | 90 | * find_first_bit(addr, nbits) Position first set bit in *addr |
86 | * find_next_zero_bit(addr, nbits, bit) Position next zero bit in *addr >= bit | 91 | * find_next_zero_bit(addr, nbits, bit) |
92 | * Position next zero bit in *addr >= bit | ||
87 | * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit | 93 | * find_next_bit(addr, nbits, bit) Position next set bit in *addr >= bit |
94 | * find_next_and_bit(addr1, addr2, nbits, bit) | ||
95 | * Same as find_next_bit, but in | ||
96 | * (*addr1 & *addr2) | ||
88 | * | 97 | * |
89 | */ | 98 | */ |
90 | 99 | ||
@@ -174,14 +183,7 @@ extern void bitmap_fold(unsigned long *dst, const unsigned long *orig, | |||
174 | extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); | 183 | extern int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order); |
175 | extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); | 184 | extern void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order); |
176 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); | 185 | extern int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order); |
177 | extern unsigned int bitmap_from_u32array(unsigned long *bitmap, | 186 | |
178 | unsigned int nbits, | ||
179 | const u32 *buf, | ||
180 | unsigned int nwords); | ||
181 | extern unsigned int bitmap_to_u32array(u32 *buf, | ||
182 | unsigned int nwords, | ||
183 | const unsigned long *bitmap, | ||
184 | unsigned int nbits); | ||
185 | #ifdef __BIG_ENDIAN | 187 | #ifdef __BIG_ENDIAN |
186 | extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); | 188 | extern void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits); |
187 | #else | 189 | #else |
@@ -209,12 +211,12 @@ static inline void bitmap_zero(unsigned long *dst, unsigned int nbits) | |||
209 | 211 | ||
210 | static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) | 212 | static inline void bitmap_fill(unsigned long *dst, unsigned int nbits) |
211 | { | 213 | { |
212 | unsigned int nlongs = BITS_TO_LONGS(nbits); | 214 | if (small_const_nbits(nbits)) |
213 | if (!small_const_nbits(nbits)) { | 215 | *dst = ~0UL; |
214 | unsigned int len = (nlongs - 1) * sizeof(unsigned long); | 216 | else { |
215 | memset(dst, 0xff, len); | 217 | unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); |
218 | memset(dst, 0xff, len); | ||
216 | } | 219 | } |
217 | dst[nlongs - 1] = BITMAP_LAST_WORD_MASK(nbits); | ||
218 | } | 220 | } |
219 | 221 | ||
220 | static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, | 222 | static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, |
@@ -228,6 +230,35 @@ static inline void bitmap_copy(unsigned long *dst, const unsigned long *src, | |||
228 | } | 230 | } |
229 | } | 231 | } |
230 | 232 | ||
233 | /* | ||
234 | * Copy bitmap and clear tail bits in last word. | ||
235 | */ | ||
236 | static inline void bitmap_copy_clear_tail(unsigned long *dst, | ||
237 | const unsigned long *src, unsigned int nbits) | ||
238 | { | ||
239 | bitmap_copy(dst, src, nbits); | ||
240 | if (nbits % BITS_PER_LONG) | ||
241 | dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * On 32-bit systems bitmaps are represented as u32 arrays internally, and | ||
246 | * therefore conversion is not needed when copying data from/to arrays of u32. | ||
247 | */ | ||
248 | #if BITS_PER_LONG == 64 | ||
249 | extern void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf, | ||
250 | unsigned int nbits); | ||
251 | extern void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap, | ||
252 | unsigned int nbits); | ||
253 | #else | ||
254 | #define bitmap_from_arr32(bitmap, buf, nbits) \ | ||
255 | bitmap_copy_clear_tail((unsigned long *) (bitmap), \ | ||
256 | (const unsigned long *) (buf), (nbits)) | ||
257 | #define bitmap_to_arr32(buf, bitmap, nbits) \ | ||
258 | bitmap_copy_clear_tail((unsigned long *) (buf), \ | ||
259 | (const unsigned long *) (bitmap), (nbits)) | ||
260 | #endif | ||
261 | |||
231 | static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, | 262 | static inline int bitmap_and(unsigned long *dst, const unsigned long *src1, |
232 | const unsigned long *src2, unsigned int nbits) | 263 | const unsigned long *src2, unsigned int nbits) |
233 | { | 264 | { |
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h index e9825ff57b15..69bea82ebeb1 100644 --- a/include/linux/blk-cgroup.h +++ b/include/linux/blk-cgroup.h | |||
@@ -660,12 +660,14 @@ static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat) | |||
660 | static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, | 660 | static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to, |
661 | struct blkg_rwstat *from) | 661 | struct blkg_rwstat *from) |
662 | { | 662 | { |
663 | struct blkg_rwstat v = blkg_rwstat_read(from); | 663 | u64 sum[BLKG_RWSTAT_NR]; |
664 | int i; | 664 | int i; |
665 | 665 | ||
666 | for (i = 0; i < BLKG_RWSTAT_NR; i++) | 666 | for (i = 0; i < BLKG_RWSTAT_NR; i++) |
667 | atomic64_add(atomic64_read(&v.aux_cnt[i]) + | 667 | sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]); |
668 | atomic64_read(&from->aux_cnt[i]), | 668 | |
669 | for (i = 0; i < BLKG_RWSTAT_NR; i++) | ||
670 | atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]), | ||
669 | &to->aux_cnt[i]); | 671 | &to->aux_cnt[i]); |
670 | } | 672 | } |
671 | 673 | ||
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 95c9a5c862e2..8efcf49796a3 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
@@ -51,6 +51,7 @@ struct blk_mq_hw_ctx { | |||
51 | unsigned int queue_num; | 51 | unsigned int queue_num; |
52 | 52 | ||
53 | atomic_t nr_active; | 53 | atomic_t nr_active; |
54 | unsigned int nr_expired; | ||
54 | 55 | ||
55 | struct hlist_node cpuhp_dead; | 56 | struct hlist_node cpuhp_dead; |
56 | struct kobject kobj; | 57 | struct kobject kobj; |
@@ -65,7 +66,7 @@ struct blk_mq_hw_ctx { | |||
65 | #endif | 66 | #endif |
66 | 67 | ||
67 | /* Must be the last member - see also blk_mq_hw_ctx_size(). */ | 68 | /* Must be the last member - see also blk_mq_hw_ctx_size(). */ |
68 | struct srcu_struct queue_rq_srcu[0]; | 69 | struct srcu_struct srcu[0]; |
69 | }; | 70 | }; |
70 | 71 | ||
71 | struct blk_mq_tag_set { | 72 | struct blk_mq_tag_set { |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 9e7d8bd776d2..bf18b95ed92d 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
@@ -39,6 +39,52 @@ typedef u8 __bitwise blk_status_t; | |||
39 | 39 | ||
40 | #define BLK_STS_AGAIN ((__force blk_status_t)12) | 40 | #define BLK_STS_AGAIN ((__force blk_status_t)12) |
41 | 41 | ||
42 | /* | ||
43 | * BLK_STS_DEV_RESOURCE is returned from the driver to the block layer if | ||
44 | * device related resources are unavailable, but the driver can guarantee | ||
45 | * that the queue will be rerun in the future once resources become | ||
46 | * available again. This is typically the case for device specific | ||
47 | * resources that are consumed for IO. If the driver fails allocating these | ||
48 | * resources, we know that inflight (or pending) IO will free these | ||
49 | * resource upon completion. | ||
50 | * | ||
51 | * This is different from BLK_STS_RESOURCE in that it explicitly references | ||
52 | * a device specific resource. For resources of wider scope, allocation | ||
53 | * failure can happen without having pending IO. This means that we can't | ||
54 | * rely on request completions freeing these resources, as IO may not be in | ||
55 | * flight. Examples of that are kernel memory allocations, DMA mappings, or | ||
56 | * any other system wide resources. | ||
57 | */ | ||
58 | #define BLK_STS_DEV_RESOURCE ((__force blk_status_t)13) | ||
59 | |||
60 | /** | ||
61 | * blk_path_error - returns true if error may be path related | ||
62 | * @error: status the request was completed with | ||
63 | * | ||
64 | * Description: | ||
65 | * This classifies block error status into non-retryable errors and ones | ||
66 | * that may be successful if retried on a failover path. | ||
67 | * | ||
68 | * Return: | ||
69 | * %false - retrying failover path will not help | ||
70 | * %true - may succeed if retried | ||
71 | */ | ||
72 | static inline bool blk_path_error(blk_status_t error) | ||
73 | { | ||
74 | switch (error) { | ||
75 | case BLK_STS_NOTSUPP: | ||
76 | case BLK_STS_NOSPC: | ||
77 | case BLK_STS_TARGET: | ||
78 | case BLK_STS_NEXUS: | ||
79 | case BLK_STS_MEDIUM: | ||
80 | case BLK_STS_PROTECTION: | ||
81 | return false; | ||
82 | } | ||
83 | |||
84 | /* Anything else could be a path failure, so should be retried */ | ||
85 | return true; | ||
86 | } | ||
87 | |||
42 | struct blk_issue_stat { | 88 | struct blk_issue_stat { |
43 | u64 stat; | 89 | u64 stat; |
44 | }; | 90 | }; |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 0ce8a372d506..4f3df807cf8f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -27,6 +27,8 @@ | |||
27 | #include <linux/percpu-refcount.h> | 27 | #include <linux/percpu-refcount.h> |
28 | #include <linux/scatterlist.h> | 28 | #include <linux/scatterlist.h> |
29 | #include <linux/blkzoned.h> | 29 | #include <linux/blkzoned.h> |
30 | #include <linux/seqlock.h> | ||
31 | #include <linux/u64_stats_sync.h> | ||
30 | 32 | ||
31 | struct module; | 33 | struct module; |
32 | struct scsi_ioctl_command; | 34 | struct scsi_ioctl_command; |
@@ -121,6 +123,12 @@ typedef __u32 __bitwise req_flags_t; | |||
121 | /* Look at ->special_vec for the actual data payload instead of the | 123 | /* Look at ->special_vec for the actual data payload instead of the |
122 | bio chain. */ | 124 | bio chain. */ |
123 | #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) | 125 | #define RQF_SPECIAL_PAYLOAD ((__force req_flags_t)(1 << 18)) |
126 | /* The per-zone write lock is held for this request */ | ||
127 | #define RQF_ZONE_WRITE_LOCKED ((__force req_flags_t)(1 << 19)) | ||
128 | /* timeout is expired */ | ||
129 | #define RQF_MQ_TIMEOUT_EXPIRED ((__force req_flags_t)(1 << 20)) | ||
130 | /* already slept for hybrid poll */ | ||
131 | #define RQF_MQ_POLL_SLEPT ((__force req_flags_t)(1 << 21)) | ||
124 | 132 | ||
125 | /* flags that prevent us from merging requests: */ | 133 | /* flags that prevent us from merging requests: */ |
126 | #define RQF_NOMERGE_FLAGS \ | 134 | #define RQF_NOMERGE_FLAGS \ |
@@ -133,12 +141,6 @@ typedef __u32 __bitwise req_flags_t; | |||
133 | * especially blk_mq_rq_ctx_init() to take care of the added fields. | 141 | * especially blk_mq_rq_ctx_init() to take care of the added fields. |
134 | */ | 142 | */ |
135 | struct request { | 143 | struct request { |
136 | struct list_head queuelist; | ||
137 | union { | ||
138 | struct __call_single_data csd; | ||
139 | u64 fifo_time; | ||
140 | }; | ||
141 | |||
142 | struct request_queue *q; | 144 | struct request_queue *q; |
143 | struct blk_mq_ctx *mq_ctx; | 145 | struct blk_mq_ctx *mq_ctx; |
144 | 146 | ||
@@ -148,8 +150,6 @@ struct request { | |||
148 | 150 | ||
149 | int internal_tag; | 151 | int internal_tag; |
150 | 152 | ||
151 | unsigned long atomic_flags; | ||
152 | |||
153 | /* the following two fields are internal, NEVER access directly */ | 153 | /* the following two fields are internal, NEVER access directly */ |
154 | unsigned int __data_len; /* total data len */ | 154 | unsigned int __data_len; /* total data len */ |
155 | int tag; | 155 | int tag; |
@@ -158,6 +158,8 @@ struct request { | |||
158 | struct bio *bio; | 158 | struct bio *bio; |
159 | struct bio *biotail; | 159 | struct bio *biotail; |
160 | 160 | ||
161 | struct list_head queuelist; | ||
162 | |||
161 | /* | 163 | /* |
162 | * The hash is used inside the scheduler, and killed once the | 164 | * The hash is used inside the scheduler, and killed once the |
163 | * request reaches the dispatch list. The ipi_list is only used | 165 | * request reaches the dispatch list. The ipi_list is only used |
@@ -205,19 +207,16 @@ struct request { | |||
205 | struct hd_struct *part; | 207 | struct hd_struct *part; |
206 | unsigned long start_time; | 208 | unsigned long start_time; |
207 | struct blk_issue_stat issue_stat; | 209 | struct blk_issue_stat issue_stat; |
208 | #ifdef CONFIG_BLK_CGROUP | ||
209 | struct request_list *rl; /* rl this rq is alloced from */ | ||
210 | unsigned long long start_time_ns; | ||
211 | unsigned long long io_start_time_ns; /* when passed to hardware */ | ||
212 | #endif | ||
213 | /* Number of scatter-gather DMA addr+len pairs after | 210 | /* Number of scatter-gather DMA addr+len pairs after |
214 | * physical address coalescing is performed. | 211 | * physical address coalescing is performed. |
215 | */ | 212 | */ |
216 | unsigned short nr_phys_segments; | 213 | unsigned short nr_phys_segments; |
214 | |||
217 | #if defined(CONFIG_BLK_DEV_INTEGRITY) | 215 | #if defined(CONFIG_BLK_DEV_INTEGRITY) |
218 | unsigned short nr_integrity_segments; | 216 | unsigned short nr_integrity_segments; |
219 | #endif | 217 | #endif |
220 | 218 | ||
219 | unsigned short write_hint; | ||
221 | unsigned short ioprio; | 220 | unsigned short ioprio; |
222 | 221 | ||
223 | unsigned int timeout; | 222 | unsigned int timeout; |
@@ -226,11 +225,37 @@ struct request { | |||
226 | 225 | ||
227 | unsigned int extra_len; /* length of alignment and padding */ | 226 | unsigned int extra_len; /* length of alignment and padding */ |
228 | 227 | ||
229 | unsigned short write_hint; | 228 | /* |
229 | * On blk-mq, the lower bits of ->gstate (generation number and | ||
230 | * state) carry the MQ_RQ_* state value and the upper bits the | ||
231 | * generation number which is monotonically incremented and used to | ||
232 | * distinguish the reuse instances. | ||
233 | * | ||
234 | * ->gstate_seq allows updates to ->gstate and other fields | ||
235 | * (currently ->deadline) during request start to be read | ||
236 | * atomically from the timeout path, so that it can operate on a | ||
237 | * coherent set of information. | ||
238 | */ | ||
239 | seqcount_t gstate_seq; | ||
240 | u64 gstate; | ||
241 | |||
242 | /* | ||
243 | * ->aborted_gstate is used by the timeout to claim a specific | ||
244 | * recycle instance of this request. See blk_mq_timeout_work(). | ||
245 | */ | ||
246 | struct u64_stats_sync aborted_gstate_sync; | ||
247 | u64 aborted_gstate; | ||
248 | |||
249 | /* access through blk_rq_set_deadline, blk_rq_deadline */ | ||
250 | unsigned long __deadline; | ||
230 | 251 | ||
231 | unsigned long deadline; | ||
232 | struct list_head timeout_list; | 252 | struct list_head timeout_list; |
233 | 253 | ||
254 | union { | ||
255 | struct __call_single_data csd; | ||
256 | u64 fifo_time; | ||
257 | }; | ||
258 | |||
234 | /* | 259 | /* |
235 | * completion callback. | 260 | * completion callback. |
236 | */ | 261 | */ |
@@ -239,6 +264,12 @@ struct request { | |||
239 | 264 | ||
240 | /* for bidi */ | 265 | /* for bidi */ |
241 | struct request *next_rq; | 266 | struct request *next_rq; |
267 | |||
268 | #ifdef CONFIG_BLK_CGROUP | ||
269 | struct request_list *rl; /* rl this rq is alloced from */ | ||
270 | unsigned long long start_time_ns; | ||
271 | unsigned long long io_start_time_ns; /* when passed to hardware */ | ||
272 | #endif | ||
242 | }; | 273 | }; |
243 | 274 | ||
244 | static inline bool blk_op_is_scsi(unsigned int op) | 275 | static inline bool blk_op_is_scsi(unsigned int op) |
@@ -564,6 +595,22 @@ struct request_queue { | |||
564 | struct queue_limits limits; | 595 | struct queue_limits limits; |
565 | 596 | ||
566 | /* | 597 | /* |
598 | * Zoned block device information for request dispatch control. | ||
599 | * nr_zones is the total number of zones of the device. This is always | ||
600 | * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones | ||
601 | * bits which indicates if a zone is conventional (bit clear) or | ||
602 | * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones | ||
603 | * bits which indicates if a zone is write locked, that is, if a write | ||
604 | * request targeting the zone was dispatched. All three fields are | ||
605 | * initialized by the low level device driver (e.g. scsi/sd.c). | ||
606 | * Stacking drivers (device mappers) may or may not initialize | ||
607 | * these fields. | ||
608 | */ | ||
609 | unsigned int nr_zones; | ||
610 | unsigned long *seq_zones_bitmap; | ||
611 | unsigned long *seq_zones_wlock; | ||
612 | |||
613 | /* | ||
567 | * sg stuff | 614 | * sg stuff |
568 | */ | 615 | */ |
569 | unsigned int sg_timeout; | 616 | unsigned int sg_timeout; |
@@ -807,6 +854,27 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q) | |||
807 | return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; | 854 | return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0; |
808 | } | 855 | } |
809 | 856 | ||
857 | static inline unsigned int blk_queue_nr_zones(struct request_queue *q) | ||
858 | { | ||
859 | return q->nr_zones; | ||
860 | } | ||
861 | |||
862 | static inline unsigned int blk_queue_zone_no(struct request_queue *q, | ||
863 | sector_t sector) | ||
864 | { | ||
865 | if (!blk_queue_is_zoned(q)) | ||
866 | return 0; | ||
867 | return sector >> ilog2(q->limits.chunk_sectors); | ||
868 | } | ||
869 | |||
870 | static inline bool blk_queue_zone_is_seq(struct request_queue *q, | ||
871 | sector_t sector) | ||
872 | { | ||
873 | if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap) | ||
874 | return false; | ||
875 | return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap); | ||
876 | } | ||
877 | |||
810 | static inline bool rq_is_sync(struct request *rq) | 878 | static inline bool rq_is_sync(struct request *rq) |
811 | { | 879 | { |
812 | return op_is_sync(rq->cmd_flags); | 880 | return op_is_sync(rq->cmd_flags); |
@@ -1046,6 +1114,16 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq) | |||
1046 | return blk_rq_cur_bytes(rq) >> 9; | 1114 | return blk_rq_cur_bytes(rq) >> 9; |
1047 | } | 1115 | } |
1048 | 1116 | ||
1117 | static inline unsigned int blk_rq_zone_no(struct request *rq) | ||
1118 | { | ||
1119 | return blk_queue_zone_no(rq->q, blk_rq_pos(rq)); | ||
1120 | } | ||
1121 | |||
1122 | static inline unsigned int blk_rq_zone_is_seq(struct request *rq) | ||
1123 | { | ||
1124 | return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq)); | ||
1125 | } | ||
1126 | |||
1049 | /* | 1127 | /* |
1050 | * Some commands like WRITE SAME have a payload or data transfer size which | 1128 | * Some commands like WRITE SAME have a payload or data transfer size which |
1051 | * is different from the size of the request. Any driver that supports such | 1129 | * is different from the size of the request. Any driver that supports such |
@@ -1595,7 +1673,15 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev) | |||
1595 | 1673 | ||
1596 | if (q) | 1674 | if (q) |
1597 | return blk_queue_zone_sectors(q); | 1675 | return blk_queue_zone_sectors(q); |
1676 | return 0; | ||
1677 | } | ||
1678 | |||
1679 | static inline unsigned int bdev_nr_zones(struct block_device *bdev) | ||
1680 | { | ||
1681 | struct request_queue *q = bdev_get_queue(bdev); | ||
1598 | 1682 | ||
1683 | if (q) | ||
1684 | return blk_queue_nr_zones(q); | ||
1599 | return 0; | 1685 | return 0; |
1600 | } | 1686 | } |
1601 | 1687 | ||
@@ -1731,8 +1817,6 @@ static inline bool req_gap_front_merge(struct request *req, struct bio *bio) | |||
1731 | 1817 | ||
1732 | int kblockd_schedule_work(struct work_struct *work); | 1818 | int kblockd_schedule_work(struct work_struct *work); |
1733 | int kblockd_schedule_work_on(int cpu, struct work_struct *work); | 1819 | int kblockd_schedule_work_on(int cpu, struct work_struct *work); |
1734 | int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay); | ||
1735 | int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); | ||
1736 | int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); | 1820 | int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay); |
1737 | 1821 | ||
1738 | #ifdef CONFIG_BLK_CGROUP | 1822 | #ifdef CONFIG_BLK_CGROUP |
@@ -1971,6 +2055,60 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | |||
1971 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); | 2055 | extern int bdev_read_page(struct block_device *, sector_t, struct page *); |
1972 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, | 2056 | extern int bdev_write_page(struct block_device *, sector_t, struct page *, |
1973 | struct writeback_control *); | 2057 | struct writeback_control *); |
2058 | |||
2059 | #ifdef CONFIG_BLK_DEV_ZONED | ||
2060 | bool blk_req_needs_zone_write_lock(struct request *rq); | ||
2061 | void __blk_req_zone_write_lock(struct request *rq); | ||
2062 | void __blk_req_zone_write_unlock(struct request *rq); | ||
2063 | |||
2064 | static inline void blk_req_zone_write_lock(struct request *rq) | ||
2065 | { | ||
2066 | if (blk_req_needs_zone_write_lock(rq)) | ||
2067 | __blk_req_zone_write_lock(rq); | ||
2068 | } | ||
2069 | |||
2070 | static inline void blk_req_zone_write_unlock(struct request *rq) | ||
2071 | { | ||
2072 | if (rq->rq_flags & RQF_ZONE_WRITE_LOCKED) | ||
2073 | __blk_req_zone_write_unlock(rq); | ||
2074 | } | ||
2075 | |||
2076 | static inline bool blk_req_zone_is_write_locked(struct request *rq) | ||
2077 | { | ||
2078 | return rq->q->seq_zones_wlock && | ||
2079 | test_bit(blk_rq_zone_no(rq), rq->q->seq_zones_wlock); | ||
2080 | } | ||
2081 | |||
2082 | static inline bool blk_req_can_dispatch_to_zone(struct request *rq) | ||
2083 | { | ||
2084 | if (!blk_req_needs_zone_write_lock(rq)) | ||
2085 | return true; | ||
2086 | return !blk_req_zone_is_write_locked(rq); | ||
2087 | } | ||
2088 | #else | ||
2089 | static inline bool blk_req_needs_zone_write_lock(struct request *rq) | ||
2090 | { | ||
2091 | return false; | ||
2092 | } | ||
2093 | |||
2094 | static inline void blk_req_zone_write_lock(struct request *rq) | ||
2095 | { | ||
2096 | } | ||
2097 | |||
2098 | static inline void blk_req_zone_write_unlock(struct request *rq) | ||
2099 | { | ||
2100 | } | ||
2101 | static inline bool blk_req_zone_is_write_locked(struct request *rq) | ||
2102 | { | ||
2103 | return false; | ||
2104 | } | ||
2105 | |||
2106 | static inline bool blk_req_can_dispatch_to_zone(struct request *rq) | ||
2107 | { | ||
2108 | return true; | ||
2109 | } | ||
2110 | #endif /* CONFIG_BLK_DEV_ZONED */ | ||
2111 | |||
1974 | #else /* CONFIG_BLOCK */ | 2112 | #else /* CONFIG_BLOCK */ |
1975 | 2113 | ||
1976 | struct block_device; | 2114 | struct block_device; |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index 0b25cf87b6d6..66df387106de 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/numa.h> | 17 | #include <linux/numa.h> |
18 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
19 | 19 | ||
20 | struct bpf_verifier_env; | ||
20 | struct perf_event; | 21 | struct perf_event; |
21 | struct bpf_prog; | 22 | struct bpf_prog; |
22 | struct bpf_map; | 23 | struct bpf_map; |
@@ -24,6 +25,7 @@ struct bpf_map; | |||
24 | /* map is generic key/value storage optionally accesible by eBPF programs */ | 25 | /* map is generic key/value storage optionally accesible by eBPF programs */ |
25 | struct bpf_map_ops { | 26 | struct bpf_map_ops { |
26 | /* funcs callable from userspace (via syscall) */ | 27 | /* funcs callable from userspace (via syscall) */ |
28 | int (*map_alloc_check)(union bpf_attr *attr); | ||
27 | struct bpf_map *(*map_alloc)(union bpf_attr *attr); | 29 | struct bpf_map *(*map_alloc)(union bpf_attr *attr); |
28 | void (*map_release)(struct bpf_map *map, struct file *map_file); | 30 | void (*map_release)(struct bpf_map *map, struct file *map_file); |
29 | void (*map_free)(struct bpf_map *map); | 31 | void (*map_free)(struct bpf_map *map); |
@@ -72,6 +74,33 @@ struct bpf_map { | |||
72 | char name[BPF_OBJ_NAME_LEN]; | 74 | char name[BPF_OBJ_NAME_LEN]; |
73 | }; | 75 | }; |
74 | 76 | ||
77 | struct bpf_offloaded_map; | ||
78 | |||
79 | struct bpf_map_dev_ops { | ||
80 | int (*map_get_next_key)(struct bpf_offloaded_map *map, | ||
81 | void *key, void *next_key); | ||
82 | int (*map_lookup_elem)(struct bpf_offloaded_map *map, | ||
83 | void *key, void *value); | ||
84 | int (*map_update_elem)(struct bpf_offloaded_map *map, | ||
85 | void *key, void *value, u64 flags); | ||
86 | int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); | ||
87 | }; | ||
88 | |||
89 | struct bpf_offloaded_map { | ||
90 | struct bpf_map map; | ||
91 | struct net_device *netdev; | ||
92 | const struct bpf_map_dev_ops *dev_ops; | ||
93 | void *dev_priv; | ||
94 | struct list_head offloads; | ||
95 | }; | ||
96 | |||
97 | static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) | ||
98 | { | ||
99 | return container_of(map, struct bpf_offloaded_map, map); | ||
100 | } | ||
101 | |||
102 | extern const struct bpf_map_ops bpf_map_offload_ops; | ||
103 | |||
75 | /* function argument constraints */ | 104 | /* function argument constraints */ |
76 | enum bpf_arg_type { | 105 | enum bpf_arg_type { |
77 | ARG_DONTCARE = 0, /* unused argument in helper function */ | 106 | ARG_DONTCARE = 0, /* unused argument in helper function */ |
@@ -193,14 +222,20 @@ struct bpf_verifier_ops { | |||
193 | struct bpf_prog *prog, u32 *target_size); | 222 | struct bpf_prog *prog, u32 *target_size); |
194 | }; | 223 | }; |
195 | 224 | ||
196 | struct bpf_dev_offload { | 225 | struct bpf_prog_offload_ops { |
226 | int (*insn_hook)(struct bpf_verifier_env *env, | ||
227 | int insn_idx, int prev_insn_idx); | ||
228 | }; | ||
229 | |||
230 | struct bpf_prog_offload { | ||
197 | struct bpf_prog *prog; | 231 | struct bpf_prog *prog; |
198 | struct net_device *netdev; | 232 | struct net_device *netdev; |
199 | void *dev_priv; | 233 | void *dev_priv; |
200 | struct list_head offloads; | 234 | struct list_head offloads; |
201 | bool dev_state; | 235 | bool dev_state; |
202 | bool verifier_running; | 236 | const struct bpf_prog_offload_ops *dev_ops; |
203 | wait_queue_head_t verifier_done; | 237 | void *jited_image; |
238 | u32 jited_len; | ||
204 | }; | 239 | }; |
205 | 240 | ||
206 | struct bpf_prog_aux { | 241 | struct bpf_prog_aux { |
@@ -209,6 +244,10 @@ struct bpf_prog_aux { | |||
209 | u32 max_ctx_offset; | 244 | u32 max_ctx_offset; |
210 | u32 stack_depth; | 245 | u32 stack_depth; |
211 | u32 id; | 246 | u32 id; |
247 | u32 func_cnt; | ||
248 | bool offload_requested; | ||
249 | struct bpf_prog **func; | ||
250 | void *jit_data; /* JIT specific data. arch dependent */ | ||
212 | struct latch_tree_node ksym_tnode; | 251 | struct latch_tree_node ksym_tnode; |
213 | struct list_head ksym_lnode; | 252 | struct list_head ksym_lnode; |
214 | const struct bpf_prog_ops *ops; | 253 | const struct bpf_prog_ops *ops; |
@@ -220,7 +259,7 @@ struct bpf_prog_aux { | |||
220 | #ifdef CONFIG_SECURITY | 259 | #ifdef CONFIG_SECURITY |
221 | void *security; | 260 | void *security; |
222 | #endif | 261 | #endif |
223 | struct bpf_dev_offload *offload; | 262 | struct bpf_prog_offload *offload; |
224 | union { | 263 | union { |
225 | struct work_struct work; | 264 | struct work_struct work; |
226 | struct rcu_head rcu; | 265 | struct rcu_head rcu; |
@@ -295,6 +334,9 @@ int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs, | |||
295 | 334 | ||
296 | void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, | 335 | void bpf_prog_array_delete_safe(struct bpf_prog_array __rcu *progs, |
297 | struct bpf_prog *old_prog); | 336 | struct bpf_prog *old_prog); |
337 | int bpf_prog_array_copy_info(struct bpf_prog_array __rcu *array, | ||
338 | __u32 __user *prog_ids, u32 request_cnt, | ||
339 | __u32 __user *prog_cnt); | ||
298 | int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, | 340 | int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, |
299 | struct bpf_prog *exclude_prog, | 341 | struct bpf_prog *exclude_prog, |
300 | struct bpf_prog *include_prog, | 342 | struct bpf_prog *include_prog, |
@@ -355,6 +397,9 @@ void bpf_prog_put(struct bpf_prog *prog); | |||
355 | int __bpf_prog_charge(struct user_struct *user, u32 pages); | 397 | int __bpf_prog_charge(struct user_struct *user, u32 pages); |
356 | void __bpf_prog_uncharge(struct user_struct *user, u32 pages); | 398 | void __bpf_prog_uncharge(struct user_struct *user, u32 pages); |
357 | 399 | ||
400 | void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock); | ||
401 | void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock); | ||
402 | |||
358 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); | 403 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
359 | struct bpf_map *__bpf_map_get(struct fd f); | 404 | struct bpf_map *__bpf_map_get(struct fd f); |
360 | struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); | 405 | struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref); |
@@ -363,6 +408,7 @@ void bpf_map_put(struct bpf_map *map); | |||
363 | int bpf_map_precharge_memlock(u32 pages); | 408 | int bpf_map_precharge_memlock(u32 pages); |
364 | void *bpf_map_area_alloc(size_t size, int numa_node); | 409 | void *bpf_map_area_alloc(size_t size, int numa_node); |
365 | void bpf_map_area_free(void *base); | 410 | void bpf_map_area_free(void *base); |
411 | void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); | ||
366 | 412 | ||
367 | extern int sysctl_unprivileged_bpf_disabled; | 413 | extern int sysctl_unprivileged_bpf_disabled; |
368 | 414 | ||
@@ -409,6 +455,7 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) | |||
409 | 455 | ||
410 | /* verify correctness of eBPF program */ | 456 | /* verify correctness of eBPF program */ |
411 | int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); | 457 | int bpf_check(struct bpf_prog **fp, union bpf_attr *attr); |
458 | void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); | ||
412 | 459 | ||
413 | /* Map specifics */ | 460 | /* Map specifics */ |
414 | struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); | 461 | struct net_device *__dev_map_lookup_elem(struct bpf_map *map, u32 key); |
@@ -536,14 +583,35 @@ bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); | |||
536 | 583 | ||
537 | int bpf_prog_offload_compile(struct bpf_prog *prog); | 584 | int bpf_prog_offload_compile(struct bpf_prog *prog); |
538 | void bpf_prog_offload_destroy(struct bpf_prog *prog); | 585 | void bpf_prog_offload_destroy(struct bpf_prog *prog); |
586 | int bpf_prog_offload_info_fill(struct bpf_prog_info *info, | ||
587 | struct bpf_prog *prog); | ||
588 | |||
589 | int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); | ||
590 | |||
591 | int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); | ||
592 | int bpf_map_offload_update_elem(struct bpf_map *map, | ||
593 | void *key, void *value, u64 flags); | ||
594 | int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); | ||
595 | int bpf_map_offload_get_next_key(struct bpf_map *map, | ||
596 | void *key, void *next_key); | ||
597 | |||
598 | bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map); | ||
539 | 599 | ||
540 | #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) | 600 | #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) |
541 | int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); | 601 | int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr); |
542 | 602 | ||
543 | static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) | 603 | static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) |
544 | { | 604 | { |
545 | return aux->offload; | 605 | return aux->offload_requested; |
546 | } | 606 | } |
607 | |||
608 | static inline bool bpf_map_is_dev_bound(struct bpf_map *map) | ||
609 | { | ||
610 | return unlikely(map->ops == &bpf_map_offload_ops); | ||
611 | } | ||
612 | |||
613 | struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); | ||
614 | void bpf_map_offload_map_free(struct bpf_map *map); | ||
547 | #else | 615 | #else |
548 | static inline int bpf_prog_offload_init(struct bpf_prog *prog, | 616 | static inline int bpf_prog_offload_init(struct bpf_prog *prog, |
549 | union bpf_attr *attr) | 617 | union bpf_attr *attr) |
@@ -555,9 +623,23 @@ static inline bool bpf_prog_is_dev_bound(struct bpf_prog_aux *aux) | |||
555 | { | 623 | { |
556 | return false; | 624 | return false; |
557 | } | 625 | } |
626 | |||
627 | static inline bool bpf_map_is_dev_bound(struct bpf_map *map) | ||
628 | { | ||
629 | return false; | ||
630 | } | ||
631 | |||
632 | static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) | ||
633 | { | ||
634 | return ERR_PTR(-EOPNOTSUPP); | ||
635 | } | ||
636 | |||
637 | static inline void bpf_map_offload_map_free(struct bpf_map *map) | ||
638 | { | ||
639 | } | ||
558 | #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ | 640 | #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ |
559 | 641 | ||
560 | #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) | 642 | #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_BPF_SYSCALL) && defined(CONFIG_INET) |
561 | struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); | 643 | struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key); |
562 | int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); | 644 | int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type); |
563 | #else | 645 | #else |
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 978c1d9c9383..19b8349a3809 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h | |||
@@ -42,7 +42,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops) | |||
42 | BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) | 42 | BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops) |
43 | #ifdef CONFIG_NET | 43 | #ifdef CONFIG_NET |
44 | BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) | 44 | BPF_MAP_TYPE(BPF_MAP_TYPE_DEVMAP, dev_map_ops) |
45 | #ifdef CONFIG_STREAM_PARSER | 45 | #if defined(CONFIG_STREAM_PARSER) && defined(CONFIG_INET) |
46 | BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) | 46 | BPF_MAP_TYPE(BPF_MAP_TYPE_SOCKMAP, sock_map_ops) |
47 | #endif | 47 | #endif |
48 | BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) | 48 | BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops) |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 1632bb13ad8a..6b66cd1aa0b9 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
@@ -76,6 +76,14 @@ struct bpf_reg_state { | |||
76 | s64 smax_value; /* maximum possible (s64)value */ | 76 | s64 smax_value; /* maximum possible (s64)value */ |
77 | u64 umin_value; /* minimum possible (u64)value */ | 77 | u64 umin_value; /* minimum possible (u64)value */ |
78 | u64 umax_value; /* maximum possible (u64)value */ | 78 | u64 umax_value; /* maximum possible (u64)value */ |
79 | /* Inside the callee two registers can be both PTR_TO_STACK like | ||
80 | * R1=fp-8 and R2=fp-8, but one of them points to this function stack | ||
81 | * while another to the caller's stack. To differentiate them 'frameno' | ||
82 | * is used which is an index in bpf_verifier_state->frame[] array | ||
83 | * pointing to bpf_func_state. | ||
84 | * This field must be second to last, for states_equal() reasons. | ||
85 | */ | ||
86 | u32 frameno; | ||
79 | /* This field must be last, for states_equal() reasons. */ | 87 | /* This field must be last, for states_equal() reasons. */ |
80 | enum bpf_reg_liveness live; | 88 | enum bpf_reg_liveness live; |
81 | }; | 89 | }; |
@@ -83,7 +91,8 @@ struct bpf_reg_state { | |||
83 | enum bpf_stack_slot_type { | 91 | enum bpf_stack_slot_type { |
84 | STACK_INVALID, /* nothing was stored in this stack slot */ | 92 | STACK_INVALID, /* nothing was stored in this stack slot */ |
85 | STACK_SPILL, /* register spilled into stack */ | 93 | STACK_SPILL, /* register spilled into stack */ |
86 | STACK_MISC /* BPF program wrote some data into this slot */ | 94 | STACK_MISC, /* BPF program wrote some data into this slot */ |
95 | STACK_ZERO, /* BPF program wrote constant zero */ | ||
87 | }; | 96 | }; |
88 | 97 | ||
89 | #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ | 98 | #define BPF_REG_SIZE 8 /* size of eBPF register in bytes */ |
@@ -96,13 +105,34 @@ struct bpf_stack_state { | |||
96 | /* state of the program: | 105 | /* state of the program: |
97 | * type of all registers and stack info | 106 | * type of all registers and stack info |
98 | */ | 107 | */ |
99 | struct bpf_verifier_state { | 108 | struct bpf_func_state { |
100 | struct bpf_reg_state regs[MAX_BPF_REG]; | 109 | struct bpf_reg_state regs[MAX_BPF_REG]; |
101 | struct bpf_verifier_state *parent; | 110 | struct bpf_verifier_state *parent; |
111 | /* index of call instruction that called into this func */ | ||
112 | int callsite; | ||
113 | /* stack frame number of this function state from pov of | ||
114 | * enclosing bpf_verifier_state. | ||
115 | * 0 = main function, 1 = first callee. | ||
116 | */ | ||
117 | u32 frameno; | ||
118 | /* subprog number == index within subprog_stack_depth | ||
119 | * zero == main subprog | ||
120 | */ | ||
121 | u32 subprogno; | ||
122 | |||
123 | /* should be second to last. See copy_func_state() */ | ||
102 | int allocated_stack; | 124 | int allocated_stack; |
103 | struct bpf_stack_state *stack; | 125 | struct bpf_stack_state *stack; |
104 | }; | 126 | }; |
105 | 127 | ||
128 | #define MAX_CALL_FRAMES 8 | ||
129 | struct bpf_verifier_state { | ||
130 | /* call stack tracking */ | ||
131 | struct bpf_func_state *frame[MAX_CALL_FRAMES]; | ||
132 | struct bpf_verifier_state *parent; | ||
133 | u32 curframe; | ||
134 | }; | ||
135 | |||
106 | /* linked list of verifier states used to prune search */ | 136 | /* linked list of verifier states used to prune search */ |
107 | struct bpf_verifier_state_list { | 137 | struct bpf_verifier_state_list { |
108 | struct bpf_verifier_state state; | 138 | struct bpf_verifier_state state; |
@@ -113,6 +143,7 @@ struct bpf_insn_aux_data { | |||
113 | union { | 143 | union { |
114 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ | 144 | enum bpf_reg_type ptr_type; /* pointer type for load/store insns */ |
115 | struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ | 145 | struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */ |
146 | s32 call_imm; /* saved imm field of call insn */ | ||
116 | }; | 147 | }; |
117 | int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ | 148 | int ctx_field_size; /* the ctx field size for load insn, maybe 0 */ |
118 | bool seen; /* this insn was processed by the verifier */ | 149 | bool seen; /* this insn was processed by the verifier */ |
@@ -135,11 +166,7 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifer_log *log) | |||
135 | return log->len_used >= log->len_total - 1; | 166 | return log->len_used >= log->len_total - 1; |
136 | } | 167 | } |
137 | 168 | ||
138 | struct bpf_verifier_env; | 169 | #define BPF_MAX_SUBPROGS 256 |
139 | struct bpf_ext_analyzer_ops { | ||
140 | int (*insn_hook)(struct bpf_verifier_env *env, | ||
141 | int insn_idx, int prev_insn_idx); | ||
142 | }; | ||
143 | 170 | ||
144 | /* single container for all structs | 171 | /* single container for all structs |
145 | * one verifier_env per bpf_check() call | 172 | * one verifier_env per bpf_check() call |
@@ -152,29 +179,31 @@ struct bpf_verifier_env { | |||
152 | bool strict_alignment; /* perform strict pointer alignment checks */ | 179 | bool strict_alignment; /* perform strict pointer alignment checks */ |
153 | struct bpf_verifier_state *cur_state; /* current verifier state */ | 180 | struct bpf_verifier_state *cur_state; /* current verifier state */ |
154 | struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ | 181 | struct bpf_verifier_state_list **explored_states; /* search pruning optimization */ |
155 | const struct bpf_ext_analyzer_ops *dev_ops; /* device analyzer ops */ | ||
156 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ | 182 | struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of map's used by eBPF program */ |
157 | u32 used_map_cnt; /* number of used maps */ | 183 | u32 used_map_cnt; /* number of used maps */ |
158 | u32 id_gen; /* used to generate unique reg IDs */ | 184 | u32 id_gen; /* used to generate unique reg IDs */ |
159 | bool allow_ptr_leaks; | 185 | bool allow_ptr_leaks; |
160 | bool seen_direct_write; | 186 | bool seen_direct_write; |
161 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ | 187 | struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */ |
162 | |||
163 | struct bpf_verifer_log log; | 188 | struct bpf_verifer_log log; |
189 | u32 subprog_starts[BPF_MAX_SUBPROGS]; | ||
190 | /* computes the stack depth of each bpf function */ | ||
191 | u16 subprog_stack_depth[BPF_MAX_SUBPROGS + 1]; | ||
192 | u32 subprog_cnt; | ||
164 | }; | 193 | }; |
165 | 194 | ||
195 | __printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env, | ||
196 | const char *fmt, ...); | ||
197 | |||
166 | static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) | 198 | static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env) |
167 | { | 199 | { |
168 | return env->cur_state->regs; | 200 | struct bpf_verifier_state *cur = env->cur_state; |
201 | |||
202 | return cur->frame[cur->curframe]->regs; | ||
169 | } | 203 | } |
170 | 204 | ||
171 | #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) | ||
172 | int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); | 205 | int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env); |
173 | #else | 206 | int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, |
174 | static inline int bpf_prog_offload_verifier_prep(struct bpf_verifier_env *env) | 207 | int insn_idx, int prev_insn_idx); |
175 | { | ||
176 | return -EOPNOTSUPP; | ||
177 | } | ||
178 | #endif | ||
179 | 208 | ||
180 | #endif /* _LINUX_BPF_VERIFIER_H */ | 209 | #endif /* _LINUX_BPF_VERIFIER_H */ |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 8ff86b4c1b8a..d3339dd48b1a 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
@@ -14,6 +14,7 @@ | |||
14 | #define PHY_ID_BCM5241 0x0143bc30 | 14 | #define PHY_ID_BCM5241 0x0143bc30 |
15 | #define PHY_ID_BCMAC131 0x0143bc70 | 15 | #define PHY_ID_BCMAC131 0x0143bc70 |
16 | #define PHY_ID_BCM5481 0x0143bca0 | 16 | #define PHY_ID_BCM5481 0x0143bca0 |
17 | #define PHY_ID_BCM5395 0x0143bcf0 | ||
17 | #define PHY_ID_BCM54810 0x03625d00 | 18 | #define PHY_ID_BCM54810 0x03625d00 |
18 | #define PHY_ID_BCM5482 0x0143bcb0 | 19 | #define PHY_ID_BCM5482 0x0143bcb0 |
19 | #define PHY_ID_BCM5411 0x00206070 | 20 | #define PHY_ID_BCM5411 0x00206070 |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index 8b1bf8d3d4a2..894e5d125de6 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -81,11 +81,14 @@ struct buffer_head { | |||
81 | /* | 81 | /* |
82 | * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() | 82 | * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() |
83 | * and buffer_foo() functions. | 83 | * and buffer_foo() functions. |
84 | * To avoid reset buffer flags that are already set, because that causes | ||
85 | * a costly cache line transition, check the flag first. | ||
84 | */ | 86 | */ |
85 | #define BUFFER_FNS(bit, name) \ | 87 | #define BUFFER_FNS(bit, name) \ |
86 | static __always_inline void set_buffer_##name(struct buffer_head *bh) \ | 88 | static __always_inline void set_buffer_##name(struct buffer_head *bh) \ |
87 | { \ | 89 | { \ |
88 | set_bit(BH_##bit, &(bh)->b_state); \ | 90 | if (!test_bit(BH_##bit, &(bh)->b_state)) \ |
91 | set_bit(BH_##bit, &(bh)->b_state); \ | ||
89 | } \ | 92 | } \ |
90 | static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ | 93 | static __always_inline void clear_buffer_##name(struct buffer_head *bh) \ |
91 | { \ | 94 | { \ |
@@ -151,7 +154,6 @@ void buffer_check_dirty_writeback(struct page *page, | |||
151 | 154 | ||
152 | void mark_buffer_dirty(struct buffer_head *bh); | 155 | void mark_buffer_dirty(struct buffer_head *bh); |
153 | void mark_buffer_write_io_error(struct buffer_head *bh); | 156 | void mark_buffer_write_io_error(struct buffer_head *bh); |
154 | void init_buffer(struct buffer_head *, bh_end_io_t *, void *); | ||
155 | void touch_buffer(struct buffer_head *bh); | 157 | void touch_buffer(struct buffer_head *bh); |
156 | void set_bh_page(struct buffer_head *bh, | 158 | void set_bh_page(struct buffer_head *bh, |
157 | struct page *page, unsigned long offset); | 159 | struct page *page, unsigned long offset); |
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index 3efed0d742a0..43d1fd50d433 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h | |||
@@ -8,7 +8,6 @@ | |||
8 | #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) | 8 | #define __BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) |
9 | #define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) | 9 | #define BUILD_BUG_ON_NOT_POWER_OF_2(n) (0) |
10 | #define BUILD_BUG_ON_ZERO(e) (0) | 10 | #define BUILD_BUG_ON_ZERO(e) (0) |
11 | #define BUILD_BUG_ON_NULL(e) ((void *)0) | ||
12 | #define BUILD_BUG_ON_INVALID(e) (0) | 11 | #define BUILD_BUG_ON_INVALID(e) (0) |
13 | #define BUILD_BUG_ON_MSG(cond, msg) (0) | 12 | #define BUILD_BUG_ON_MSG(cond, msg) (0) |
14 | #define BUILD_BUG_ON(condition) (0) | 13 | #define BUILD_BUG_ON(condition) (0) |
@@ -28,7 +27,6 @@ | |||
28 | * aren't permitted). | 27 | * aren't permitted). |
29 | */ | 28 | */ |
30 | #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) | 29 | #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); })) |
31 | #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:(-!!(e)); })) | ||
32 | 30 | ||
33 | /* | 31 | /* |
34 | * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the | 32 | * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the |
diff --git a/include/linux/bvec.h b/include/linux/bvec.h index ec8a4d7af6bd..fe7a22dd133b 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h | |||
@@ -125,4 +125,13 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv, | |||
125 | ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ | 125 | ((bvl = bvec_iter_bvec((bio_vec), (iter))), 1); \ |
126 | bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) | 126 | bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len)) |
127 | 127 | ||
128 | /* for iterating one bio from start to end */ | ||
129 | #define BVEC_ITER_ALL_INIT (struct bvec_iter) \ | ||
130 | { \ | ||
131 | .bi_sector = 0, \ | ||
132 | .bi_size = UINT_MAX, \ | ||
133 | .bi_idx = 0, \ | ||
134 | .bi_bvec_done = 0, \ | ||
135 | } | ||
136 | |||
128 | #endif /* __LINUX_BVEC_ITER_H */ | 137 | #endif /* __LINUX_BVEC_ITER_H */ |
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h index 61f1cf2d9f44..055aaf5ed9af 100644 --- a/include/linux/can/dev.h +++ b/include/linux/can/dev.h | |||
@@ -46,6 +46,7 @@ struct can_priv { | |||
46 | unsigned int bitrate_const_cnt; | 46 | unsigned int bitrate_const_cnt; |
47 | const u32 *data_bitrate_const; | 47 | const u32 *data_bitrate_const; |
48 | unsigned int data_bitrate_const_cnt; | 48 | unsigned int data_bitrate_const_cnt; |
49 | u32 bitrate_max; | ||
49 | struct can_clock clock; | 50 | struct can_clock clock; |
50 | 51 | ||
51 | enum can_state state; | 52 | enum can_state state; |
@@ -166,6 +167,12 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, | |||
166 | unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); | 167 | unsigned int can_get_echo_skb(struct net_device *dev, unsigned int idx); |
167 | void can_free_echo_skb(struct net_device *dev, unsigned int idx); | 168 | void can_free_echo_skb(struct net_device *dev, unsigned int idx); |
168 | 169 | ||
170 | #ifdef CONFIG_OF | ||
171 | void of_can_transceiver(struct net_device *dev); | ||
172 | #else | ||
173 | static inline void of_can_transceiver(struct net_device *dev) { } | ||
174 | #endif | ||
175 | |||
169 | struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); | 176 | struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf); |
170 | struct sk_buff *alloc_canfd_skb(struct net_device *dev, | 177 | struct sk_buff *alloc_canfd_skb(struct net_device *dev, |
171 | struct canfd_frame **cfd); | 178 | struct canfd_frame **cfd); |
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8b7fd8eeccee..9f242b876fde 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
@@ -561,7 +561,7 @@ struct cftype { | |||
561 | 561 | ||
562 | /* | 562 | /* |
563 | * Control Group subsystem type. | 563 | * Control Group subsystem type. |
564 | * See Documentation/cgroups/cgroups.txt for details | 564 | * See Documentation/cgroup-v1/cgroups.txt for details |
565 | */ | 565 | */ |
566 | struct cgroup_subsys { | 566 | struct cgroup_subsys { |
567 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); | 567 | struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); |
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 7c925e6211f1..f711be6e8c44 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h | |||
@@ -20,6 +20,8 @@ | |||
20 | * flags used across common struct clk. these flags should only affect the | 20 | * flags used across common struct clk. these flags should only affect the |
21 | * top-level framework. custom flags for dealing with hardware specifics | 21 | * top-level framework. custom flags for dealing with hardware specifics |
22 | * belong in struct clk_foo | 22 | * belong in struct clk_foo |
23 | * | ||
24 | * Please update clk_flags[] in drivers/clk/clk.c when making changes here! | ||
23 | */ | 25 | */ |
24 | #define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ | 26 | #define CLK_SET_RATE_GATE BIT(0) /* must be gated across rate change */ |
25 | #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ | 27 | #define CLK_SET_PARENT_GATE BIT(1) /* must be gated across re-parent */ |
@@ -412,7 +414,7 @@ extern const struct clk_ops clk_divider_ro_ops; | |||
412 | 414 | ||
413 | unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, | 415 | unsigned long divider_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, |
414 | unsigned int val, const struct clk_div_table *table, | 416 | unsigned int val, const struct clk_div_table *table, |
415 | unsigned long flags); | 417 | unsigned long flags, unsigned long width); |
416 | long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, | 418 | long divider_round_rate_parent(struct clk_hw *hw, struct clk_hw *parent, |
417 | unsigned long rate, unsigned long *prate, | 419 | unsigned long rate, unsigned long *prate, |
418 | const struct clk_div_table *table, | 420 | const struct clk_div_table *table, |
@@ -744,6 +746,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw); | |||
744 | unsigned long __clk_get_flags(struct clk *clk); | 746 | unsigned long __clk_get_flags(struct clk *clk); |
745 | unsigned long clk_hw_get_flags(const struct clk_hw *hw); | 747 | unsigned long clk_hw_get_flags(const struct clk_hw *hw); |
746 | bool clk_hw_is_prepared(const struct clk_hw *hw); | 748 | bool clk_hw_is_prepared(const struct clk_hw *hw); |
749 | bool clk_hw_rate_is_protected(const struct clk_hw *hw); | ||
747 | bool clk_hw_is_enabled(const struct clk_hw *hw); | 750 | bool clk_hw_is_enabled(const struct clk_hw *hw); |
748 | bool __clk_is_enabled(struct clk *clk); | 751 | bool __clk_is_enabled(struct clk *clk); |
749 | struct clk *__clk_lookup(const char *name); | 752 | struct clk *__clk_lookup(const char *name); |
@@ -806,6 +809,44 @@ extern struct of_device_id __clk_of_table; | |||
806 | } \ | 809 | } \ |
807 | OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) | 810 | OF_DECLARE_1(clk, name, compat, name##_of_clk_init_driver) |
808 | 811 | ||
812 | #define CLK_HW_INIT(_name, _parent, _ops, _flags) \ | ||
813 | (&(struct clk_init_data) { \ | ||
814 | .flags = _flags, \ | ||
815 | .name = _name, \ | ||
816 | .parent_names = (const char *[]) { _parent }, \ | ||
817 | .num_parents = 1, \ | ||
818 | .ops = _ops, \ | ||
819 | }) | ||
820 | |||
821 | #define CLK_HW_INIT_PARENTS(_name, _parents, _ops, _flags) \ | ||
822 | (&(struct clk_init_data) { \ | ||
823 | .flags = _flags, \ | ||
824 | .name = _name, \ | ||
825 | .parent_names = _parents, \ | ||
826 | .num_parents = ARRAY_SIZE(_parents), \ | ||
827 | .ops = _ops, \ | ||
828 | }) | ||
829 | |||
830 | #define CLK_HW_INIT_NO_PARENT(_name, _ops, _flags) \ | ||
831 | (&(struct clk_init_data) { \ | ||
832 | .flags = _flags, \ | ||
833 | .name = _name, \ | ||
834 | .parent_names = NULL, \ | ||
835 | .num_parents = 0, \ | ||
836 | .ops = _ops, \ | ||
837 | }) | ||
838 | |||
839 | #define CLK_FIXED_FACTOR(_struct, _name, _parent, \ | ||
840 | _div, _mult, _flags) \ | ||
841 | struct clk_fixed_factor _struct = { \ | ||
842 | .div = _div, \ | ||
843 | .mult = _mult, \ | ||
844 | .hw.init = CLK_HW_INIT(_name, \ | ||
845 | _parent, \ | ||
846 | &clk_fixed_factor_ops, \ | ||
847 | _flags), \ | ||
848 | } | ||
849 | |||
809 | #ifdef CONFIG_OF | 850 | #ifdef CONFIG_OF |
810 | int of_clk_add_provider(struct device_node *np, | 851 | int of_clk_add_provider(struct device_node *np, |
811 | struct clk *(*clk_src_get)(struct of_phandle_args *args, | 852 | struct clk *(*clk_src_get)(struct of_phandle_args *args, |
diff --git a/include/linux/clk.h b/include/linux/clk.h index 12c96d94d1fa..4c4ef9f34db3 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
@@ -331,6 +331,38 @@ struct clk *devm_clk_get(struct device *dev, const char *id); | |||
331 | */ | 331 | */ |
332 | struct clk *devm_get_clk_from_child(struct device *dev, | 332 | struct clk *devm_get_clk_from_child(struct device *dev, |
333 | struct device_node *np, const char *con_id); | 333 | struct device_node *np, const char *con_id); |
334 | /** | ||
335 | * clk_rate_exclusive_get - get exclusivity over the rate control of a | ||
336 | * producer | ||
337 | * @clk: clock source | ||
338 | * | ||
339 | * This function allows drivers to get exclusive control over the rate of a | ||
340 | * provider. It prevents any other consumer to execute, even indirectly, | ||
341 | * opereation which could alter the rate of the provider or cause glitches | ||
342 | * | ||
343 | * If exlusivity is claimed more than once on clock, even by the same driver, | ||
344 | * the rate effectively gets locked as exclusivity can't be preempted. | ||
345 | * | ||
346 | * Must not be called from within atomic context. | ||
347 | * | ||
348 | * Returns success (0) or negative errno. | ||
349 | */ | ||
350 | int clk_rate_exclusive_get(struct clk *clk); | ||
351 | |||
352 | /** | ||
353 | * clk_rate_exclusive_put - release exclusivity over the rate control of a | ||
354 | * producer | ||
355 | * @clk: clock source | ||
356 | * | ||
357 | * This function allows drivers to release the exclusivity it previously got | ||
358 | * from clk_rate_exclusive_get() | ||
359 | * | ||
360 | * The caller must balance the number of clk_rate_exclusive_get() and | ||
361 | * clk_rate_exclusive_put() calls. | ||
362 | * | ||
363 | * Must not be called from within atomic context. | ||
364 | */ | ||
365 | void clk_rate_exclusive_put(struct clk *clk); | ||
334 | 366 | ||
335 | /** | 367 | /** |
336 | * clk_enable - inform the system when the clock source should be running. | 368 | * clk_enable - inform the system when the clock source should be running. |
@@ -473,6 +505,23 @@ long clk_round_rate(struct clk *clk, unsigned long rate); | |||
473 | int clk_set_rate(struct clk *clk, unsigned long rate); | 505 | int clk_set_rate(struct clk *clk, unsigned long rate); |
474 | 506 | ||
475 | /** | 507 | /** |
508 | * clk_set_rate_exclusive- set the clock rate and claim exclusivity over | ||
509 | * clock source | ||
510 | * @clk: clock source | ||
511 | * @rate: desired clock rate in Hz | ||
512 | * | ||
513 | * This helper function allows drivers to atomically set the rate of a producer | ||
514 | * and claim exclusivity over the rate control of the producer. | ||
515 | * | ||
516 | * It is essentially a combination of clk_set_rate() and | ||
517 | * clk_rate_exclusite_get(). Caller must balance this call with a call to | ||
518 | * clk_rate_exclusive_put() | ||
519 | * | ||
520 | * Returns success (0) or negative errno. | ||
521 | */ | ||
522 | int clk_set_rate_exclusive(struct clk *clk, unsigned long rate); | ||
523 | |||
524 | /** | ||
476 | * clk_has_parent - check if a clock is a possible parent for another | 525 | * clk_has_parent - check if a clock is a possible parent for another |
477 | * @clk: clock source | 526 | * @clk: clock source |
478 | * @parent: parent clock source | 527 | * @parent: parent clock source |
@@ -583,6 +632,14 @@ static inline void clk_bulk_put(int num_clks, struct clk_bulk_data *clks) {} | |||
583 | 632 | ||
584 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} | 633 | static inline void devm_clk_put(struct device *dev, struct clk *clk) {} |
585 | 634 | ||
635 | |||
636 | static inline int clk_rate_exclusive_get(struct clk *clk) | ||
637 | { | ||
638 | return 0; | ||
639 | } | ||
640 | |||
641 | static inline void clk_rate_exclusive_put(struct clk *clk) {} | ||
642 | |||
586 | static inline int clk_enable(struct clk *clk) | 643 | static inline int clk_enable(struct clk *clk) |
587 | { | 644 | { |
588 | return 0; | 645 | return 0; |
@@ -609,6 +666,11 @@ static inline int clk_set_rate(struct clk *clk, unsigned long rate) | |||
609 | return 0; | 666 | return 0; |
610 | } | 667 | } |
611 | 668 | ||
669 | static inline int clk_set_rate_exclusive(struct clk *clk, unsigned long rate) | ||
670 | { | ||
671 | return 0; | ||
672 | } | ||
673 | |||
612 | static inline long clk_round_rate(struct clk *clk, unsigned long rate) | 674 | static inline long clk_round_rate(struct clk *clk, unsigned long rate) |
613 | { | 675 | { |
614 | return 0; | 676 | return 0; |
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 2eabc862abdb..4890ff033220 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #ifndef __CLKDEV_H | 12 | #ifndef __CLKDEV_H |
13 | #define __CLKDEV_H | 13 | #define __CLKDEV_H |
14 | 14 | ||
15 | #include <asm/clkdev.h> | 15 | #include <linux/slab.h> |
16 | 16 | ||
17 | struct clk; | 17 | struct clk; |
18 | struct clk_hw; | 18 | struct clk_hw; |
@@ -52,9 +52,4 @@ int clk_add_alias(const char *, const char *, const char *, struct device *); | |||
52 | int clk_register_clkdev(struct clk *, const char *, const char *); | 52 | int clk_register_clkdev(struct clk *, const char *, const char *); |
53 | int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); | 53 | int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); |
54 | 54 | ||
55 | #ifdef CONFIG_COMMON_CLK | ||
56 | int __clk_get(struct clk *clk); | ||
57 | void __clk_put(struct clk *clk); | ||
58 | #endif | ||
59 | |||
60 | #endif | 55 | #endif |
diff --git a/include/linux/compat.h b/include/linux/compat.h index 0fc36406f32c..8a9643857c4a 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
@@ -157,6 +157,104 @@ struct compat_sigaction { | |||
157 | compat_sigset_t sa_mask __packed; | 157 | compat_sigset_t sa_mask __packed; |
158 | }; | 158 | }; |
159 | 159 | ||
160 | typedef union compat_sigval { | ||
161 | compat_int_t sival_int; | ||
162 | compat_uptr_t sival_ptr; | ||
163 | } compat_sigval_t; | ||
164 | |||
165 | typedef struct compat_siginfo { | ||
166 | int si_signo; | ||
167 | #ifndef __ARCH_HAS_SWAPPED_SIGINFO | ||
168 | int si_errno; | ||
169 | int si_code; | ||
170 | #else | ||
171 | int si_code; | ||
172 | int si_errno; | ||
173 | #endif | ||
174 | |||
175 | union { | ||
176 | int _pad[128/sizeof(int) - 3]; | ||
177 | |||
178 | /* kill() */ | ||
179 | struct { | ||
180 | compat_pid_t _pid; /* sender's pid */ | ||
181 | __compat_uid32_t _uid; /* sender's uid */ | ||
182 | } _kill; | ||
183 | |||
184 | /* POSIX.1b timers */ | ||
185 | struct { | ||
186 | compat_timer_t _tid; /* timer id */ | ||
187 | int _overrun; /* overrun count */ | ||
188 | compat_sigval_t _sigval; /* same as below */ | ||
189 | } _timer; | ||
190 | |||
191 | /* POSIX.1b signals */ | ||
192 | struct { | ||
193 | compat_pid_t _pid; /* sender's pid */ | ||
194 | __compat_uid32_t _uid; /* sender's uid */ | ||
195 | compat_sigval_t _sigval; | ||
196 | } _rt; | ||
197 | |||
198 | /* SIGCHLD */ | ||
199 | struct { | ||
200 | compat_pid_t _pid; /* which child */ | ||
201 | __compat_uid32_t _uid; /* sender's uid */ | ||
202 | int _status; /* exit code */ | ||
203 | compat_clock_t _utime; | ||
204 | compat_clock_t _stime; | ||
205 | } _sigchld; | ||
206 | |||
207 | #ifdef CONFIG_X86_X32_ABI | ||
208 | /* SIGCHLD (x32 version) */ | ||
209 | struct { | ||
210 | compat_pid_t _pid; /* which child */ | ||
211 | __compat_uid32_t _uid; /* sender's uid */ | ||
212 | int _status; /* exit code */ | ||
213 | compat_s64 _utime; | ||
214 | compat_s64 _stime; | ||
215 | } _sigchld_x32; | ||
216 | #endif | ||
217 | |||
218 | /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGTRAP, SIGEMT */ | ||
219 | struct { | ||
220 | compat_uptr_t _addr; /* faulting insn/memory ref. */ | ||
221 | #ifdef __ARCH_SI_TRAPNO | ||
222 | int _trapno; /* TRAP # which caused the signal */ | ||
223 | #endif | ||
224 | union { | ||
225 | /* | ||
226 | * used when si_code=BUS_MCEERR_AR or | ||
227 | * used when si_code=BUS_MCEERR_AO | ||
228 | */ | ||
229 | short int _addr_lsb; /* Valid LSB of the reported address. */ | ||
230 | /* used when si_code=SEGV_BNDERR */ | ||
231 | struct { | ||
232 | short _dummy_bnd; | ||
233 | compat_uptr_t _lower; | ||
234 | compat_uptr_t _upper; | ||
235 | } _addr_bnd; | ||
236 | /* used when si_code=SEGV_PKUERR */ | ||
237 | struct { | ||
238 | short _dummy_pkey; | ||
239 | u32 _pkey; | ||
240 | } _addr_pkey; | ||
241 | }; | ||
242 | } _sigfault; | ||
243 | |||
244 | /* SIGPOLL */ | ||
245 | struct { | ||
246 | compat_long_t _band; /* POLL_IN, POLL_OUT, POLL_MSG */ | ||
247 | int _fd; | ||
248 | } _sigpoll; | ||
249 | |||
250 | struct { | ||
251 | compat_uptr_t _call_addr; /* calling user insn */ | ||
252 | int _syscall; /* triggering system call number */ | ||
253 | unsigned int _arch; /* AUDIT_ARCH_* of syscall */ | ||
254 | } _sigsys; | ||
255 | } _sifields; | ||
256 | } compat_siginfo_t; | ||
257 | |||
160 | /* | 258 | /* |
161 | * These functions operate on 32- or 64-bit specs depending on | 259 | * These functions operate on 32- or 64-bit specs depending on |
162 | * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. | 260 | * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments. |
@@ -412,7 +510,7 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask, | |||
412 | unsigned long bitmap_size); | 510 | unsigned long bitmap_size); |
413 | long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, | 511 | long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask, |
414 | unsigned long bitmap_size); | 512 | unsigned long bitmap_size); |
415 | int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from); | 513 | int copy_siginfo_from_user32(siginfo_t *to, const struct compat_siginfo __user *from); |
416 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); | 514 | int copy_siginfo_to_user32(struct compat_siginfo __user *to, const siginfo_t *from); |
417 | int get_compat_sigevent(struct sigevent *event, | 515 | int get_compat_sigevent(struct sigevent *event, |
418 | const struct compat_sigevent __user *u_event); | 516 | const struct compat_sigevent __user *u_event); |
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 3b609edffa8f..d02a4df3f473 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
@@ -19,3 +19,11 @@ | |||
19 | 19 | ||
20 | #define randomized_struct_fields_start struct { | 20 | #define randomized_struct_fields_start struct { |
21 | #define randomized_struct_fields_end }; | 21 | #define randomized_struct_fields_end }; |
22 | |||
23 | /* all clang versions usable with the kernel support KASAN ABI version 5 */ | ||
24 | #define KASAN_ABI_VERSION 5 | ||
25 | |||
26 | /* emulate gcc's __SANITIZE_ADDRESS__ flag */ | ||
27 | #if __has_feature(address_sanitizer) | ||
28 | #define __SANITIZE_ADDRESS__ | ||
29 | #endif | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 52e611ab9a6c..c2cc57a2f508 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
@@ -185,23 +185,21 @@ void __read_once_size(const volatile void *p, void *res, int size) | |||
185 | 185 | ||
186 | #ifdef CONFIG_KASAN | 186 | #ifdef CONFIG_KASAN |
187 | /* | 187 | /* |
188 | * This function is not 'inline' because __no_sanitize_address confilcts | 188 | * We can't declare function 'inline' because __no_sanitize_address confilcts |
189 | * with inlining. Attempt to inline it may cause a build failure. | 189 | * with inlining. Attempt to inline it may cause a build failure. |
190 | * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 | 190 | * https://gcc.gnu.org/bugzilla/show_bug.cgi?id=67368 |
191 | * '__maybe_unused' allows us to avoid defined-but-not-used warnings. | 191 | * '__maybe_unused' allows us to avoid defined-but-not-used warnings. |
192 | */ | 192 | */ |
193 | static __no_sanitize_address __maybe_unused | 193 | # define __no_kasan_or_inline __no_sanitize_address __maybe_unused |
194 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) | ||
195 | { | ||
196 | __READ_ONCE_SIZE; | ||
197 | } | ||
198 | #else | 194 | #else |
199 | static __always_inline | 195 | # define __no_kasan_or_inline __always_inline |
196 | #endif | ||
197 | |||
198 | static __no_kasan_or_inline | ||
200 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) | 199 | void __read_once_size_nocheck(const volatile void *p, void *res, int size) |
201 | { | 200 | { |
202 | __READ_ONCE_SIZE; | 201 | __READ_ONCE_SIZE; |
203 | } | 202 | } |
204 | #endif | ||
205 | 203 | ||
206 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) | 204 | static __always_inline void __write_once_size(volatile void *p, void *res, int size) |
207 | { | 205 | { |
@@ -240,6 +238,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
240 | * required ordering. | 238 | * required ordering. |
241 | */ | 239 | */ |
242 | #include <asm/barrier.h> | 240 | #include <asm/barrier.h> |
241 | #include <linux/kasan-checks.h> | ||
243 | 242 | ||
244 | #define __READ_ONCE(x, check) \ | 243 | #define __READ_ONCE(x, check) \ |
245 | ({ \ | 244 | ({ \ |
@@ -259,6 +258,13 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s | |||
259 | */ | 258 | */ |
260 | #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) | 259 | #define READ_ONCE_NOCHECK(x) __READ_ONCE(x, 0) |
261 | 260 | ||
261 | static __no_kasan_or_inline | ||
262 | unsigned long read_word_at_a_time(const void *addr) | ||
263 | { | ||
264 | kasan_check_read(addr, 1); | ||
265 | return *(unsigned long *)addr; | ||
266 | } | ||
267 | |||
262 | #define WRITE_ONCE(x, val) \ | 268 | #define WRITE_ONCE(x, val) \ |
263 | ({ \ | 269 | ({ \ |
264 | union { typeof(x) __val; char __c[1]; } __u = \ | 270 | union { typeof(x) __val; char __c[1]; } __u = \ |
diff --git a/include/linux/cper.h b/include/linux/cper.h index 723e952fde0d..d14ef4e77c8a 100644 --- a/include/linux/cper.h +++ b/include/linux/cper.h | |||
@@ -275,6 +275,50 @@ enum { | |||
275 | #define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) | 275 | #define CPER_ARM_INFO_FLAGS_PROPAGATED BIT(2) |
276 | #define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) | 276 | #define CPER_ARM_INFO_FLAGS_OVERFLOW BIT(3) |
277 | 277 | ||
278 | #define CPER_ARM_CACHE_ERROR 0 | ||
279 | #define CPER_ARM_TLB_ERROR 1 | ||
280 | #define CPER_ARM_BUS_ERROR 2 | ||
281 | #define CPER_ARM_VENDOR_ERROR 3 | ||
282 | #define CPER_ARM_MAX_TYPE CPER_ARM_VENDOR_ERROR | ||
283 | |||
284 | #define CPER_ARM_ERR_VALID_TRANSACTION_TYPE BIT(0) | ||
285 | #define CPER_ARM_ERR_VALID_OPERATION_TYPE BIT(1) | ||
286 | #define CPER_ARM_ERR_VALID_LEVEL BIT(2) | ||
287 | #define CPER_ARM_ERR_VALID_PROC_CONTEXT_CORRUPT BIT(3) | ||
288 | #define CPER_ARM_ERR_VALID_CORRECTED BIT(4) | ||
289 | #define CPER_ARM_ERR_VALID_PRECISE_PC BIT(5) | ||
290 | #define CPER_ARM_ERR_VALID_RESTARTABLE_PC BIT(6) | ||
291 | #define CPER_ARM_ERR_VALID_PARTICIPATION_TYPE BIT(7) | ||
292 | #define CPER_ARM_ERR_VALID_TIME_OUT BIT(8) | ||
293 | #define CPER_ARM_ERR_VALID_ADDRESS_SPACE BIT(9) | ||
294 | #define CPER_ARM_ERR_VALID_MEM_ATTRIBUTES BIT(10) | ||
295 | #define CPER_ARM_ERR_VALID_ACCESS_MODE BIT(11) | ||
296 | |||
297 | #define CPER_ARM_ERR_TRANSACTION_SHIFT 16 | ||
298 | #define CPER_ARM_ERR_TRANSACTION_MASK GENMASK(1,0) | ||
299 | #define CPER_ARM_ERR_OPERATION_SHIFT 18 | ||
300 | #define CPER_ARM_ERR_OPERATION_MASK GENMASK(3,0) | ||
301 | #define CPER_ARM_ERR_LEVEL_SHIFT 22 | ||
302 | #define CPER_ARM_ERR_LEVEL_MASK GENMASK(2,0) | ||
303 | #define CPER_ARM_ERR_PC_CORRUPT_SHIFT 25 | ||
304 | #define CPER_ARM_ERR_PC_CORRUPT_MASK GENMASK(0,0) | ||
305 | #define CPER_ARM_ERR_CORRECTED_SHIFT 26 | ||
306 | #define CPER_ARM_ERR_CORRECTED_MASK GENMASK(0,0) | ||
307 | #define CPER_ARM_ERR_PRECISE_PC_SHIFT 27 | ||
308 | #define CPER_ARM_ERR_PRECISE_PC_MASK GENMASK(0,0) | ||
309 | #define CPER_ARM_ERR_RESTARTABLE_PC_SHIFT 28 | ||
310 | #define CPER_ARM_ERR_RESTARTABLE_PC_MASK GENMASK(0,0) | ||
311 | #define CPER_ARM_ERR_PARTICIPATION_TYPE_SHIFT 29 | ||
312 | #define CPER_ARM_ERR_PARTICIPATION_TYPE_MASK GENMASK(1,0) | ||
313 | #define CPER_ARM_ERR_TIME_OUT_SHIFT 31 | ||
314 | #define CPER_ARM_ERR_TIME_OUT_MASK GENMASK(0,0) | ||
315 | #define CPER_ARM_ERR_ADDRESS_SPACE_SHIFT 32 | ||
316 | #define CPER_ARM_ERR_ADDRESS_SPACE_MASK GENMASK(1,0) | ||
317 | #define CPER_ARM_ERR_MEM_ATTRIBUTES_SHIFT 34 | ||
318 | #define CPER_ARM_ERR_MEM_ATTRIBUTES_MASK GENMASK(8,0) | ||
319 | #define CPER_ARM_ERR_ACCESS_MODE_SHIFT 43 | ||
320 | #define CPER_ARM_ERR_ACCESS_MODE_MASK GENMASK(0,0) | ||
321 | |||
278 | /* | 322 | /* |
279 | * All tables and structs must be byte-packed to match CPER | 323 | * All tables and structs must be byte-packed to match CPER |
280 | * specification, since the tables are provided by the system BIOS | 324 | * specification, since the tables are provided by the system BIOS |
@@ -494,6 +538,8 @@ struct cper_sec_pcie { | |||
494 | /* Reset to default packing */ | 538 | /* Reset to default packing */ |
495 | #pragma pack() | 539 | #pragma pack() |
496 | 540 | ||
541 | extern const char * const cper_proc_error_type_strs[4]; | ||
542 | |||
497 | u64 cper_next_record_id(void); | 543 | u64 cper_next_record_id(void); |
498 | const char *cper_severity_str(unsigned int); | 544 | const char *cper_severity_str(unsigned int); |
499 | const char *cper_mem_err_type_str(unsigned int); | 545 | const char *cper_mem_err_type_str(unsigned int); |
@@ -503,5 +549,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *, | |||
503 | struct cper_mem_err_compact *); | 549 | struct cper_mem_err_compact *); |
504 | const char *cper_mem_err_unpack(struct trace_seq *, | 550 | const char *cper_mem_err_unpack(struct trace_seq *, |
505 | struct cper_mem_err_compact *); | 551 | struct cper_mem_err_compact *); |
552 | void cper_print_proc_arm(const char *pfx, | ||
553 | const struct cper_sec_proc_arm *proc); | ||
506 | 554 | ||
507 | #endif | 555 | #endif |
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index 065f3a8eb486..21e8d248d956 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
@@ -629,6 +629,18 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, | |||
629 | for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) | 629 | for (pos = table; pos->frequency != CPUFREQ_TABLE_END; pos++) |
630 | 630 | ||
631 | /* | 631 | /* |
632 | * cpufreq_for_each_entry_idx - iterate over a cpufreq_frequency_table | ||
633 | * with index | ||
634 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. | ||
635 | * @table: the cpufreq_frequency_table * to iterate over. | ||
636 | * @idx: the table entry currently being processed | ||
637 | */ | ||
638 | |||
639 | #define cpufreq_for_each_entry_idx(pos, table, idx) \ | ||
640 | for (pos = table, idx = 0; pos->frequency != CPUFREQ_TABLE_END; \ | ||
641 | pos++, idx++) | ||
642 | |||
643 | /* | ||
632 | * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table | 644 | * cpufreq_for_each_valid_entry - iterate over a cpufreq_frequency_table |
633 | * excluding CPUFREQ_ENTRY_INVALID frequencies. | 645 | * excluding CPUFREQ_ENTRY_INVALID frequencies. |
634 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. | 646 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. |
@@ -641,6 +653,21 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev, | |||
641 | continue; \ | 653 | continue; \ |
642 | else | 654 | else |
643 | 655 | ||
656 | /* | ||
657 | * cpufreq_for_each_valid_entry_idx - iterate with index over a cpufreq | ||
658 | * frequency_table excluding CPUFREQ_ENTRY_INVALID frequencies. | ||
659 | * @pos: the cpufreq_frequency_table * to use as a loop cursor. | ||
660 | * @table: the cpufreq_frequency_table * to iterate over. | ||
661 | * @idx: the table entry currently being processed | ||
662 | */ | ||
663 | |||
664 | #define cpufreq_for_each_valid_entry_idx(pos, table, idx) \ | ||
665 | cpufreq_for_each_entry_idx(pos, table, idx) \ | ||
666 | if (pos->frequency == CPUFREQ_ENTRY_INVALID) \ | ||
667 | continue; \ | ||
668 | else | ||
669 | |||
670 | |||
644 | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, | 671 | int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, |
645 | struct cpufreq_frequency_table *table); | 672 | struct cpufreq_frequency_table *table); |
646 | 673 | ||
@@ -667,19 +694,20 @@ static inline int cpufreq_table_find_index_al(struct cpufreq_policy *policy, | |||
667 | unsigned int target_freq) | 694 | unsigned int target_freq) |
668 | { | 695 | { |
669 | struct cpufreq_frequency_table *table = policy->freq_table; | 696 | struct cpufreq_frequency_table *table = policy->freq_table; |
670 | struct cpufreq_frequency_table *pos, *best = table - 1; | 697 | struct cpufreq_frequency_table *pos; |
671 | unsigned int freq; | 698 | unsigned int freq; |
699 | int idx, best = -1; | ||
672 | 700 | ||
673 | cpufreq_for_each_valid_entry(pos, table) { | 701 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
674 | freq = pos->frequency; | 702 | freq = pos->frequency; |
675 | 703 | ||
676 | if (freq >= target_freq) | 704 | if (freq >= target_freq) |
677 | return pos - table; | 705 | return idx; |
678 | 706 | ||
679 | best = pos; | 707 | best = idx; |
680 | } | 708 | } |
681 | 709 | ||
682 | return best - table; | 710 | return best; |
683 | } | 711 | } |
684 | 712 | ||
685 | /* Find lowest freq at or above target in a table in descending order */ | 713 | /* Find lowest freq at or above target in a table in descending order */ |
@@ -687,28 +715,29 @@ static inline int cpufreq_table_find_index_dl(struct cpufreq_policy *policy, | |||
687 | unsigned int target_freq) | 715 | unsigned int target_freq) |
688 | { | 716 | { |
689 | struct cpufreq_frequency_table *table = policy->freq_table; | 717 | struct cpufreq_frequency_table *table = policy->freq_table; |
690 | struct cpufreq_frequency_table *pos, *best = table - 1; | 718 | struct cpufreq_frequency_table *pos; |
691 | unsigned int freq; | 719 | unsigned int freq; |
720 | int idx, best = -1; | ||
692 | 721 | ||
693 | cpufreq_for_each_valid_entry(pos, table) { | 722 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
694 | freq = pos->frequency; | 723 | freq = pos->frequency; |
695 | 724 | ||
696 | if (freq == target_freq) | 725 | if (freq == target_freq) |
697 | return pos - table; | 726 | return idx; |
698 | 727 | ||
699 | if (freq > target_freq) { | 728 | if (freq > target_freq) { |
700 | best = pos; | 729 | best = idx; |
701 | continue; | 730 | continue; |
702 | } | 731 | } |
703 | 732 | ||
704 | /* No freq found above target_freq */ | 733 | /* No freq found above target_freq */ |
705 | if (best == table - 1) | 734 | if (best == -1) |
706 | return pos - table; | 735 | return idx; |
707 | 736 | ||
708 | return best - table; | 737 | return best; |
709 | } | 738 | } |
710 | 739 | ||
711 | return best - table; | 740 | return best; |
712 | } | 741 | } |
713 | 742 | ||
714 | /* Works only on sorted freq-tables */ | 743 | /* Works only on sorted freq-tables */ |
@@ -728,28 +757,29 @@ static inline int cpufreq_table_find_index_ah(struct cpufreq_policy *policy, | |||
728 | unsigned int target_freq) | 757 | unsigned int target_freq) |
729 | { | 758 | { |
730 | struct cpufreq_frequency_table *table = policy->freq_table; | 759 | struct cpufreq_frequency_table *table = policy->freq_table; |
731 | struct cpufreq_frequency_table *pos, *best = table - 1; | 760 | struct cpufreq_frequency_table *pos; |
732 | unsigned int freq; | 761 | unsigned int freq; |
762 | int idx, best = -1; | ||
733 | 763 | ||
734 | cpufreq_for_each_valid_entry(pos, table) { | 764 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
735 | freq = pos->frequency; | 765 | freq = pos->frequency; |
736 | 766 | ||
737 | if (freq == target_freq) | 767 | if (freq == target_freq) |
738 | return pos - table; | 768 | return idx; |
739 | 769 | ||
740 | if (freq < target_freq) { | 770 | if (freq < target_freq) { |
741 | best = pos; | 771 | best = idx; |
742 | continue; | 772 | continue; |
743 | } | 773 | } |
744 | 774 | ||
745 | /* No freq found below target_freq */ | 775 | /* No freq found below target_freq */ |
746 | if (best == table - 1) | 776 | if (best == -1) |
747 | return pos - table; | 777 | return idx; |
748 | 778 | ||
749 | return best - table; | 779 | return best; |
750 | } | 780 | } |
751 | 781 | ||
752 | return best - table; | 782 | return best; |
753 | } | 783 | } |
754 | 784 | ||
755 | /* Find highest freq at or below target in a table in descending order */ | 785 | /* Find highest freq at or below target in a table in descending order */ |
@@ -757,19 +787,20 @@ static inline int cpufreq_table_find_index_dh(struct cpufreq_policy *policy, | |||
757 | unsigned int target_freq) | 787 | unsigned int target_freq) |
758 | { | 788 | { |
759 | struct cpufreq_frequency_table *table = policy->freq_table; | 789 | struct cpufreq_frequency_table *table = policy->freq_table; |
760 | struct cpufreq_frequency_table *pos, *best = table - 1; | 790 | struct cpufreq_frequency_table *pos; |
761 | unsigned int freq; | 791 | unsigned int freq; |
792 | int idx, best = -1; | ||
762 | 793 | ||
763 | cpufreq_for_each_valid_entry(pos, table) { | 794 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
764 | freq = pos->frequency; | 795 | freq = pos->frequency; |
765 | 796 | ||
766 | if (freq <= target_freq) | 797 | if (freq <= target_freq) |
767 | return pos - table; | 798 | return idx; |
768 | 799 | ||
769 | best = pos; | 800 | best = idx; |
770 | } | 801 | } |
771 | 802 | ||
772 | return best - table; | 803 | return best; |
773 | } | 804 | } |
774 | 805 | ||
775 | /* Works only on sorted freq-tables */ | 806 | /* Works only on sorted freq-tables */ |
@@ -789,32 +820,33 @@ static inline int cpufreq_table_find_index_ac(struct cpufreq_policy *policy, | |||
789 | unsigned int target_freq) | 820 | unsigned int target_freq) |
790 | { | 821 | { |
791 | struct cpufreq_frequency_table *table = policy->freq_table; | 822 | struct cpufreq_frequency_table *table = policy->freq_table; |
792 | struct cpufreq_frequency_table *pos, *best = table - 1; | 823 | struct cpufreq_frequency_table *pos; |
793 | unsigned int freq; | 824 | unsigned int freq; |
825 | int idx, best = -1; | ||
794 | 826 | ||
795 | cpufreq_for_each_valid_entry(pos, table) { | 827 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
796 | freq = pos->frequency; | 828 | freq = pos->frequency; |
797 | 829 | ||
798 | if (freq == target_freq) | 830 | if (freq == target_freq) |
799 | return pos - table; | 831 | return idx; |
800 | 832 | ||
801 | if (freq < target_freq) { | 833 | if (freq < target_freq) { |
802 | best = pos; | 834 | best = idx; |
803 | continue; | 835 | continue; |
804 | } | 836 | } |
805 | 837 | ||
806 | /* No freq found below target_freq */ | 838 | /* No freq found below target_freq */ |
807 | if (best == table - 1) | 839 | if (best == -1) |
808 | return pos - table; | 840 | return idx; |
809 | 841 | ||
810 | /* Choose the closest freq */ | 842 | /* Choose the closest freq */ |
811 | if (target_freq - best->frequency > freq - target_freq) | 843 | if (target_freq - table[best].frequency > freq - target_freq) |
812 | return pos - table; | 844 | return idx; |
813 | 845 | ||
814 | return best - table; | 846 | return best; |
815 | } | 847 | } |
816 | 848 | ||
817 | return best - table; | 849 | return best; |
818 | } | 850 | } |
819 | 851 | ||
820 | /* Find closest freq to target in a table in descending order */ | 852 | /* Find closest freq to target in a table in descending order */ |
@@ -822,32 +854,33 @@ static inline int cpufreq_table_find_index_dc(struct cpufreq_policy *policy, | |||
822 | unsigned int target_freq) | 854 | unsigned int target_freq) |
823 | { | 855 | { |
824 | struct cpufreq_frequency_table *table = policy->freq_table; | 856 | struct cpufreq_frequency_table *table = policy->freq_table; |
825 | struct cpufreq_frequency_table *pos, *best = table - 1; | 857 | struct cpufreq_frequency_table *pos; |
826 | unsigned int freq; | 858 | unsigned int freq; |
859 | int idx, best = -1; | ||
827 | 860 | ||
828 | cpufreq_for_each_valid_entry(pos, table) { | 861 | cpufreq_for_each_valid_entry_idx(pos, table, idx) { |
829 | freq = pos->frequency; | 862 | freq = pos->frequency; |
830 | 863 | ||
831 | if (freq == target_freq) | 864 | if (freq == target_freq) |
832 | return pos - table; | 865 | return idx; |
833 | 866 | ||
834 | if (freq > target_freq) { | 867 | if (freq > target_freq) { |
835 | best = pos; | 868 | best = idx; |
836 | continue; | 869 | continue; |
837 | } | 870 | } |
838 | 871 | ||
839 | /* No freq found above target_freq */ | 872 | /* No freq found above target_freq */ |
840 | if (best == table - 1) | 873 | if (best == -1) |
841 | return pos - table; | 874 | return idx; |
842 | 875 | ||
843 | /* Choose the closest freq */ | 876 | /* Choose the closest freq */ |
844 | if (best->frequency - target_freq > target_freq - freq) | 877 | if (table[best].frequency - target_freq > target_freq - freq) |
845 | return pos - table; | 878 | return idx; |
846 | 879 | ||
847 | return best - table; | 880 | return best; |
848 | } | 881 | } |
849 | 882 | ||
850 | return best - table; | 883 | return best; |
851 | } | 884 | } |
852 | 885 | ||
853 | /* Works only on sorted freq-tables */ | 886 | /* Works only on sorted freq-tables */ |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index 1a32e558eb11..5172ad0daa7c 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
@@ -59,6 +59,7 @@ enum cpuhp_state { | |||
59 | CPUHP_PCI_XGENE_DEAD, | 59 | CPUHP_PCI_XGENE_DEAD, |
60 | CPUHP_IOMMU_INTEL_DEAD, | 60 | CPUHP_IOMMU_INTEL_DEAD, |
61 | CPUHP_LUSTRE_CFS_DEAD, | 61 | CPUHP_LUSTRE_CFS_DEAD, |
62 | CPUHP_AP_ARM_CACHE_B15_RAC_DEAD, | ||
62 | CPUHP_WORKQUEUE_PREP, | 63 | CPUHP_WORKQUEUE_PREP, |
63 | CPUHP_POWER_NUMA_PREPARE, | 64 | CPUHP_POWER_NUMA_PREPARE, |
64 | CPUHP_HRTIMERS_PREPARE, | 65 | CPUHP_HRTIMERS_PREPARE, |
@@ -109,6 +110,7 @@ enum cpuhp_state { | |||
109 | CPUHP_AP_PERF_XTENSA_STARTING, | 110 | CPUHP_AP_PERF_XTENSA_STARTING, |
110 | CPUHP_AP_PERF_METAG_STARTING, | 111 | CPUHP_AP_PERF_METAG_STARTING, |
111 | CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, | 112 | CPUHP_AP_MIPS_OP_LOONGSON3_STARTING, |
113 | CPUHP_AP_ARM_SDEI_STARTING, | ||
112 | CPUHP_AP_ARM_VFP_STARTING, | 114 | CPUHP_AP_ARM_VFP_STARTING, |
113 | CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, | 115 | CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING, |
114 | CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, | 116 | CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING, |
@@ -137,6 +139,7 @@ enum cpuhp_state { | |||
137 | CPUHP_AP_ARM64_ISNDEP_STARTING, | 139 | CPUHP_AP_ARM64_ISNDEP_STARTING, |
138 | CPUHP_AP_SMPCFD_DYING, | 140 | CPUHP_AP_SMPCFD_DYING, |
139 | CPUHP_AP_X86_TBOOT_DYING, | 141 | CPUHP_AP_X86_TBOOT_DYING, |
142 | CPUHP_AP_ARM_CACHE_B15_RAC_DYING, | ||
140 | CPUHP_AP_ONLINE, | 143 | CPUHP_AP_ONLINE, |
141 | CPUHP_TEARDOWN_CPU, | 144 | CPUHP_TEARDOWN_CPU, |
142 | CPUHP_AP_ONLINE_IDLE, | 145 | CPUHP_AP_ONLINE_IDLE, |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 8f7788d23b57..871f9e21810c 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
@@ -257,22 +257,30 @@ static inline int cpuidle_register_governor(struct cpuidle_governor *gov) | |||
257 | {return 0;} | 257 | {return 0;} |
258 | #endif | 258 | #endif |
259 | 259 | ||
260 | #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ | 260 | #define __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, is_retention) \ |
261 | ({ \ | 261 | ({ \ |
262 | int __ret; \ | 262 | int __ret = 0; \ |
263 | \ | 263 | \ |
264 | if (!idx) { \ | 264 | if (!idx) { \ |
265 | cpu_do_idle(); \ | 265 | cpu_do_idle(); \ |
266 | return idx; \ | 266 | return idx; \ |
267 | } \ | 267 | } \ |
268 | \ | 268 | \ |
269 | __ret = cpu_pm_enter(); \ | 269 | if (!is_retention) \ |
270 | if (!__ret) { \ | 270 | __ret = cpu_pm_enter(); \ |
271 | __ret = low_level_idle_enter(idx); \ | 271 | if (!__ret) { \ |
272 | cpu_pm_exit(); \ | 272 | __ret = low_level_idle_enter(idx); \ |
273 | } \ | 273 | if (!is_retention) \ |
274 | \ | 274 | cpu_pm_exit(); \ |
275 | __ret ? -1 : idx; \ | 275 | } \ |
276 | \ | ||
277 | __ret ? -1 : idx; \ | ||
276 | }) | 278 | }) |
277 | 279 | ||
280 | #define CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx) \ | ||
281 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 0) | ||
282 | |||
283 | #define CPU_PM_CPU_IDLE_ENTER_RETENTION(low_level_idle_enter, idx) \ | ||
284 | __CPU_PM_CPU_IDLE_ENTER(low_level_idle_enter, idx, 1) | ||
285 | |||
278 | #endif /* _LINUX_CPUIDLE_H */ | 286 | #endif /* _LINUX_CPUIDLE_H */ |
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h index 75b565194437..d4a2a7dcd72d 100644 --- a/include/linux/cpumask.h +++ b/include/linux/cpumask.h | |||
@@ -640,7 +640,7 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp) | |||
640 | /** | 640 | /** |
641 | * cpumask_size - size to allocate for a 'struct cpumask' in bytes | 641 | * cpumask_size - size to allocate for a 'struct cpumask' in bytes |
642 | */ | 642 | */ |
643 | static inline size_t cpumask_size(void) | 643 | static inline unsigned int cpumask_size(void) |
644 | { | 644 | { |
645 | return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); | 645 | return BITS_TO_LONGS(nr_cpumask_bits) * sizeof(long); |
646 | } | 646 | } |
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h index 1b8e41597ef5..934633a05d20 100644 --- a/include/linux/cpuset.h +++ b/include/linux/cpuset.h | |||
@@ -112,7 +112,7 @@ static inline int cpuset_do_slab_mem_spread(void) | |||
112 | return task_spread_slab(current); | 112 | return task_spread_slab(current); |
113 | } | 113 | } |
114 | 114 | ||
115 | extern int current_cpuset_is_being_rebound(void); | 115 | extern bool current_cpuset_is_being_rebound(void); |
116 | 116 | ||
117 | extern void rebuild_sched_domains(void); | 117 | extern void rebuild_sched_domains(void); |
118 | 118 | ||
@@ -247,9 +247,9 @@ static inline int cpuset_do_slab_mem_spread(void) | |||
247 | return 0; | 247 | return 0; |
248 | } | 248 | } |
249 | 249 | ||
250 | static inline int current_cpuset_is_being_rebound(void) | 250 | static inline bool current_cpuset_is_being_rebound(void) |
251 | { | 251 | { |
252 | return 0; | 252 | return false; |
253 | } | 253 | } |
254 | 254 | ||
255 | static inline void rebuild_sched_domains(void) | 255 | static inline void rebuild_sched_domains(void) |
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index a992e6ca2f1c..f7ac2aa93269 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h | |||
@@ -2,13 +2,13 @@ | |||
2 | #ifndef LINUX_CRASH_DUMP_H | 2 | #ifndef LINUX_CRASH_DUMP_H |
3 | #define LINUX_CRASH_DUMP_H | 3 | #define LINUX_CRASH_DUMP_H |
4 | 4 | ||
5 | #ifdef CONFIG_CRASH_DUMP | ||
6 | #include <linux/kexec.h> | 5 | #include <linux/kexec.h> |
7 | #include <linux/proc_fs.h> | 6 | #include <linux/proc_fs.h> |
8 | #include <linux/elf.h> | 7 | #include <linux/elf.h> |
9 | 8 | ||
10 | #include <asm/pgtable.h> /* for pgprot_t */ | 9 | #include <asm/pgtable.h> /* for pgprot_t */ |
11 | 10 | ||
11 | #ifdef CONFIG_CRASH_DUMP | ||
12 | #define ELFCORE_ADDR_MAX (-1ULL) | 12 | #define ELFCORE_ADDR_MAX (-1ULL) |
13 | #define ELFCORE_ADDR_ERR (-2ULL) | 13 | #define ELFCORE_ADDR_ERR (-2ULL) |
14 | 14 | ||
@@ -52,13 +52,13 @@ void vmcore_cleanup(void); | |||
52 | * has passed the elf core header address on command line. | 52 | * has passed the elf core header address on command line. |
53 | * | 53 | * |
54 | * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will | 54 | * This is not just a test if CONFIG_CRASH_DUMP is enabled or not. It will |
55 | * return 1 if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic of | 55 | * return true if CONFIG_CRASH_DUMP=y and if kernel is booting after a panic |
56 | * previous kernel. | 56 | * of previous kernel. |
57 | */ | 57 | */ |
58 | 58 | ||
59 | static inline int is_kdump_kernel(void) | 59 | static inline bool is_kdump_kernel(void) |
60 | { | 60 | { |
61 | return (elfcorehdr_addr != ELFCORE_ADDR_MAX) ? 1 : 0; | 61 | return elfcorehdr_addr != ELFCORE_ADDR_MAX; |
62 | } | 62 | } |
63 | 63 | ||
64 | /* is_vmcore_usable() checks if the kernel is booting after a panic and | 64 | /* is_vmcore_usable() checks if the kernel is booting after a panic and |
@@ -89,7 +89,7 @@ extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); | |||
89 | extern void unregister_oldmem_pfn_is_ram(void); | 89 | extern void unregister_oldmem_pfn_is_ram(void); |
90 | 90 | ||
91 | #else /* !CONFIG_CRASH_DUMP */ | 91 | #else /* !CONFIG_CRASH_DUMP */ |
92 | static inline int is_kdump_kernel(void) { return 0; } | 92 | static inline bool is_kdump_kernel(void) { return 0; } |
93 | #endif /* CONFIG_CRASH_DUMP */ | 93 | #endif /* CONFIG_CRASH_DUMP */ |
94 | 94 | ||
95 | extern unsigned long saved_max_pfn; | 95 | extern unsigned long saved_max_pfn; |
diff --git a/include/linux/crc-ccitt.h b/include/linux/crc-ccitt.h index cd4f420231ba..72c92c396bb8 100644 --- a/include/linux/crc-ccitt.h +++ b/include/linux/crc-ccitt.h | |||
@@ -5,12 +5,19 @@ | |||
5 | #include <linux/types.h> | 5 | #include <linux/types.h> |
6 | 6 | ||
7 | extern u16 const crc_ccitt_table[256]; | 7 | extern u16 const crc_ccitt_table[256]; |
8 | extern u16 const crc_ccitt_false_table[256]; | ||
8 | 9 | ||
9 | extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); | 10 | extern u16 crc_ccitt(u16 crc, const u8 *buffer, size_t len); |
11 | extern u16 crc_ccitt_false(u16 crc, const u8 *buffer, size_t len); | ||
10 | 12 | ||
11 | static inline u16 crc_ccitt_byte(u16 crc, const u8 c) | 13 | static inline u16 crc_ccitt_byte(u16 crc, const u8 c) |
12 | { | 14 | { |
13 | return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; | 15 | return (crc >> 8) ^ crc_ccitt_table[(crc ^ c) & 0xff]; |
14 | } | 16 | } |
15 | 17 | ||
18 | static inline u16 crc_ccitt_false_byte(u16 crc, const u8 c) | ||
19 | { | ||
20 | return (crc << 8) ^ crc_ccitt_false_table[(crc >> 8) ^ c]; | ||
21 | } | ||
22 | |||
16 | #endif /* _LINUX_CRC_CCITT_H */ | 23 | #endif /* _LINUX_CRC_CCITT_H */ |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 78508ca4b108..7e6e84cf6383 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
@@ -107,8 +107,16 @@ | |||
107 | #define CRYPTO_ALG_INTERNAL 0x00002000 | 107 | #define CRYPTO_ALG_INTERNAL 0x00002000 |
108 | 108 | ||
109 | /* | 109 | /* |
110 | * Set if the algorithm has a ->setkey() method but can be used without | ||
111 | * calling it first, i.e. there is a default key. | ||
112 | */ | ||
113 | #define CRYPTO_ALG_OPTIONAL_KEY 0x00004000 | ||
114 | |||
115 | /* | ||
110 | * Transform masks and values (for crt_flags). | 116 | * Transform masks and values (for crt_flags). |
111 | */ | 117 | */ |
118 | #define CRYPTO_TFM_NEED_KEY 0x00000001 | ||
119 | |||
112 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 | 120 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 |
113 | #define CRYPTO_TFM_RES_MASK 0xfff00000 | 121 | #define CRYPTO_TFM_RES_MASK 0xfff00000 |
114 | 122 | ||
@@ -447,7 +455,7 @@ struct crypto_alg { | |||
447 | unsigned int cra_alignmask; | 455 | unsigned int cra_alignmask; |
448 | 456 | ||
449 | int cra_priority; | 457 | int cra_priority; |
450 | atomic_t cra_refcnt; | 458 | refcount_t cra_refcnt; |
451 | 459 | ||
452 | char cra_name[CRYPTO_MAX_ALG_NAME]; | 460 | char cra_name[CRYPTO_MAX_ALG_NAME]; |
453 | char cra_driver_name[CRYPTO_MAX_ALG_NAME]; | 461 | char cra_driver_name[CRYPTO_MAX_ALG_NAME]; |
diff --git a/include/linux/dax.h b/include/linux/dax.h index 5258346c558c..0185ecdae135 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h | |||
@@ -96,7 +96,7 @@ bool dax_write_cache_enabled(struct dax_device *dax_dev); | |||
96 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, | 96 | ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter, |
97 | const struct iomap_ops *ops); | 97 | const struct iomap_ops *ops); |
98 | int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, | 98 | int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size, |
99 | pfn_t *pfnp, const struct iomap_ops *ops); | 99 | pfn_t *pfnp, int *errp, const struct iomap_ops *ops); |
100 | int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, | 100 | int dax_finish_sync_fault(struct vm_fault *vmf, enum page_entry_size pe_size, |
101 | pfn_t pfn); | 101 | pfn_t pfn); |
102 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); | 102 | int dax_delete_mapping_entry(struct address_space *mapping, pgoff_t index); |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index 65cd8ab60b7a..82a99d366aec 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
@@ -227,6 +227,7 @@ extern seqlock_t rename_lock; | |||
227 | */ | 227 | */ |
228 | extern void d_instantiate(struct dentry *, struct inode *); | 228 | extern void d_instantiate(struct dentry *, struct inode *); |
229 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); | 229 | extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *); |
230 | extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *); | ||
230 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); | 231 | extern int d_instantiate_no_diralias(struct dentry *, struct inode *); |
231 | extern void __d_drop(struct dentry *dentry); | 232 | extern void __d_drop(struct dentry *dentry); |
232 | extern void d_drop(struct dentry *dentry); | 233 | extern void d_drop(struct dentry *dentry); |
@@ -235,6 +236,7 @@ extern void d_set_d_op(struct dentry *dentry, const struct dentry_operations *op | |||
235 | 236 | ||
236 | /* allocate/de-allocate */ | 237 | /* allocate/de-allocate */ |
237 | extern struct dentry * d_alloc(struct dentry *, const struct qstr *); | 238 | extern struct dentry * d_alloc(struct dentry *, const struct qstr *); |
239 | extern struct dentry * d_alloc_anon(struct super_block *); | ||
238 | extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); | 240 | extern struct dentry * d_alloc_pseudo(struct super_block *, const struct qstr *); |
239 | extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, | 241 | extern struct dentry * d_alloc_parallel(struct dentry *, const struct qstr *, |
240 | wait_queue_head_t *); | 242 | wait_queue_head_t *); |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index a5538433c927..da83f64952e7 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
@@ -28,6 +28,7 @@ enum dm_queue_mode { | |||
28 | DM_TYPE_REQUEST_BASED = 2, | 28 | DM_TYPE_REQUEST_BASED = 2, |
29 | DM_TYPE_MQ_REQUEST_BASED = 3, | 29 | DM_TYPE_MQ_REQUEST_BASED = 3, |
30 | DM_TYPE_DAX_BIO_BASED = 4, | 30 | DM_TYPE_DAX_BIO_BASED = 4, |
31 | DM_TYPE_NVME_BIO_BASED = 5, | ||
31 | }; | 32 | }; |
32 | 33 | ||
33 | typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; | 34 | typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t; |
@@ -221,14 +222,6 @@ struct target_type { | |||
221 | #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) | 222 | #define dm_target_is_wildcard(type) ((type)->features & DM_TARGET_WILDCARD) |
222 | 223 | ||
223 | /* | 224 | /* |
224 | * Some targets need to be sent the same WRITE bio severals times so | ||
225 | * that they can send copies of it to different devices. This function | ||
226 | * examines any supplied bio and returns the number of copies of it the | ||
227 | * target requires. | ||
228 | */ | ||
229 | typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio); | ||
230 | |||
231 | /* | ||
232 | * A target implements own bio data integrity. | 225 | * A target implements own bio data integrity. |
233 | */ | 226 | */ |
234 | #define DM_TARGET_INTEGRITY 0x00000010 | 227 | #define DM_TARGET_INTEGRITY 0x00000010 |
@@ -291,13 +284,6 @@ struct dm_target { | |||
291 | */ | 284 | */ |
292 | unsigned per_io_data_size; | 285 | unsigned per_io_data_size; |
293 | 286 | ||
294 | /* | ||
295 | * If defined, this function is called to find out how many | ||
296 | * duplicate bios should be sent to the target when writing | ||
297 | * data. | ||
298 | */ | ||
299 | dm_num_write_bios_fn num_write_bios; | ||
300 | |||
301 | /* target specific data */ | 287 | /* target specific data */ |
302 | void *private; | 288 | void *private; |
303 | 289 | ||
@@ -329,35 +315,9 @@ struct dm_target_callbacks { | |||
329 | int (*congested_fn) (struct dm_target_callbacks *, int); | 315 | int (*congested_fn) (struct dm_target_callbacks *, int); |
330 | }; | 316 | }; |
331 | 317 | ||
332 | /* | 318 | void *dm_per_bio_data(struct bio *bio, size_t data_size); |
333 | * For bio-based dm. | 319 | struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size); |
334 | * One of these is allocated for each bio. | 320 | unsigned dm_bio_get_target_bio_nr(const struct bio *bio); |
335 | * This structure shouldn't be touched directly by target drivers. | ||
336 | * It is here so that we can inline dm_per_bio_data and | ||
337 | * dm_bio_from_per_bio_data | ||
338 | */ | ||
339 | struct dm_target_io { | ||
340 | struct dm_io *io; | ||
341 | struct dm_target *ti; | ||
342 | unsigned target_bio_nr; | ||
343 | unsigned *len_ptr; | ||
344 | struct bio clone; | ||
345 | }; | ||
346 | |||
347 | static inline void *dm_per_bio_data(struct bio *bio, size_t data_size) | ||
348 | { | ||
349 | return (char *)bio - offsetof(struct dm_target_io, clone) - data_size; | ||
350 | } | ||
351 | |||
352 | static inline struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size) | ||
353 | { | ||
354 | return (struct bio *)((char *)data + data_size + offsetof(struct dm_target_io, clone)); | ||
355 | } | ||
356 | |||
357 | static inline unsigned dm_bio_get_target_bio_nr(const struct bio *bio) | ||
358 | { | ||
359 | return container_of(bio, struct dm_target_io, clone)->target_bio_nr; | ||
360 | } | ||
361 | 321 | ||
362 | int dm_register_target(struct target_type *t); | 322 | int dm_register_target(struct target_type *t); |
363 | void dm_unregister_target(struct target_type *t); | 323 | void dm_unregister_target(struct target_type *t); |
@@ -500,6 +460,11 @@ void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type); | |||
500 | int dm_table_complete(struct dm_table *t); | 460 | int dm_table_complete(struct dm_table *t); |
501 | 461 | ||
502 | /* | 462 | /* |
463 | * Destroy the table when finished. | ||
464 | */ | ||
465 | void dm_table_destroy(struct dm_table *t); | ||
466 | |||
467 | /* | ||
503 | * Target may require that it is never sent I/O larger than len. | 468 | * Target may require that it is never sent I/O larger than len. |
504 | */ | 469 | */ |
505 | int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); | 470 | int __must_check dm_set_target_max_io_len(struct dm_target *ti, sector_t len); |
@@ -585,6 +550,7 @@ do { \ | |||
585 | #define DM_ENDIO_DONE 0 | 550 | #define DM_ENDIO_DONE 0 |
586 | #define DM_ENDIO_INCOMPLETE 1 | 551 | #define DM_ENDIO_INCOMPLETE 1 |
587 | #define DM_ENDIO_REQUEUE 2 | 552 | #define DM_ENDIO_REQUEUE 2 |
553 | #define DM_ENDIO_DELAY_REQUEUE 3 | ||
588 | 554 | ||
589 | /* | 555 | /* |
590 | * Definitions of return values from target map function. | 556 | * Definitions of return values from target map function. |
@@ -592,7 +558,7 @@ do { \ | |||
592 | #define DM_MAPIO_SUBMITTED 0 | 558 | #define DM_MAPIO_SUBMITTED 0 |
593 | #define DM_MAPIO_REMAPPED 1 | 559 | #define DM_MAPIO_REMAPPED 1 |
594 | #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE | 560 | #define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE |
595 | #define DM_MAPIO_DELAY_REQUEUE 3 | 561 | #define DM_MAPIO_DELAY_REQUEUE DM_ENDIO_DELAY_REQUEUE |
596 | #define DM_MAPIO_KILL 4 | 562 | #define DM_MAPIO_KILL 4 |
597 | 563 | ||
598 | #define dm_sector_div64(x, y)( \ | 564 | #define dm_sector_div64(x, y)( \ |
diff --git a/include/linux/device.h b/include/linux/device.h index 9d32000725da..b093405ed525 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * device.h - generic, centralized driver model | 3 | * device.h - generic, centralized driver model |
3 | * | 4 | * |
@@ -5,8 +6,6 @@ | |||
5 | * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> | 6 | * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de> |
6 | * Copyright (c) 2008-2009 Novell Inc. | 7 | * Copyright (c) 2008-2009 Novell Inc. |
7 | * | 8 | * |
8 | * This file is released under the GPLv2 | ||
9 | * | ||
10 | * See Documentation/driver-model/ for more information. | 9 | * See Documentation/driver-model/ for more information. |
11 | */ | 10 | */ |
12 | 11 | ||
@@ -21,7 +20,6 @@ | |||
21 | #include <linux/compiler.h> | 20 | #include <linux/compiler.h> |
22 | #include <linux/types.h> | 21 | #include <linux/types.h> |
23 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
24 | #include <linux/pinctrl/devinfo.h> | ||
25 | #include <linux/pm.h> | 23 | #include <linux/pm.h> |
26 | #include <linux/atomic.h> | 24 | #include <linux/atomic.h> |
27 | #include <linux/ratelimit.h> | 25 | #include <linux/ratelimit.h> |
@@ -42,6 +40,7 @@ struct fwnode_handle; | |||
42 | struct iommu_ops; | 40 | struct iommu_ops; |
43 | struct iommu_group; | 41 | struct iommu_group; |
44 | struct iommu_fwspec; | 42 | struct iommu_fwspec; |
43 | struct dev_pin_info; | ||
45 | 44 | ||
46 | struct bus_attribute { | 45 | struct bus_attribute { |
47 | struct attribute attr; | 46 | struct attribute attr; |
@@ -288,6 +287,7 @@ struct device_driver { | |||
288 | const struct attribute_group **groups; | 287 | const struct attribute_group **groups; |
289 | 288 | ||
290 | const struct dev_pm_ops *pm; | 289 | const struct dev_pm_ops *pm; |
290 | int (*coredump) (struct device *dev); | ||
291 | 291 | ||
292 | struct driver_private *p; | 292 | struct driver_private *p; |
293 | }; | 293 | }; |
@@ -301,7 +301,6 @@ extern struct device_driver *driver_find(const char *name, | |||
301 | extern int driver_probe_done(void); | 301 | extern int driver_probe_done(void); |
302 | extern void wait_for_device_probe(void); | 302 | extern void wait_for_device_probe(void); |
303 | 303 | ||
304 | |||
305 | /* sysfs interface for exporting driver attributes */ | 304 | /* sysfs interface for exporting driver attributes */ |
306 | 305 | ||
307 | struct driver_attribute { | 306 | struct driver_attribute { |
@@ -575,6 +574,9 @@ ssize_t device_store_bool(struct device *dev, struct device_attribute *attr, | |||
575 | 574 | ||
576 | #define DEVICE_ATTR(_name, _mode, _show, _store) \ | 575 | #define DEVICE_ATTR(_name, _mode, _show, _store) \ |
577 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) | 576 | struct device_attribute dev_attr_##_name = __ATTR(_name, _mode, _show, _store) |
577 | #define DEVICE_ATTR_PREALLOC(_name, _mode, _show, _store) \ | ||
578 | struct device_attribute dev_attr_##_name = \ | ||
579 | __ATTR_PREALLOC(_name, _mode, _show, _store) | ||
578 | #define DEVICE_ATTR_RW(_name) \ | 580 | #define DEVICE_ATTR_RW(_name) \ |
579 | struct device_attribute dev_attr_##_name = __ATTR_RW(_name) | 581 | struct device_attribute dev_attr_##_name = __ATTR_RW(_name) |
580 | #define DEVICE_ATTR_RO(_name) \ | 582 | #define DEVICE_ATTR_RO(_name) \ |
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h index 79f27d60ec66..085db2fee2d7 100644 --- a/include/linux/dma-buf.h +++ b/include/linux/dma-buf.h | |||
@@ -301,7 +301,7 @@ struct dma_buf { | |||
301 | struct dma_fence_cb cb; | 301 | struct dma_fence_cb cb; |
302 | wait_queue_head_t *poll; | 302 | wait_queue_head_t *poll; |
303 | 303 | ||
304 | unsigned long active; | 304 | __poll_t active; |
305 | } cb_excl, cb_shared; | 305 | } cb_excl, cb_shared; |
306 | }; | 306 | }; |
307 | 307 | ||
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h new file mode 100644 index 000000000000..bcdb1a3e4b1f --- /dev/null +++ b/include/linux/dma-direct.h | |||
@@ -0,0 +1,47 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef _LINUX_DMA_DIRECT_H | ||
3 | #define _LINUX_DMA_DIRECT_H 1 | ||
4 | |||
5 | #include <linux/dma-mapping.h> | ||
6 | |||
7 | #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA | ||
8 | #include <asm/dma-direct.h> | ||
9 | #else | ||
10 | static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) | ||
11 | { | ||
12 | dma_addr_t dev_addr = (dma_addr_t)paddr; | ||
13 | |||
14 | return dev_addr - ((dma_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); | ||
15 | } | ||
16 | |||
17 | static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr) | ||
18 | { | ||
19 | phys_addr_t paddr = (phys_addr_t)dev_addr; | ||
20 | |||
21 | return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT); | ||
22 | } | ||
23 | |||
24 | static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size) | ||
25 | { | ||
26 | if (!dev->dma_mask) | ||
27 | return false; | ||
28 | |||
29 | return addr + size - 1 <= *dev->dma_mask; | ||
30 | } | ||
31 | #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */ | ||
32 | |||
33 | #ifdef CONFIG_ARCH_HAS_DMA_MARK_CLEAN | ||
34 | void dma_mark_clean(void *addr, size_t size); | ||
35 | #else | ||
36 | static inline void dma_mark_clean(void *addr, size_t size) | ||
37 | { | ||
38 | } | ||
39 | #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ | ||
40 | |||
41 | void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, | ||
42 | gfp_t gfp, unsigned long attrs); | ||
43 | void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, | ||
44 | dma_addr_t dma_addr, unsigned long attrs); | ||
45 | int dma_direct_supported(struct device *dev, u64 mask); | ||
46 | |||
47 | #endif /* _LINUX_DMA_DIRECT_H */ | ||
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index 332a5420243c..bc8940ca280d 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h | |||
@@ -21,6 +21,7 @@ | |||
21 | #define __LINUX_DMA_FENCE_ARRAY_H | 21 | #define __LINUX_DMA_FENCE_ARRAY_H |
22 | 22 | ||
23 | #include <linux/dma-fence.h> | 23 | #include <linux/dma-fence.h> |
24 | #include <linux/irq_work.h> | ||
24 | 25 | ||
25 | /** | 26 | /** |
26 | * struct dma_fence_array_cb - callback helper for fence array | 27 | * struct dma_fence_array_cb - callback helper for fence array |
@@ -47,6 +48,8 @@ struct dma_fence_array { | |||
47 | unsigned num_fences; | 48 | unsigned num_fences; |
48 | atomic_t num_pending; | 49 | atomic_t num_pending; |
49 | struct dma_fence **fences; | 50 | struct dma_fence **fences; |
51 | |||
52 | struct irq_work work; | ||
50 | }; | 53 | }; |
51 | 54 | ||
52 | extern const struct dma_fence_ops dma_fence_array_ops; | 55 | extern const struct dma_fence_ops dma_fence_array_ops; |
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index efdabbb64e3c..4c008170fe65 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h | |||
@@ -242,7 +242,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence) | |||
242 | * The caller is required to hold the RCU read lock. | 242 | * The caller is required to hold the RCU read lock. |
243 | */ | 243 | */ |
244 | static inline struct dma_fence * | 244 | static inline struct dma_fence * |
245 | dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep) | 245 | dma_fence_get_rcu_safe(struct dma_fence __rcu **fencep) |
246 | { | 246 | { |
247 | do { | 247 | do { |
248 | struct dma_fence *fence; | 248 | struct dma_fence *fence; |
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index 81ed9b2d84dc..34fe8463d10e 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
@@ -136,7 +136,7 @@ struct dma_map_ops { | |||
136 | int is_phys; | 136 | int is_phys; |
137 | }; | 137 | }; |
138 | 138 | ||
139 | extern const struct dma_map_ops dma_noop_ops; | 139 | extern const struct dma_map_ops dma_direct_ops; |
140 | extern const struct dma_map_ops dma_virt_ops; | 140 | extern const struct dma_map_ops dma_virt_ops; |
141 | 141 | ||
142 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) | 142 | #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) |
@@ -513,10 +513,18 @@ static inline void *dma_alloc_attrs(struct device *dev, size_t size, | |||
513 | void *cpu_addr; | 513 | void *cpu_addr; |
514 | 514 | ||
515 | BUG_ON(!ops); | 515 | BUG_ON(!ops); |
516 | WARN_ON_ONCE(dev && !dev->coherent_dma_mask); | ||
516 | 517 | ||
517 | if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) | 518 | if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) |
518 | return cpu_addr; | 519 | return cpu_addr; |
519 | 520 | ||
521 | /* | ||
522 | * Let the implementation decide on the zone to allocate from, and | ||
523 | * decide on the way of zeroing the memory given that the memory | ||
524 | * returned should always be zeroed. | ||
525 | */ | ||
526 | flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM | __GFP_ZERO); | ||
527 | |||
520 | if (!arch_dma_alloc_attrs(&dev, &flag)) | 528 | if (!arch_dma_alloc_attrs(&dev, &flag)) |
521 | return NULL; | 529 | return NULL; |
522 | if (!ops->alloc) | 530 | if (!ops->alloc) |
@@ -568,6 +576,14 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) | |||
568 | return 0; | 576 | return 0; |
569 | } | 577 | } |
570 | 578 | ||
579 | /* | ||
580 | * This is a hack for the legacy x86 forbid_dac and iommu_sac_force. Please | ||
581 | * don't use this is new code. | ||
582 | */ | ||
583 | #ifndef arch_dma_supported | ||
584 | #define arch_dma_supported(dev, mask) (1) | ||
585 | #endif | ||
586 | |||
571 | static inline void dma_check_mask(struct device *dev, u64 mask) | 587 | static inline void dma_check_mask(struct device *dev, u64 mask) |
572 | { | 588 | { |
573 | if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) | 589 | if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1))) |
@@ -580,6 +596,9 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
580 | 596 | ||
581 | if (!ops) | 597 | if (!ops) |
582 | return 0; | 598 | return 0; |
599 | if (!arch_dma_supported(dev, mask)) | ||
600 | return 0; | ||
601 | |||
583 | if (!ops->dma_supported) | 602 | if (!ops->dma_supported) |
584 | return 1; | 603 | return 1; |
585 | return ops->dma_supported(dev, mask); | 604 | return ops->dma_supported(dev, mask); |
@@ -692,7 +711,7 @@ static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask) | |||
692 | #ifndef dma_max_pfn | 711 | #ifndef dma_max_pfn |
693 | static inline unsigned long dma_max_pfn(struct device *dev) | 712 | static inline unsigned long dma_max_pfn(struct device *dev) |
694 | { | 713 | { |
695 | return *dev->dma_mask >> PAGE_SHIFT; | 714 | return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset; |
696 | } | 715 | } |
697 | #endif | 716 | #endif |
698 | 717 | ||
diff --git a/include/linux/dsa/lan9303.h b/include/linux/dsa/lan9303.h index f48a85c377de..b4f22112ba75 100644 --- a/include/linux/dsa/lan9303.h +++ b/include/linux/dsa/lan9303.h | |||
@@ -23,9 +23,10 @@ struct lan9303 { | |||
23 | struct regmap_irq_chip_data *irq_data; | 23 | struct regmap_irq_chip_data *irq_data; |
24 | struct gpio_desc *reset_gpio; | 24 | struct gpio_desc *reset_gpio; |
25 | u32 reset_duration; /* in [ms] */ | 25 | u32 reset_duration; /* in [ms] */ |
26 | bool phy_addr_sel_strap; | 26 | int phy_addr_base; |
27 | struct dsa_switch *ds; | 27 | struct dsa_switch *ds; |
28 | struct mutex indirect_mutex; /* protect indexed register access */ | 28 | struct mutex indirect_mutex; /* protect indexed register access */ |
29 | struct mutex alr_mutex; /* protect ALR access */ | ||
29 | const struct lan9303_phy_ops *ops; | 30 | const struct lan9303_phy_ops *ops; |
30 | bool is_bridged; /* true if port 1 and 2 are bridged */ | 31 | bool is_bridged; /* true if port 1 and 2 are bridged */ |
31 | 32 | ||
diff --git a/include/linux/efi.h b/include/linux/efi.h index 29fdf8029cf6..f5083aa72eae 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
@@ -475,6 +475,39 @@ typedef struct { | |||
475 | u64 get_all; | 475 | u64 get_all; |
476 | } apple_properties_protocol_64_t; | 476 | } apple_properties_protocol_64_t; |
477 | 477 | ||
478 | typedef struct { | ||
479 | u32 get_capability; | ||
480 | u32 get_event_log; | ||
481 | u32 hash_log_extend_event; | ||
482 | u32 submit_command; | ||
483 | u32 get_active_pcr_banks; | ||
484 | u32 set_active_pcr_banks; | ||
485 | u32 get_result_of_set_active_pcr_banks; | ||
486 | } efi_tcg2_protocol_32_t; | ||
487 | |||
488 | typedef struct { | ||
489 | u64 get_capability; | ||
490 | u64 get_event_log; | ||
491 | u64 hash_log_extend_event; | ||
492 | u64 submit_command; | ||
493 | u64 get_active_pcr_banks; | ||
494 | u64 set_active_pcr_banks; | ||
495 | u64 get_result_of_set_active_pcr_banks; | ||
496 | } efi_tcg2_protocol_64_t; | ||
497 | |||
498 | typedef u32 efi_tcg2_event_log_format; | ||
499 | |||
500 | typedef struct { | ||
501 | void *get_capability; | ||
502 | efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format, | ||
503 | efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *); | ||
504 | void *hash_log_extend_event; | ||
505 | void *submit_command; | ||
506 | void *get_active_pcr_banks; | ||
507 | void *set_active_pcr_banks; | ||
508 | void *get_result_of_set_active_pcr_banks; | ||
509 | } efi_tcg2_protocol_t; | ||
510 | |||
478 | /* | 511 | /* |
479 | * Types and defines for EFI ResetSystem | 512 | * Types and defines for EFI ResetSystem |
480 | */ | 513 | */ |
@@ -625,6 +658,7 @@ void efi_native_runtime_setup(void); | |||
625 | #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) | 658 | #define EFI_MEMORY_ATTRIBUTES_TABLE_GUID EFI_GUID(0xdcfa911d, 0x26eb, 0x469f, 0xa2, 0x20, 0x38, 0xb7, 0xdc, 0x46, 0x12, 0x20) |
626 | #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) | 659 | #define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d) |
627 | #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) | 660 | #define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0) |
661 | #define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f) | ||
628 | 662 | ||
629 | #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) | 663 | #define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f) |
630 | #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) | 664 | #define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23) |
@@ -637,6 +671,7 @@ void efi_native_runtime_setup(void); | |||
637 | #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) | 671 | #define LINUX_EFI_ARM_SCREEN_INFO_TABLE_GUID EFI_GUID(0xe03fc20a, 0x85dc, 0x406e, 0xb9, 0x0e, 0x4a, 0xb5, 0x02, 0x37, 0x1d, 0x95) |
638 | #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) | 672 | #define LINUX_EFI_LOADER_ENTRY_GUID EFI_GUID(0x4a67b082, 0x0a4c, 0x41cf, 0xb6, 0xc7, 0x44, 0x0b, 0x29, 0xbb, 0x8c, 0x4f) |
639 | #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) | 673 | #define LINUX_EFI_RANDOM_SEED_TABLE_GUID EFI_GUID(0x1ce1e5bc, 0x7ceb, 0x42f2, 0x81, 0xe5, 0x8a, 0xad, 0xf1, 0x80, 0xf5, 0x7b) |
674 | #define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa) | ||
640 | 675 | ||
641 | typedef struct { | 676 | typedef struct { |
642 | efi_guid_t guid; | 677 | efi_guid_t guid; |
@@ -911,6 +946,7 @@ extern struct efi { | |||
911 | unsigned long properties_table; /* properties table */ | 946 | unsigned long properties_table; /* properties table */ |
912 | unsigned long mem_attr_table; /* memory attributes table */ | 947 | unsigned long mem_attr_table; /* memory attributes table */ |
913 | unsigned long rng_seed; /* UEFI firmware random seed */ | 948 | unsigned long rng_seed; /* UEFI firmware random seed */ |
949 | unsigned long tpm_log; /* TPM2 Event Log table */ | ||
914 | efi_get_time_t *get_time; | 950 | efi_get_time_t *get_time; |
915 | efi_set_time_t *set_time; | 951 | efi_set_time_t *set_time; |
916 | efi_get_wakeup_time_t *get_wakeup_time; | 952 | efi_get_wakeup_time_t *get_wakeup_time; |
@@ -1536,6 +1572,8 @@ static inline void | |||
1536 | efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { } | 1572 | efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { } |
1537 | #endif | 1573 | #endif |
1538 | 1574 | ||
1575 | void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); | ||
1576 | |||
1539 | /* | 1577 | /* |
1540 | * Arch code can implement the following three template macros, avoiding | 1578 | * Arch code can implement the following three template macros, avoiding |
1541 | * repetition for the void/non-void return cases of {__,}efi_call_virt(): | 1579 | * repetition for the void/non-void return cases of {__,}efi_call_virt(): |
@@ -1603,4 +1641,12 @@ struct linux_efi_random_seed { | |||
1603 | u8 bits[]; | 1641 | u8 bits[]; |
1604 | }; | 1642 | }; |
1605 | 1643 | ||
1644 | struct linux_efi_tpm_eventlog { | ||
1645 | u32 size; | ||
1646 | u8 version; | ||
1647 | u8 log[]; | ||
1648 | }; | ||
1649 | |||
1650 | extern int efi_tpm_eventlog_init(void); | ||
1651 | |||
1606 | #endif /* _LINUX_EFI_H */ | 1652 | #endif /* _LINUX_EFI_H */ |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 3d794b3dc532..6d9e230dffd2 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
@@ -198,8 +198,6 @@ extern bool elv_attempt_insert_merge(struct request_queue *, struct request *); | |||
198 | extern void elv_requeue_request(struct request_queue *, struct request *); | 198 | extern void elv_requeue_request(struct request_queue *, struct request *); |
199 | extern struct request *elv_former_request(struct request_queue *, struct request *); | 199 | extern struct request *elv_former_request(struct request_queue *, struct request *); |
200 | extern struct request *elv_latter_request(struct request_queue *, struct request *); | 200 | extern struct request *elv_latter_request(struct request_queue *, struct request *); |
201 | extern int elv_register_queue(struct request_queue *q); | ||
202 | extern void elv_unregister_queue(struct request_queue *q); | ||
203 | extern int elv_may_queue(struct request_queue *, unsigned int); | 201 | extern int elv_may_queue(struct request_queue *, unsigned int); |
204 | extern void elv_completed_request(struct request_queue *, struct request *); | 202 | extern void elv_completed_request(struct request_queue *, struct request *); |
205 | extern int elv_set_request(struct request_queue *q, struct request *rq, | 203 | extern int elv_set_request(struct request_queue *q, struct request *rq, |
diff --git a/include/linux/error-injection.h b/include/linux/error-injection.h new file mode 100644 index 000000000000..280c61ecbf20 --- /dev/null +++ b/include/linux/error-injection.h | |||
@@ -0,0 +1,27 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef _LINUX_ERROR_INJECTION_H | ||
3 | #define _LINUX_ERROR_INJECTION_H | ||
4 | |||
5 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION | ||
6 | |||
7 | #include <asm/error-injection.h> | ||
8 | |||
9 | extern bool within_error_injection_list(unsigned long addr); | ||
10 | extern int get_injectable_error_type(unsigned long addr); | ||
11 | |||
12 | #else /* !CONFIG_FUNCTION_ERROR_INJECTION */ | ||
13 | |||
14 | #include <asm-generic/error-injection.h> | ||
15 | static inline bool within_error_injection_list(unsigned long addr) | ||
16 | { | ||
17 | return false; | ||
18 | } | ||
19 | |||
20 | static inline int get_injectable_error_type(unsigned long addr) | ||
21 | { | ||
22 | return EI_ETYPE_NONE; | ||
23 | } | ||
24 | |||
25 | #endif | ||
26 | |||
27 | #endif /* _LINUX_ERROR_INJECTION_H */ | ||
diff --git a/include/linux/errseq.h b/include/linux/errseq.h index 6ffae9c5052d..fc2777770768 100644 --- a/include/linux/errseq.h +++ b/include/linux/errseq.h | |||
@@ -1,6 +1,6 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | 2 | /* |
3 | * See Documentation/errseq.rst and lib/errseq.c | 3 | * See Documentation/core-api/errseq.rst and lib/errseq.c |
4 | */ | 4 | */ |
5 | #ifndef _LINUX_ERRSEQ_H | 5 | #ifndef _LINUX_ERRSEQ_H |
6 | #define _LINUX_ERRSEQ_H | 6 | #define _LINUX_ERRSEQ_H |
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h index 60b2985e8a18..7094718b653b 100644 --- a/include/linux/eventfd.h +++ b/include/linux/eventfd.h | |||
@@ -26,18 +26,16 @@ | |||
26 | #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) | 26 | #define EFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK) |
27 | #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) | 27 | #define EFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS | EFD_SEMAPHORE) |
28 | 28 | ||
29 | struct eventfd_ctx; | ||
29 | struct file; | 30 | struct file; |
30 | 31 | ||
31 | #ifdef CONFIG_EVENTFD | 32 | #ifdef CONFIG_EVENTFD |
32 | 33 | ||
33 | struct file *eventfd_file_create(unsigned int count, int flags); | ||
34 | struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx); | ||
35 | void eventfd_ctx_put(struct eventfd_ctx *ctx); | 34 | void eventfd_ctx_put(struct eventfd_ctx *ctx); |
36 | struct file *eventfd_fget(int fd); | 35 | struct file *eventfd_fget(int fd); |
37 | struct eventfd_ctx *eventfd_ctx_fdget(int fd); | 36 | struct eventfd_ctx *eventfd_ctx_fdget(int fd); |
38 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); | 37 | struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); |
39 | __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); | 38 | __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n); |
40 | ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); | ||
41 | int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, | 39 | int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait, |
42 | __u64 *cnt); | 40 | __u64 *cnt); |
43 | 41 | ||
@@ -47,10 +45,6 @@ int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *w | |||
47 | * Ugly ugly ugly error layer to support modules that uses eventfd but | 45 | * Ugly ugly ugly error layer to support modules that uses eventfd but |
48 | * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. | 46 | * pretend to work in !CONFIG_EVENTFD configurations. Namely, AIO. |
49 | */ | 47 | */ |
50 | static inline struct file *eventfd_file_create(unsigned int count, int flags) | ||
51 | { | ||
52 | return ERR_PTR(-ENOSYS); | ||
53 | } | ||
54 | 48 | ||
55 | static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) | 49 | static inline struct eventfd_ctx *eventfd_ctx_fdget(int fd) |
56 | { | 50 | { |
@@ -67,12 +61,6 @@ static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) | |||
67 | 61 | ||
68 | } | 62 | } |
69 | 63 | ||
70 | static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, | ||
71 | __u64 *cnt) | ||
72 | { | ||
73 | return -ENOSYS; | ||
74 | } | ||
75 | |||
76 | static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, | 64 | static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, |
77 | wait_queue_entry_t *wait, __u64 *cnt) | 65 | wait_queue_entry_t *wait, __u64 *cnt) |
78 | { | 66 | { |
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index 43e98d30d2df..58aecb60ea51 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
@@ -117,6 +117,7 @@ struct f2fs_super_block { | |||
117 | /* | 117 | /* |
118 | * For checkpoint | 118 | * For checkpoint |
119 | */ | 119 | */ |
120 | #define CP_NOCRC_RECOVERY_FLAG 0x00000200 | ||
120 | #define CP_TRIMMED_FLAG 0x00000100 | 121 | #define CP_TRIMMED_FLAG 0x00000100 |
121 | #define CP_NAT_BITS_FLAG 0x00000080 | 122 | #define CP_NAT_BITS_FLAG 0x00000080 |
122 | #define CP_CRC_RECOVERY_FLAG 0x00000040 | 123 | #define CP_CRC_RECOVERY_FLAG 0x00000040 |
@@ -212,6 +213,7 @@ struct f2fs_extent { | |||
212 | #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ | 213 | #define F2FS_DATA_EXIST 0x08 /* file inline data exist flag */ |
213 | #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ | 214 | #define F2FS_INLINE_DOTS 0x10 /* file having implicit dot dentries */ |
214 | #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ | 215 | #define F2FS_EXTRA_ATTR 0x20 /* file having extra attribute */ |
216 | #define F2FS_PIN_FILE 0x40 /* file should not be gced */ | ||
215 | 217 | ||
216 | struct f2fs_inode { | 218 | struct f2fs_inode { |
217 | __le16 i_mode; /* file mode */ | 219 | __le16 i_mode; /* file mode */ |
@@ -229,7 +231,13 @@ struct f2fs_inode { | |||
229 | __le32 i_ctime_nsec; /* change time in nano scale */ | 231 | __le32 i_ctime_nsec; /* change time in nano scale */ |
230 | __le32 i_mtime_nsec; /* modification time in nano scale */ | 232 | __le32 i_mtime_nsec; /* modification time in nano scale */ |
231 | __le32 i_generation; /* file version (for NFS) */ | 233 | __le32 i_generation; /* file version (for NFS) */ |
232 | __le32 i_current_depth; /* only for directory depth */ | 234 | union { |
235 | __le32 i_current_depth; /* only for directory depth */ | ||
236 | __le16 i_gc_failures; /* | ||
237 | * # of gc failures on pinned file. | ||
238 | * only for regular files. | ||
239 | */ | ||
240 | }; | ||
233 | __le32 i_xattr_nid; /* nid to save xattr */ | 241 | __le32 i_xattr_nid; /* nid to save xattr */ |
234 | __le32 i_flags; /* file attributes */ | 242 | __le32 i_flags; /* file attributes */ |
235 | __le32 i_pino; /* parent inode number */ | 243 | __le32 i_pino; /* parent inode number */ |
@@ -245,8 +253,10 @@ struct f2fs_inode { | |||
245 | __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ | 253 | __le16 i_inline_xattr_size; /* inline xattr size, unit: 4 bytes */ |
246 | __le32 i_projid; /* project id */ | 254 | __le32 i_projid; /* project id */ |
247 | __le32 i_inode_checksum;/* inode meta checksum */ | 255 | __le32 i_inode_checksum;/* inode meta checksum */ |
256 | __le64 i_crtime; /* creation time */ | ||
257 | __le32 i_crtime_nsec; /* creation time in nano scale */ | ||
248 | __le32 i_extra_end[0]; /* for attribute size calculation */ | 258 | __le32 i_extra_end[0]; /* for attribute size calculation */ |
249 | }; | 259 | } __packed; |
250 | __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ | 260 | __le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */ |
251 | }; | 261 | }; |
252 | __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), | 262 | __le32 i_nid[DEF_NIDS_PER_INODE]; /* direct(2), indirect(2), |
diff --git a/include/linux/fb.h b/include/linux/fb.h index bc24e48e396d..f577d3c89618 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
@@ -465,6 +465,11 @@ struct fb_info { | |||
465 | atomic_t count; | 465 | atomic_t count; |
466 | int node; | 466 | int node; |
467 | int flags; | 467 | int flags; |
468 | /* | ||
469 | * -1 by default, set to a FB_ROTATE_* value by the driver, if it knows | ||
470 | * an LCD is not mounted upright and fbcon should rotate to compensate. | ||
471 | */ | ||
472 | int fbcon_rotate_hint; | ||
468 | struct mutex lock; /* Lock for open/release/ioctl funcs */ | 473 | struct mutex lock; /* Lock for open/release/ioctl funcs */ |
469 | struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ | 474 | struct mutex mm_lock; /* Lock for fb_mmap and smem_* fields */ |
470 | struct fb_var_screeninfo var; /* Current var */ | 475 | struct fb_var_screeninfo var; /* Current var */ |
@@ -564,7 +569,10 @@ static inline struct apertures_struct *alloc_apertures(unsigned int max_num) { | |||
564 | #define fb_memcpy_fromfb sbus_memcpy_fromio | 569 | #define fb_memcpy_fromfb sbus_memcpy_fromio |
565 | #define fb_memcpy_tofb sbus_memcpy_toio | 570 | #define fb_memcpy_tofb sbus_memcpy_toio |
566 | 571 | ||
567 | #elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || defined(__avr32__) || defined(__bfin__) || defined(__arm__) | 572 | #elif defined(__i386__) || defined(__alpha__) || defined(__x86_64__) || \ |
573 | defined(__hppa__) || defined(__sh__) || defined(__powerpc__) || \ | ||
574 | defined(__avr32__) || defined(__bfin__) || defined(__arm__) || \ | ||
575 | defined(__aarch64__) | ||
568 | 576 | ||
569 | #define fb_readb __raw_readb | 577 | #define fb_readb __raw_readb |
570 | #define fb_readw __raw_readw | 578 | #define fb_readw __raw_readw |
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h index 1c65817673db..41615f38bcff 100644 --- a/include/linux/fdtable.h +++ b/include/linux/fdtable.h | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/compiler.h> | 10 | #include <linux/compiler.h> |
11 | #include <linux/spinlock.h> | 11 | #include <linux/spinlock.h> |
12 | #include <linux/rcupdate.h> | 12 | #include <linux/rcupdate.h> |
13 | #include <linux/nospec.h> | ||
13 | #include <linux/types.h> | 14 | #include <linux/types.h> |
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
@@ -82,8 +83,10 @@ static inline struct file *__fcheck_files(struct files_struct *files, unsigned i | |||
82 | { | 83 | { |
83 | struct fdtable *fdt = rcu_dereference_raw(files->fdt); | 84 | struct fdtable *fdt = rcu_dereference_raw(files->fdt); |
84 | 85 | ||
85 | if (fd < fdt->max_fds) | 86 | if (fd < fdt->max_fds) { |
87 | fd = array_index_nospec(fd, fdt->max_fds); | ||
86 | return rcu_dereference_raw(fdt->fd[fd]); | 88 | return rcu_dereference_raw(fdt->fd[fd]); |
89 | } | ||
87 | return NULL; | 90 | return NULL; |
88 | } | 91 | } |
89 | 92 | ||
diff --git a/include/linux/filter.h b/include/linux/filter.h index 80b5b482cb46..276932d75975 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
@@ -18,7 +18,9 @@ | |||
18 | #include <linux/capability.h> | 18 | #include <linux/capability.h> |
19 | #include <linux/cryptohash.h> | 19 | #include <linux/cryptohash.h> |
20 | #include <linux/set_memory.h> | 20 | #include <linux/set_memory.h> |
21 | #include <linux/kallsyms.h> | ||
21 | 22 | ||
23 | #include <net/xdp.h> | ||
22 | #include <net/sch_generic.h> | 24 | #include <net/sch_generic.h> |
23 | 25 | ||
24 | #include <uapi/linux/filter.h> | 26 | #include <uapi/linux/filter.h> |
@@ -58,6 +60,9 @@ struct bpf_prog_aux; | |||
58 | /* unused opcode to mark special call to bpf_tail_call() helper */ | 60 | /* unused opcode to mark special call to bpf_tail_call() helper */ |
59 | #define BPF_TAIL_CALL 0xf0 | 61 | #define BPF_TAIL_CALL 0xf0 |
60 | 62 | ||
63 | /* unused opcode to mark call to interpreter with arguments */ | ||
64 | #define BPF_CALL_ARGS 0xe0 | ||
65 | |||
61 | /* As per nm, we expose JITed images as text (code) section for | 66 | /* As per nm, we expose JITed images as text (code) section for |
62 | * kallsyms. That way, tools like perf can find it to match | 67 | * kallsyms. That way, tools like perf can find it to match |
63 | * addresses. | 68 | * addresses. |
@@ -455,10 +460,14 @@ struct bpf_binary_header { | |||
455 | struct bpf_prog { | 460 | struct bpf_prog { |
456 | u16 pages; /* Number of allocated pages */ | 461 | u16 pages; /* Number of allocated pages */ |
457 | u16 jited:1, /* Is our filter JIT'ed? */ | 462 | u16 jited:1, /* Is our filter JIT'ed? */ |
463 | jit_requested:1,/* archs need to JIT the prog */ | ||
458 | locked:1, /* Program image locked? */ | 464 | locked:1, /* Program image locked? */ |
459 | gpl_compatible:1, /* Is filter GPL compatible? */ | 465 | gpl_compatible:1, /* Is filter GPL compatible? */ |
460 | cb_access:1, /* Is control block accessed? */ | 466 | cb_access:1, /* Is control block accessed? */ |
461 | dst_needed:1; /* Do we need dst entry? */ | 467 | dst_needed:1, /* Do we need dst entry? */ |
468 | blinded:1, /* Was blinded */ | ||
469 | is_func:1, /* program is a bpf function */ | ||
470 | kprobe_override:1; /* Do we override a kprobe? */ | ||
462 | enum bpf_prog_type type; /* Type of BPF program */ | 471 | enum bpf_prog_type type; /* Type of BPF program */ |
463 | u32 len; /* Number of filter blocks */ | 472 | u32 len; /* Number of filter blocks */ |
464 | u32 jited_len; /* Size of jited insns in bytes */ | 473 | u32 jited_len; /* Size of jited insns in bytes */ |
@@ -495,6 +504,7 @@ struct xdp_buff { | |||
495 | void *data_end; | 504 | void *data_end; |
496 | void *data_meta; | 505 | void *data_meta; |
497 | void *data_hard_start; | 506 | void *data_hard_start; |
507 | struct xdp_rxq_info *rxq; | ||
498 | }; | 508 | }; |
499 | 509 | ||
500 | /* Compute the linear packet data range [data, data_end) which | 510 | /* Compute the linear packet data range [data, data_end) which |
@@ -678,6 +688,8 @@ static inline int sk_filter(struct sock *sk, struct sk_buff *skb) | |||
678 | struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); | 688 | struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err); |
679 | void bpf_prog_free(struct bpf_prog *fp); | 689 | void bpf_prog_free(struct bpf_prog *fp); |
680 | 690 | ||
691 | bool bpf_opcode_in_insntable(u8 code); | ||
692 | |||
681 | struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); | 693 | struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); |
682 | struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, | 694 | struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, |
683 | gfp_t gfp_extra_flags); | 695 | gfp_t gfp_extra_flags); |
@@ -709,11 +721,22 @@ bool sk_filter_charge(struct sock *sk, struct sk_filter *fp); | |||
709 | void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); | 721 | void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp); |
710 | 722 | ||
711 | u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | 723 | u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
724 | #define __bpf_call_base_args \ | ||
725 | ((u64 (*)(u64, u64, u64, u64, u64, const struct bpf_insn *)) \ | ||
726 | __bpf_call_base) | ||
712 | 727 | ||
713 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); | 728 | struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog); |
714 | void bpf_jit_compile(struct bpf_prog *prog); | 729 | void bpf_jit_compile(struct bpf_prog *prog); |
715 | bool bpf_helper_changes_pkt_data(void *func); | 730 | bool bpf_helper_changes_pkt_data(void *func); |
716 | 731 | ||
732 | static inline bool bpf_dump_raw_ok(void) | ||
733 | { | ||
734 | /* Reconstruction of call-sites is dependent on kallsyms, | ||
735 | * thus make dump the same restriction. | ||
736 | */ | ||
737 | return kallsyms_show_value() == 1; | ||
738 | } | ||
739 | |||
717 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | 740 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, |
718 | const struct bpf_insn *patch, u32 len); | 741 | const struct bpf_insn *patch, u32 len); |
719 | 742 | ||
@@ -797,7 +820,7 @@ static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp) | |||
797 | return fp->jited && bpf_jit_is_ebpf(); | 820 | return fp->jited && bpf_jit_is_ebpf(); |
798 | } | 821 | } |
799 | 822 | ||
800 | static inline bool bpf_jit_blinding_enabled(void) | 823 | static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog) |
801 | { | 824 | { |
802 | /* These are the prerequisites, should someone ever have the | 825 | /* These are the prerequisites, should someone ever have the |
803 | * idea to call blinding outside of them, we make sure to | 826 | * idea to call blinding outside of them, we make sure to |
@@ -805,7 +828,7 @@ static inline bool bpf_jit_blinding_enabled(void) | |||
805 | */ | 828 | */ |
806 | if (!bpf_jit_is_ebpf()) | 829 | if (!bpf_jit_is_ebpf()) |
807 | return false; | 830 | return false; |
808 | if (!bpf_jit_enable) | 831 | if (!prog->jit_requested) |
809 | return false; | 832 | return false; |
810 | if (!bpf_jit_harden) | 833 | if (!bpf_jit_harden) |
811 | return false; | 834 | return false; |
@@ -982,9 +1005,20 @@ struct bpf_sock_ops_kern { | |||
982 | struct sock *sk; | 1005 | struct sock *sk; |
983 | u32 op; | 1006 | u32 op; |
984 | union { | 1007 | union { |
1008 | u32 args[4]; | ||
985 | u32 reply; | 1009 | u32 reply; |
986 | u32 replylong[4]; | 1010 | u32 replylong[4]; |
987 | }; | 1011 | }; |
1012 | u32 is_fullsock; | ||
1013 | u64 temp; /* temp and everything after is not | ||
1014 | * initialized to 0 before calling | ||
1015 | * the BPF program. New fields that | ||
1016 | * should be initialized to 0 should | ||
1017 | * be inserted before temp. | ||
1018 | * temp is scratch storage used by | ||
1019 | * sock_ops_convert_ctx_access | ||
1020 | * as temporary storage of a register. | ||
1021 | */ | ||
988 | }; | 1022 | }; |
989 | 1023 | ||
990 | #endif /* __LINUX_FILTER_H__ */ | 1024 | #endif /* __LINUX_FILTER_H__ */ |
diff --git a/include/linux/fpga/fpga-bridge.h b/include/linux/fpga/fpga-bridge.h index aa66c87c120b..3694821a6d2d 100644 --- a/include/linux/fpga/fpga-bridge.h +++ b/include/linux/fpga/fpga-bridge.h | |||
@@ -1,10 +1,11 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #include <linux/device.h> | ||
3 | #include <linux/fpga/fpga-mgr.h> | ||
4 | 2 | ||
5 | #ifndef _LINUX_FPGA_BRIDGE_H | 3 | #ifndef _LINUX_FPGA_BRIDGE_H |
6 | #define _LINUX_FPGA_BRIDGE_H | 4 | #define _LINUX_FPGA_BRIDGE_H |
7 | 5 | ||
6 | #include <linux/device.h> | ||
7 | #include <linux/fpga/fpga-mgr.h> | ||
8 | |||
8 | struct fpga_bridge; | 9 | struct fpga_bridge; |
9 | 10 | ||
10 | /** | 11 | /** |
@@ -12,11 +13,13 @@ struct fpga_bridge; | |||
12 | * @enable_show: returns the FPGA bridge's status | 13 | * @enable_show: returns the FPGA bridge's status |
13 | * @enable_set: set a FPGA bridge as enabled or disabled | 14 | * @enable_set: set a FPGA bridge as enabled or disabled |
14 | * @fpga_bridge_remove: set FPGA into a specific state during driver remove | 15 | * @fpga_bridge_remove: set FPGA into a specific state during driver remove |
16 | * @groups: optional attribute groups. | ||
15 | */ | 17 | */ |
16 | struct fpga_bridge_ops { | 18 | struct fpga_bridge_ops { |
17 | int (*enable_show)(struct fpga_bridge *bridge); | 19 | int (*enable_show)(struct fpga_bridge *bridge); |
18 | int (*enable_set)(struct fpga_bridge *bridge, bool enable); | 20 | int (*enable_set)(struct fpga_bridge *bridge, bool enable); |
19 | void (*fpga_bridge_remove)(struct fpga_bridge *bridge); | 21 | void (*fpga_bridge_remove)(struct fpga_bridge *bridge); |
22 | const struct attribute_group **groups; | ||
20 | }; | 23 | }; |
21 | 24 | ||
22 | /** | 25 | /** |
@@ -43,6 +46,8 @@ struct fpga_bridge { | |||
43 | 46 | ||
44 | struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, | 47 | struct fpga_bridge *of_fpga_bridge_get(struct device_node *node, |
45 | struct fpga_image_info *info); | 48 | struct fpga_image_info *info); |
49 | struct fpga_bridge *fpga_bridge_get(struct device *dev, | ||
50 | struct fpga_image_info *info); | ||
46 | void fpga_bridge_put(struct fpga_bridge *bridge); | 51 | void fpga_bridge_put(struct fpga_bridge *bridge); |
47 | int fpga_bridge_enable(struct fpga_bridge *bridge); | 52 | int fpga_bridge_enable(struct fpga_bridge *bridge); |
48 | int fpga_bridge_disable(struct fpga_bridge *bridge); | 53 | int fpga_bridge_disable(struct fpga_bridge *bridge); |
@@ -50,9 +55,12 @@ int fpga_bridge_disable(struct fpga_bridge *bridge); | |||
50 | int fpga_bridges_enable(struct list_head *bridge_list); | 55 | int fpga_bridges_enable(struct list_head *bridge_list); |
51 | int fpga_bridges_disable(struct list_head *bridge_list); | 56 | int fpga_bridges_disable(struct list_head *bridge_list); |
52 | void fpga_bridges_put(struct list_head *bridge_list); | 57 | void fpga_bridges_put(struct list_head *bridge_list); |
53 | int fpga_bridge_get_to_list(struct device_node *np, | 58 | int fpga_bridge_get_to_list(struct device *dev, |
54 | struct fpga_image_info *info, | 59 | struct fpga_image_info *info, |
55 | struct list_head *bridge_list); | 60 | struct list_head *bridge_list); |
61 | int of_fpga_bridge_get_to_list(struct device_node *np, | ||
62 | struct fpga_image_info *info, | ||
63 | struct list_head *bridge_list); | ||
56 | 64 | ||
57 | int fpga_bridge_register(struct device *dev, const char *name, | 65 | int fpga_bridge_register(struct device *dev, const char *name, |
58 | const struct fpga_bridge_ops *br_ops, void *priv); | 66 | const struct fpga_bridge_ops *br_ops, void *priv); |
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h index bfa14bc023fb..3c6de23aabdf 100644 --- a/include/linux/fpga/fpga-mgr.h +++ b/include/linux/fpga/fpga-mgr.h | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * FPGA Framework | 2 | * FPGA Framework |
3 | * | 3 | * |
4 | * Copyright (C) 2013-2015 Altera Corporation | 4 | * Copyright (C) 2013-2016 Altera Corporation |
5 | * Copyright (C) 2017 Intel Corporation | ||
5 | * | 6 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 7 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms and conditions of the GNU General Public License, | 8 | * under the terms and conditions of the GNU General Public License, |
@@ -15,12 +16,12 @@ | |||
15 | * You should have received a copy of the GNU General Public License along with | 16 | * You should have received a copy of the GNU General Public License along with |
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | 17 | * this program. If not, see <http://www.gnu.org/licenses/>. |
17 | */ | 18 | */ |
18 | #include <linux/mutex.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | |||
21 | #ifndef _LINUX_FPGA_MGR_H | 19 | #ifndef _LINUX_FPGA_MGR_H |
22 | #define _LINUX_FPGA_MGR_H | 20 | #define _LINUX_FPGA_MGR_H |
23 | 21 | ||
22 | #include <linux/mutex.h> | ||
23 | #include <linux/platform_device.h> | ||
24 | |||
24 | struct fpga_manager; | 25 | struct fpga_manager; |
25 | struct sg_table; | 26 | struct sg_table; |
26 | 27 | ||
@@ -83,12 +84,26 @@ enum fpga_mgr_states { | |||
83 | * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) | 84 | * @disable_timeout_us: maximum time to disable traffic through bridge (uSec) |
84 | * @config_complete_timeout_us: maximum time for FPGA to switch to operating | 85 | * @config_complete_timeout_us: maximum time for FPGA to switch to operating |
85 | * status in the write_complete op. | 86 | * status in the write_complete op. |
87 | * @firmware_name: name of FPGA image firmware file | ||
88 | * @sgt: scatter/gather table containing FPGA image | ||
89 | * @buf: contiguous buffer containing FPGA image | ||
90 | * @count: size of buf | ||
91 | * @dev: device that owns this | ||
92 | * @overlay: Device Tree overlay | ||
86 | */ | 93 | */ |
87 | struct fpga_image_info { | 94 | struct fpga_image_info { |
88 | u32 flags; | 95 | u32 flags; |
89 | u32 enable_timeout_us; | 96 | u32 enable_timeout_us; |
90 | u32 disable_timeout_us; | 97 | u32 disable_timeout_us; |
91 | u32 config_complete_timeout_us; | 98 | u32 config_complete_timeout_us; |
99 | char *firmware_name; | ||
100 | struct sg_table *sgt; | ||
101 | const char *buf; | ||
102 | size_t count; | ||
103 | struct device *dev; | ||
104 | #ifdef CONFIG_OF | ||
105 | struct device_node *overlay; | ||
106 | #endif | ||
92 | }; | 107 | }; |
93 | 108 | ||
94 | /** | 109 | /** |
@@ -100,6 +115,7 @@ struct fpga_image_info { | |||
100 | * @write_sg: write the scatter list of configuration data to the FPGA | 115 | * @write_sg: write the scatter list of configuration data to the FPGA |
101 | * @write_complete: set FPGA to operating state after writing is done | 116 | * @write_complete: set FPGA to operating state after writing is done |
102 | * @fpga_remove: optional: Set FPGA into a specific state during driver remove | 117 | * @fpga_remove: optional: Set FPGA into a specific state during driver remove |
118 | * @groups: optional attribute groups. | ||
103 | * | 119 | * |
104 | * fpga_manager_ops are the low level functions implemented by a specific | 120 | * fpga_manager_ops are the low level functions implemented by a specific |
105 | * fpga manager driver. The optional ones are tested for NULL before being | 121 | * fpga manager driver. The optional ones are tested for NULL before being |
@@ -116,6 +132,7 @@ struct fpga_manager_ops { | |||
116 | int (*write_complete)(struct fpga_manager *mgr, | 132 | int (*write_complete)(struct fpga_manager *mgr, |
117 | struct fpga_image_info *info); | 133 | struct fpga_image_info *info); |
118 | void (*fpga_remove)(struct fpga_manager *mgr); | 134 | void (*fpga_remove)(struct fpga_manager *mgr); |
135 | const struct attribute_group **groups; | ||
119 | }; | 136 | }; |
120 | 137 | ||
121 | /** | 138 | /** |
@@ -138,14 +155,14 @@ struct fpga_manager { | |||
138 | 155 | ||
139 | #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) | 156 | #define to_fpga_manager(d) container_of(d, struct fpga_manager, dev) |
140 | 157 | ||
141 | int fpga_mgr_buf_load(struct fpga_manager *mgr, struct fpga_image_info *info, | 158 | struct fpga_image_info *fpga_image_info_alloc(struct device *dev); |
142 | const char *buf, size_t count); | 159 | |
143 | int fpga_mgr_buf_load_sg(struct fpga_manager *mgr, struct fpga_image_info *info, | 160 | void fpga_image_info_free(struct fpga_image_info *info); |
144 | struct sg_table *sgt); | 161 | |
162 | int fpga_mgr_load(struct fpga_manager *mgr, struct fpga_image_info *info); | ||
145 | 163 | ||
146 | int fpga_mgr_firmware_load(struct fpga_manager *mgr, | 164 | int fpga_mgr_lock(struct fpga_manager *mgr); |
147 | struct fpga_image_info *info, | 165 | void fpga_mgr_unlock(struct fpga_manager *mgr); |
148 | const char *image_name); | ||
149 | 166 | ||
150 | struct fpga_manager *of_fpga_mgr_get(struct device_node *node); | 167 | struct fpga_manager *of_fpga_mgr_get(struct device_node *node); |
151 | 168 | ||
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h new file mode 100644 index 000000000000..b6520318ab9c --- /dev/null +++ b/include/linux/fpga/fpga-region.h | |||
@@ -0,0 +1,40 @@ | |||
1 | #ifndef _FPGA_REGION_H | ||
2 | #define _FPGA_REGION_H | ||
3 | |||
4 | #include <linux/device.h> | ||
5 | #include <linux/fpga/fpga-mgr.h> | ||
6 | #include <linux/fpga/fpga-bridge.h> | ||
7 | |||
8 | /** | ||
9 | * struct fpga_region - FPGA Region structure | ||
10 | * @dev: FPGA Region device | ||
11 | * @mutex: enforces exclusive reference to region | ||
12 | * @bridge_list: list of FPGA bridges specified in region | ||
13 | * @mgr: FPGA manager | ||
14 | * @info: FPGA image info | ||
15 | * @priv: private data | ||
16 | * @get_bridges: optional function to get bridges to a list | ||
17 | * @groups: optional attribute groups. | ||
18 | */ | ||
19 | struct fpga_region { | ||
20 | struct device dev; | ||
21 | struct mutex mutex; /* for exclusive reference to region */ | ||
22 | struct list_head bridge_list; | ||
23 | struct fpga_manager *mgr; | ||
24 | struct fpga_image_info *info; | ||
25 | void *priv; | ||
26 | int (*get_bridges)(struct fpga_region *region); | ||
27 | const struct attribute_group **groups; | ||
28 | }; | ||
29 | |||
30 | #define to_fpga_region(d) container_of(d, struct fpga_region, dev) | ||
31 | |||
32 | struct fpga_region *fpga_region_class_find( | ||
33 | struct device *start, const void *data, | ||
34 | int (*match)(struct device *, const void *)); | ||
35 | |||
36 | int fpga_region_program_fpga(struct fpga_region *region); | ||
37 | int fpga_region_register(struct device *dev, struct fpga_region *region); | ||
38 | int fpga_region_unregister(struct fpga_region *region); | ||
39 | |||
40 | #endif /* _FPGA_REGION_H */ | ||
diff --git a/include/linux/fs.h b/include/linux/fs.h index 511fbaabf624..2a815560fda0 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -639,7 +639,7 @@ struct inode { | |||
639 | struct hlist_head i_dentry; | 639 | struct hlist_head i_dentry; |
640 | struct rcu_head i_rcu; | 640 | struct rcu_head i_rcu; |
641 | }; | 641 | }; |
642 | u64 i_version; | 642 | atomic64_t i_version; |
643 | atomic_t i_count; | 643 | atomic_t i_count; |
644 | atomic_t i_dio_count; | 644 | atomic_t i_dio_count; |
645 | atomic_t i_writecount; | 645 | atomic_t i_writecount; |
@@ -748,6 +748,11 @@ static inline void inode_lock_nested(struct inode *inode, unsigned subclass) | |||
748 | down_write_nested(&inode->i_rwsem, subclass); | 748 | down_write_nested(&inode->i_rwsem, subclass); |
749 | } | 749 | } |
750 | 750 | ||
751 | static inline void inode_lock_shared_nested(struct inode *inode, unsigned subclass) | ||
752 | { | ||
753 | down_read_nested(&inode->i_rwsem, subclass); | ||
754 | } | ||
755 | |||
751 | void lock_two_nondirectories(struct inode *, struct inode*); | 756 | void lock_two_nondirectories(struct inode *, struct inode*); |
752 | void unlock_two_nondirectories(struct inode *, struct inode*); | 757 | void unlock_two_nondirectories(struct inode *, struct inode*); |
753 | 758 | ||
@@ -1359,7 +1364,7 @@ struct super_block { | |||
1359 | 1364 | ||
1360 | const struct fscrypt_operations *s_cop; | 1365 | const struct fscrypt_operations *s_cop; |
1361 | 1366 | ||
1362 | struct hlist_bl_head s_anon; /* anonymous dentries for (nfs) exporting */ | 1367 | struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ |
1363 | struct list_head s_mounts; /* list of mounts; _not_ for fs use */ | 1368 | struct list_head s_mounts; /* list of mounts; _not_ for fs use */ |
1364 | struct block_device *s_bdev; | 1369 | struct block_device *s_bdev; |
1365 | struct backing_dev_info *s_bdi; | 1370 | struct backing_dev_info *s_bdi; |
@@ -1608,6 +1613,10 @@ extern int vfs_whiteout(struct inode *, struct dentry *); | |||
1608 | extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, | 1613 | extern struct dentry *vfs_tmpfile(struct dentry *dentry, umode_t mode, |
1609 | int open_flag); | 1614 | int open_flag); |
1610 | 1615 | ||
1616 | int vfs_mkobj(struct dentry *, umode_t, | ||
1617 | int (*f)(struct dentry *, umode_t, void *), | ||
1618 | void *); | ||
1619 | |||
1611 | /* | 1620 | /* |
1612 | * VFS file helper functions. | 1621 | * VFS file helper functions. |
1613 | */ | 1622 | */ |
@@ -1698,7 +1707,7 @@ struct file_operations { | |||
1698 | ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); | 1707 | ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); |
1699 | int (*iterate) (struct file *, struct dir_context *); | 1708 | int (*iterate) (struct file *, struct dir_context *); |
1700 | int (*iterate_shared) (struct file *, struct dir_context *); | 1709 | int (*iterate_shared) (struct file *, struct dir_context *); |
1701 | unsigned int (*poll) (struct file *, struct poll_table_struct *); | 1710 | __poll_t (*poll) (struct file *, struct poll_table_struct *); |
1702 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); | 1711 | long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long); |
1703 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); | 1712 | long (*compat_ioctl) (struct file *, unsigned int, unsigned long); |
1704 | int (*mmap) (struct file *, struct vm_area_struct *); | 1713 | int (*mmap) (struct file *, struct vm_area_struct *); |
@@ -2036,21 +2045,6 @@ static inline void inode_dec_link_count(struct inode *inode) | |||
2036 | mark_inode_dirty(inode); | 2045 | mark_inode_dirty(inode); |
2037 | } | 2046 | } |
2038 | 2047 | ||
2039 | /** | ||
2040 | * inode_inc_iversion - increments i_version | ||
2041 | * @inode: inode that need to be updated | ||
2042 | * | ||
2043 | * Every time the inode is modified, the i_version field will be incremented. | ||
2044 | * The filesystem has to be mounted with i_version flag | ||
2045 | */ | ||
2046 | |||
2047 | static inline void inode_inc_iversion(struct inode *inode) | ||
2048 | { | ||
2049 | spin_lock(&inode->i_lock); | ||
2050 | inode->i_version++; | ||
2051 | spin_unlock(&inode->i_lock); | ||
2052 | } | ||
2053 | |||
2054 | enum file_time_flags { | 2048 | enum file_time_flags { |
2055 | S_ATIME = 1, | 2049 | S_ATIME = 1, |
2056 | S_MTIME = 2, | 2050 | S_MTIME = 2, |
@@ -2699,7 +2693,6 @@ extern sector_t bmap(struct inode *, sector_t); | |||
2699 | #endif | 2693 | #endif |
2700 | extern int notify_change(struct dentry *, struct iattr *, struct inode **); | 2694 | extern int notify_change(struct dentry *, struct iattr *, struct inode **); |
2701 | extern int inode_permission(struct inode *, int); | 2695 | extern int inode_permission(struct inode *, int); |
2702 | extern int __inode_permission(struct inode *, int); | ||
2703 | extern int generic_permission(struct inode *, int); | 2696 | extern int generic_permission(struct inode *, int); |
2704 | extern int __check_sticky(struct inode *dir, struct inode *inode); | 2697 | extern int __check_sticky(struct inode *dir, struct inode *inode); |
2705 | 2698 | ||
@@ -2992,6 +2985,7 @@ enum { | |||
2992 | }; | 2985 | }; |
2993 | 2986 | ||
2994 | void dio_end_io(struct bio *bio); | 2987 | void dio_end_io(struct bio *bio); |
2988 | void dio_warn_stale_pagecache(struct file *filp); | ||
2995 | 2989 | ||
2996 | ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, | 2990 | ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode, |
2997 | struct block_device *bdev, struct iov_iter *iter, | 2991 | struct block_device *bdev, struct iov_iter *iter, |
@@ -3239,6 +3233,8 @@ static inline int kiocb_set_rw_flags(struct kiocb *ki, rwf_t flags) | |||
3239 | ki->ki_flags |= IOCB_DSYNC; | 3233 | ki->ki_flags |= IOCB_DSYNC; |
3240 | if (flags & RWF_SYNC) | 3234 | if (flags & RWF_SYNC) |
3241 | ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); | 3235 | ki->ki_flags |= (IOCB_DSYNC | IOCB_SYNC); |
3236 | if (flags & RWF_APPEND) | ||
3237 | ki->ki_flags |= IOCB_APPEND; | ||
3242 | return 0; | 3238 | return 0; |
3243 | } | 3239 | } |
3244 | 3240 | ||
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 08b4b40c5aa8..952ab97af325 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h | |||
@@ -14,42 +14,13 @@ | |||
14 | #ifndef _LINUX_FSCRYPT_H | 14 | #ifndef _LINUX_FSCRYPT_H |
15 | #define _LINUX_FSCRYPT_H | 15 | #define _LINUX_FSCRYPT_H |
16 | 16 | ||
17 | #include <linux/key.h> | ||
18 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
19 | #include <linux/mm.h> | ||
20 | #include <linux/bio.h> | ||
21 | #include <linux/dcache.h> | ||
22 | #include <crypto/skcipher.h> | ||
23 | #include <uapi/linux/fs.h> | ||
24 | 18 | ||
25 | #define FS_CRYPTO_BLOCK_SIZE 16 | 19 | #define FS_CRYPTO_BLOCK_SIZE 16 |
26 | 20 | ||
21 | struct fscrypt_ctx; | ||
27 | struct fscrypt_info; | 22 | struct fscrypt_info; |
28 | 23 | ||
29 | struct fscrypt_ctx { | ||
30 | union { | ||
31 | struct { | ||
32 | struct page *bounce_page; /* Ciphertext page */ | ||
33 | struct page *control_page; /* Original page */ | ||
34 | } w; | ||
35 | struct { | ||
36 | struct bio *bio; | ||
37 | struct work_struct work; | ||
38 | } r; | ||
39 | struct list_head free_list; /* Free list */ | ||
40 | }; | ||
41 | u8 flags; /* Flags */ | ||
42 | }; | ||
43 | |||
44 | /** | ||
45 | * For encrypted symlinks, the ciphertext length is stored at the beginning | ||
46 | * of the string in little-endian format. | ||
47 | */ | ||
48 | struct fscrypt_symlink_data { | ||
49 | __le16 len; | ||
50 | char encrypted_path[1]; | ||
51 | } __packed; | ||
52 | |||
53 | struct fscrypt_str { | 24 | struct fscrypt_str { |
54 | unsigned char *name; | 25 | unsigned char *name; |
55 | u32 len; | 26 | u32 len; |
@@ -68,89 +39,14 @@ struct fscrypt_name { | |||
68 | #define fname_name(p) ((p)->disk_name.name) | 39 | #define fname_name(p) ((p)->disk_name.name) |
69 | #define fname_len(p) ((p)->disk_name.len) | 40 | #define fname_len(p) ((p)->disk_name.len) |
70 | 41 | ||
71 | /* | ||
72 | * fscrypt superblock flags | ||
73 | */ | ||
74 | #define FS_CFLG_OWN_PAGES (1U << 1) | ||
75 | |||
76 | /* | ||
77 | * crypto opertions for filesystems | ||
78 | */ | ||
79 | struct fscrypt_operations { | ||
80 | unsigned int flags; | ||
81 | const char *key_prefix; | ||
82 | int (*get_context)(struct inode *, void *, size_t); | ||
83 | int (*set_context)(struct inode *, const void *, size_t, void *); | ||
84 | bool (*dummy_context)(struct inode *); | ||
85 | bool (*empty_dir)(struct inode *); | ||
86 | unsigned (*max_namelen)(struct inode *); | ||
87 | }; | ||
88 | |||
89 | /* Maximum value for the third parameter of fscrypt_operations.set_context(). */ | 42 | /* Maximum value for the third parameter of fscrypt_operations.set_context(). */ |
90 | #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 | 43 | #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 |
91 | 44 | ||
92 | static inline bool fscrypt_dummy_context_enabled(struct inode *inode) | ||
93 | { | ||
94 | if (inode->i_sb->s_cop->dummy_context && | ||
95 | inode->i_sb->s_cop->dummy_context(inode)) | ||
96 | return true; | ||
97 | return false; | ||
98 | } | ||
99 | |||
100 | static inline bool fscrypt_valid_enc_modes(u32 contents_mode, | ||
101 | u32 filenames_mode) | ||
102 | { | ||
103 | if (contents_mode == FS_ENCRYPTION_MODE_AES_128_CBC && | ||
104 | filenames_mode == FS_ENCRYPTION_MODE_AES_128_CTS) | ||
105 | return true; | ||
106 | |||
107 | if (contents_mode == FS_ENCRYPTION_MODE_AES_256_XTS && | ||
108 | filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS) | ||
109 | return true; | ||
110 | |||
111 | return false; | ||
112 | } | ||
113 | |||
114 | static inline bool fscrypt_is_dot_dotdot(const struct qstr *str) | ||
115 | { | ||
116 | if (str->len == 1 && str->name[0] == '.') | ||
117 | return true; | ||
118 | |||
119 | if (str->len == 2 && str->name[0] == '.' && str->name[1] == '.') | ||
120 | return true; | ||
121 | |||
122 | return false; | ||
123 | } | ||
124 | |||
125 | #if __FS_HAS_ENCRYPTION | 45 | #if __FS_HAS_ENCRYPTION |
126 | |||
127 | static inline struct page *fscrypt_control_page(struct page *page) | ||
128 | { | ||
129 | return ((struct fscrypt_ctx *)page_private(page))->w.control_page; | ||
130 | } | ||
131 | |||
132 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
133 | { | ||
134 | return (inode->i_crypt_info != NULL); | ||
135 | } | ||
136 | |||
137 | #include <linux/fscrypt_supp.h> | 46 | #include <linux/fscrypt_supp.h> |
138 | 47 | #else | |
139 | #else /* !__FS_HAS_ENCRYPTION */ | ||
140 | |||
141 | static inline struct page *fscrypt_control_page(struct page *page) | ||
142 | { | ||
143 | WARN_ON_ONCE(1); | ||
144 | return ERR_PTR(-EINVAL); | ||
145 | } | ||
146 | |||
147 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
148 | { | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | #include <linux/fscrypt_notsupp.h> | 48 | #include <linux/fscrypt_notsupp.h> |
153 | #endif /* __FS_HAS_ENCRYPTION */ | 49 | #endif |
154 | 50 | ||
155 | /** | 51 | /** |
156 | * fscrypt_require_key - require an inode's encryption key | 52 | * fscrypt_require_key - require an inode's encryption key |
@@ -291,4 +187,68 @@ static inline int fscrypt_prepare_setattr(struct dentry *dentry, | |||
291 | return 0; | 187 | return 0; |
292 | } | 188 | } |
293 | 189 | ||
190 | /** | ||
191 | * fscrypt_prepare_symlink - prepare to create a possibly-encrypted symlink | ||
192 | * @dir: directory in which the symlink is being created | ||
193 | * @target: plaintext symlink target | ||
194 | * @len: length of @target excluding null terminator | ||
195 | * @max_len: space the filesystem has available to store the symlink target | ||
196 | * @disk_link: (out) the on-disk symlink target being prepared | ||
197 | * | ||
198 | * This function computes the size the symlink target will require on-disk, | ||
199 | * stores it in @disk_link->len, and validates it against @max_len. An | ||
200 | * encrypted symlink may be longer than the original. | ||
201 | * | ||
202 | * Additionally, @disk_link->name is set to @target if the symlink will be | ||
203 | * unencrypted, but left NULL if the symlink will be encrypted. For encrypted | ||
204 | * symlinks, the filesystem must call fscrypt_encrypt_symlink() to create the | ||
205 | * on-disk target later. (The reason for the two-step process is that some | ||
206 | * filesystems need to know the size of the symlink target before creating the | ||
207 | * inode, e.g. to determine whether it will be a "fast" or "slow" symlink.) | ||
208 | * | ||
209 | * Return: 0 on success, -ENAMETOOLONG if the symlink target is too long, | ||
210 | * -ENOKEY if the encryption key is missing, or another -errno code if a problem | ||
211 | * occurred while setting up the encryption key. | ||
212 | */ | ||
213 | static inline int fscrypt_prepare_symlink(struct inode *dir, | ||
214 | const char *target, | ||
215 | unsigned int len, | ||
216 | unsigned int max_len, | ||
217 | struct fscrypt_str *disk_link) | ||
218 | { | ||
219 | if (IS_ENCRYPTED(dir) || fscrypt_dummy_context_enabled(dir)) | ||
220 | return __fscrypt_prepare_symlink(dir, len, max_len, disk_link); | ||
221 | |||
222 | disk_link->name = (unsigned char *)target; | ||
223 | disk_link->len = len + 1; | ||
224 | if (disk_link->len > max_len) | ||
225 | return -ENAMETOOLONG; | ||
226 | return 0; | ||
227 | } | ||
228 | |||
229 | /** | ||
230 | * fscrypt_encrypt_symlink - encrypt the symlink target if needed | ||
231 | * @inode: symlink inode | ||
232 | * @target: plaintext symlink target | ||
233 | * @len: length of @target excluding null terminator | ||
234 | * @disk_link: (in/out) the on-disk symlink target being prepared | ||
235 | * | ||
236 | * If the symlink target needs to be encrypted, then this function encrypts it | ||
237 | * into @disk_link->name. fscrypt_prepare_symlink() must have been called | ||
238 | * previously to compute @disk_link->len. If the filesystem did not allocate a | ||
239 | * buffer for @disk_link->name after calling fscrypt_prepare_link(), then one | ||
240 | * will be kmalloc()'ed and the filesystem will be responsible for freeing it. | ||
241 | * | ||
242 | * Return: 0 on success, -errno on failure | ||
243 | */ | ||
244 | static inline int fscrypt_encrypt_symlink(struct inode *inode, | ||
245 | const char *target, | ||
246 | unsigned int len, | ||
247 | struct fscrypt_str *disk_link) | ||
248 | { | ||
249 | if (IS_ENCRYPTED(inode)) | ||
250 | return __fscrypt_encrypt_symlink(inode, target, len, disk_link); | ||
251 | return 0; | ||
252 | } | ||
253 | |||
294 | #endif /* _LINUX_FSCRYPT_H */ | 254 | #endif /* _LINUX_FSCRYPT_H */ |
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h index 63e58808519a..44b50c04bae9 100644 --- a/include/linux/fscrypt_notsupp.h +++ b/include/linux/fscrypt_notsupp.h | |||
@@ -14,6 +14,16 @@ | |||
14 | #ifndef _LINUX_FSCRYPT_NOTSUPP_H | 14 | #ifndef _LINUX_FSCRYPT_NOTSUPP_H |
15 | #define _LINUX_FSCRYPT_NOTSUPP_H | 15 | #define _LINUX_FSCRYPT_NOTSUPP_H |
16 | 16 | ||
17 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
18 | { | ||
19 | return false; | ||
20 | } | ||
21 | |||
22 | static inline bool fscrypt_dummy_context_enabled(struct inode *inode) | ||
23 | { | ||
24 | return false; | ||
25 | } | ||
26 | |||
17 | /* crypto.c */ | 27 | /* crypto.c */ |
18 | static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, | 28 | static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, |
19 | gfp_t gfp_flags) | 29 | gfp_t gfp_flags) |
@@ -43,6 +53,11 @@ static inline int fscrypt_decrypt_page(const struct inode *inode, | |||
43 | return -EOPNOTSUPP; | 53 | return -EOPNOTSUPP; |
44 | } | 54 | } |
45 | 55 | ||
56 | static inline struct page *fscrypt_control_page(struct page *page) | ||
57 | { | ||
58 | WARN_ON_ONCE(1); | ||
59 | return ERR_PTR(-EINVAL); | ||
60 | } | ||
46 | 61 | ||
47 | static inline void fscrypt_restore_control_page(struct page *page) | 62 | static inline void fscrypt_restore_control_page(struct page *page) |
48 | { | 63 | { |
@@ -90,8 +105,7 @@ static inline int fscrypt_get_encryption_info(struct inode *inode) | |||
90 | return -EOPNOTSUPP; | 105 | return -EOPNOTSUPP; |
91 | } | 106 | } |
92 | 107 | ||
93 | static inline void fscrypt_put_encryption_info(struct inode *inode, | 108 | static inline void fscrypt_put_encryption_info(struct inode *inode) |
94 | struct fscrypt_info *ci) | ||
95 | { | 109 | { |
96 | return; | 110 | return; |
97 | } | 111 | } |
@@ -116,16 +130,8 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) | |||
116 | return; | 130 | return; |
117 | } | 131 | } |
118 | 132 | ||
119 | static inline u32 fscrypt_fname_encrypted_size(const struct inode *inode, | ||
120 | u32 ilen) | ||
121 | { | ||
122 | /* never happens */ | ||
123 | WARN_ON(1); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, | 133 | static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, |
128 | u32 ilen, | 134 | u32 max_encrypted_len, |
129 | struct fscrypt_str *crypto_str) | 135 | struct fscrypt_str *crypto_str) |
130 | { | 136 | { |
131 | return -EOPNOTSUPP; | 137 | return -EOPNOTSUPP; |
@@ -144,13 +150,6 @@ static inline int fscrypt_fname_disk_to_usr(struct inode *inode, | |||
144 | return -EOPNOTSUPP; | 150 | return -EOPNOTSUPP; |
145 | } | 151 | } |
146 | 152 | ||
147 | static inline int fscrypt_fname_usr_to_disk(struct inode *inode, | ||
148 | const struct qstr *iname, | ||
149 | struct fscrypt_str *oname) | ||
150 | { | ||
151 | return -EOPNOTSUPP; | ||
152 | } | ||
153 | |||
154 | static inline bool fscrypt_match_name(const struct fscrypt_name *fname, | 153 | static inline bool fscrypt_match_name(const struct fscrypt_name *fname, |
155 | const u8 *de_name, u32 de_name_len) | 154 | const u8 *de_name, u32 de_name_len) |
156 | { | 155 | { |
@@ -208,4 +207,28 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir, | |||
208 | return -EOPNOTSUPP; | 207 | return -EOPNOTSUPP; |
209 | } | 208 | } |
210 | 209 | ||
210 | static inline int __fscrypt_prepare_symlink(struct inode *dir, | ||
211 | unsigned int len, | ||
212 | unsigned int max_len, | ||
213 | struct fscrypt_str *disk_link) | ||
214 | { | ||
215 | return -EOPNOTSUPP; | ||
216 | } | ||
217 | |||
218 | static inline int __fscrypt_encrypt_symlink(struct inode *inode, | ||
219 | const char *target, | ||
220 | unsigned int len, | ||
221 | struct fscrypt_str *disk_link) | ||
222 | { | ||
223 | return -EOPNOTSUPP; | ||
224 | } | ||
225 | |||
226 | static inline const char *fscrypt_get_symlink(struct inode *inode, | ||
227 | const void *caddr, | ||
228 | unsigned int max_size, | ||
229 | struct delayed_call *done) | ||
230 | { | ||
231 | return ERR_PTR(-EOPNOTSUPP); | ||
232 | } | ||
233 | |||
211 | #endif /* _LINUX_FSCRYPT_NOTSUPP_H */ | 234 | #endif /* _LINUX_FSCRYPT_NOTSUPP_H */ |
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h index cf9e9fc02f0a..477a7a6504d2 100644 --- a/include/linux/fscrypt_supp.h +++ b/include/linux/fscrypt_supp.h | |||
@@ -11,8 +11,54 @@ | |||
11 | #ifndef _LINUX_FSCRYPT_SUPP_H | 11 | #ifndef _LINUX_FSCRYPT_SUPP_H |
12 | #define _LINUX_FSCRYPT_SUPP_H | 12 | #define _LINUX_FSCRYPT_SUPP_H |
13 | 13 | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/slab.h> | ||
16 | |||
17 | /* | ||
18 | * fscrypt superblock flags | ||
19 | */ | ||
20 | #define FS_CFLG_OWN_PAGES (1U << 1) | ||
21 | |||
22 | /* | ||
23 | * crypto operations for filesystems | ||
24 | */ | ||
25 | struct fscrypt_operations { | ||
26 | unsigned int flags; | ||
27 | const char *key_prefix; | ||
28 | int (*get_context)(struct inode *, void *, size_t); | ||
29 | int (*set_context)(struct inode *, const void *, size_t, void *); | ||
30 | bool (*dummy_context)(struct inode *); | ||
31 | bool (*empty_dir)(struct inode *); | ||
32 | unsigned (*max_namelen)(struct inode *); | ||
33 | }; | ||
34 | |||
35 | struct fscrypt_ctx { | ||
36 | union { | ||
37 | struct { | ||
38 | struct page *bounce_page; /* Ciphertext page */ | ||
39 | struct page *control_page; /* Original page */ | ||
40 | } w; | ||
41 | struct { | ||
42 | struct bio *bio; | ||
43 | struct work_struct work; | ||
44 | } r; | ||
45 | struct list_head free_list; /* Free list */ | ||
46 | }; | ||
47 | u8 flags; /* Flags */ | ||
48 | }; | ||
49 | |||
50 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
51 | { | ||
52 | return (inode->i_crypt_info != NULL); | ||
53 | } | ||
54 | |||
55 | static inline bool fscrypt_dummy_context_enabled(struct inode *inode) | ||
56 | { | ||
57 | return inode->i_sb->s_cop->dummy_context && | ||
58 | inode->i_sb->s_cop->dummy_context(inode); | ||
59 | } | ||
60 | |||
14 | /* crypto.c */ | 61 | /* crypto.c */ |
15 | extern struct kmem_cache *fscrypt_info_cachep; | ||
16 | extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); | 62 | extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); |
17 | extern void fscrypt_release_ctx(struct fscrypt_ctx *); | 63 | extern void fscrypt_release_ctx(struct fscrypt_ctx *); |
18 | extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, | 64 | extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, |
@@ -20,6 +66,12 @@ extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, | |||
20 | u64, gfp_t); | 66 | u64, gfp_t); |
21 | extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, | 67 | extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, |
22 | unsigned int, u64); | 68 | unsigned int, u64); |
69 | |||
70 | static inline struct page *fscrypt_control_page(struct page *page) | ||
71 | { | ||
72 | return ((struct fscrypt_ctx *)page_private(page))->w.control_page; | ||
73 | } | ||
74 | |||
23 | extern void fscrypt_restore_control_page(struct page *); | 75 | extern void fscrypt_restore_control_page(struct page *); |
24 | 76 | ||
25 | extern const struct dentry_operations fscrypt_d_ops; | 77 | extern const struct dentry_operations fscrypt_d_ops; |
@@ -44,7 +96,7 @@ extern int fscrypt_inherit_context(struct inode *, struct inode *, | |||
44 | void *, bool); | 96 | void *, bool); |
45 | /* keyinfo.c */ | 97 | /* keyinfo.c */ |
46 | extern int fscrypt_get_encryption_info(struct inode *); | 98 | extern int fscrypt_get_encryption_info(struct inode *); |
47 | extern void fscrypt_put_encryption_info(struct inode *, struct fscrypt_info *); | 99 | extern void fscrypt_put_encryption_info(struct inode *); |
48 | 100 | ||
49 | /* fname.c */ | 101 | /* fname.c */ |
50 | extern int fscrypt_setup_filename(struct inode *, const struct qstr *, | 102 | extern int fscrypt_setup_filename(struct inode *, const struct qstr *, |
@@ -55,14 +107,11 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname) | |||
55 | kfree(fname->crypto_buf.name); | 107 | kfree(fname->crypto_buf.name); |
56 | } | 108 | } |
57 | 109 | ||
58 | extern u32 fscrypt_fname_encrypted_size(const struct inode *, u32); | ||
59 | extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, | 110 | extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, |
60 | struct fscrypt_str *); | 111 | struct fscrypt_str *); |
61 | extern void fscrypt_fname_free_buffer(struct fscrypt_str *); | 112 | extern void fscrypt_fname_free_buffer(struct fscrypt_str *); |
62 | extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, | 113 | extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, |
63 | const struct fscrypt_str *, struct fscrypt_str *); | 114 | const struct fscrypt_str *, struct fscrypt_str *); |
64 | extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *, | ||
65 | struct fscrypt_str *); | ||
66 | 115 | ||
67 | #define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 | 116 | #define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 |
68 | 117 | ||
@@ -153,5 +202,14 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir, | |||
153 | struct dentry *new_dentry, | 202 | struct dentry *new_dentry, |
154 | unsigned int flags); | 203 | unsigned int flags); |
155 | extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); | 204 | extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); |
205 | extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, | ||
206 | unsigned int max_len, | ||
207 | struct fscrypt_str *disk_link); | ||
208 | extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, | ||
209 | unsigned int len, | ||
210 | struct fscrypt_str *disk_link); | ||
211 | extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, | ||
212 | unsigned int max_size, | ||
213 | struct delayed_call *done); | ||
156 | 214 | ||
157 | #endif /* _LINUX_FSCRYPT_SUPP_H */ | 215 | #endif /* _LINUX_FSCRYPT_SUPP_H */ |
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h index 411a84c6c400..4fa1a489efe4 100644 --- a/include/linux/fwnode.h +++ b/include/linux/fwnode.h | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
16 | 16 | ||
17 | struct fwnode_operations; | 17 | struct fwnode_operations; |
18 | struct device; | ||
18 | 19 | ||
19 | struct fwnode_handle { | 20 | struct fwnode_handle { |
20 | struct fwnode_handle *secondary; | 21 | struct fwnode_handle *secondary; |
@@ -51,6 +52,7 @@ struct fwnode_reference_args { | |||
51 | * struct fwnode_operations - Operations for fwnode interface | 52 | * struct fwnode_operations - Operations for fwnode interface |
52 | * @get: Get a reference to an fwnode. | 53 | * @get: Get a reference to an fwnode. |
53 | * @put: Put a reference to an fwnode. | 54 | * @put: Put a reference to an fwnode. |
55 | * @device_get_match_data: Return the device driver match data. | ||
54 | * @property_present: Return true if a property is present. | 56 | * @property_present: Return true if a property is present. |
55 | * @property_read_integer_array: Read an array of integer properties. Return | 57 | * @property_read_integer_array: Read an array of integer properties. Return |
56 | * zero on success, a negative error code | 58 | * zero on success, a negative error code |
@@ -71,6 +73,8 @@ struct fwnode_operations { | |||
71 | struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); | 73 | struct fwnode_handle *(*get)(struct fwnode_handle *fwnode); |
72 | void (*put)(struct fwnode_handle *fwnode); | 74 | void (*put)(struct fwnode_handle *fwnode); |
73 | bool (*device_is_available)(const struct fwnode_handle *fwnode); | 75 | bool (*device_is_available)(const struct fwnode_handle *fwnode); |
76 | void *(*device_get_match_data)(const struct fwnode_handle *fwnode, | ||
77 | const struct device *dev); | ||
74 | bool (*property_present)(const struct fwnode_handle *fwnode, | 78 | bool (*property_present)(const struct fwnode_handle *fwnode, |
75 | const char *propname); | 79 | const char *propname); |
76 | int (*property_read_int_array)(const struct fwnode_handle *fwnode, | 80 | int (*property_read_int_array)(const struct fwnode_handle *fwnode, |
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index ecc2928e8046..bc738504ab4a 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h | |||
@@ -31,8 +31,7 @@ extern wait_queue_head_t genl_sk_destructing_waitq; | |||
31 | * @p: The pointer to read, prior to dereferencing | 31 | * @p: The pointer to read, prior to dereferencing |
32 | * | 32 | * |
33 | * Return the value of the specified RCU-protected pointer, but omit | 33 | * Return the value of the specified RCU-protected pointer, but omit |
34 | * both the smp_read_barrier_depends() and the READ_ONCE(), because | 34 | * the READ_ONCE(), because caller holds genl mutex. |
35 | * caller holds genl mutex. | ||
36 | */ | 35 | */ |
37 | #define genl_dereference(p) \ | 36 | #define genl_dereference(p) \ |
38 | rcu_dereference_protected(p, lockdep_genl_is_held()) | 37 | rcu_dereference_protected(p, lockdep_genl_is_held()) |
diff --git a/include/linux/genhd.h b/include/linux/genhd.h index 5144ebe046c9..5e3531027b51 100644 --- a/include/linux/genhd.h +++ b/include/linux/genhd.h | |||
@@ -395,6 +395,11 @@ static inline void add_disk(struct gendisk *disk) | |||
395 | { | 395 | { |
396 | device_add_disk(NULL, disk); | 396 | device_add_disk(NULL, disk); |
397 | } | 397 | } |
398 | extern void device_add_disk_no_queue_reg(struct device *parent, struct gendisk *disk); | ||
399 | static inline void add_disk_no_queue_reg(struct gendisk *disk) | ||
400 | { | ||
401 | device_add_disk_no_queue_reg(NULL, disk); | ||
402 | } | ||
398 | 403 | ||
399 | extern void del_gendisk(struct gendisk *gp); | 404 | extern void del_gendisk(struct gendisk *gp); |
400 | extern struct gendisk *get_gendisk(dev_t dev, int *partno); | 405 | extern struct gendisk *get_gendisk(dev_t dev, int *partno); |
diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h index 604967609e55..83f81ac53282 100644 --- a/include/linux/genl_magic_func.h +++ b/include/linux/genl_magic_func.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #ifndef GENL_MAGIC_FUNC_H | 2 | #ifndef GENL_MAGIC_FUNC_H |
3 | #define GENL_MAGIC_FUNC_H | 3 | #define GENL_MAGIC_FUNC_H |
4 | 4 | ||
5 | #include <linux/build_bug.h> | ||
5 | #include <linux/genl_magic_struct.h> | 6 | #include <linux/genl_magic_struct.h> |
6 | 7 | ||
7 | /* | 8 | /* |
@@ -132,17 +133,6 @@ static void dprint_array(const char *dir, int nla_type, | |||
132 | * use one static buffer for parsing of nested attributes */ | 133 | * use one static buffer for parsing of nested attributes */ |
133 | static struct nlattr *nested_attr_tb[128]; | 134 | static struct nlattr *nested_attr_tb[128]; |
134 | 135 | ||
135 | #ifndef BUILD_BUG_ON | ||
136 | /* Force a compilation error if condition is true */ | ||
137 | #define BUILD_BUG_ON(condition) ((void)BUILD_BUG_ON_ZERO(condition)) | ||
138 | /* Force a compilation error if condition is true, but also produce a | ||
139 | result (of value 0 and type size_t), so the expression can be used | ||
140 | e.g. in a structure initializer (or where-ever else comma expressions | ||
141 | aren't permitted). */ | ||
142 | #define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) | ||
143 | #define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) | ||
144 | #endif | ||
145 | |||
146 | #undef GENL_struct | 136 | #undef GENL_struct |
147 | #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ | 137 | #define GENL_struct(tag_name, tag_number, s_name, s_fields) \ |
148 | /* *_from_attrs functions are static, but potentially unused */ \ | 138 | /* *_from_attrs functions are static, but potentially unused */ \ |
diff --git a/include/linux/gpio.h b/include/linux/gpio.h index 8ef7fc0ce0f0..91ed23468530 100644 --- a/include/linux/gpio.h +++ b/include/linux/gpio.h | |||
@@ -1,4 +1,14 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | ||
3 | * <linux/gpio.h> | ||
4 | * | ||
5 | * This is the LEGACY GPIO bulk include file, including legacy APIs. It is | ||
6 | * used for GPIO drivers still referencing the global GPIO numberspace, | ||
7 | * and should not be included in new code. | ||
8 | * | ||
9 | * If you're implementing a GPIO driver, only include <linux/gpio/driver.h> | ||
10 | * If you're implementing a GPIO consumer, only include <linux/gpio/consumer.h> | ||
11 | */ | ||
2 | #ifndef __LINUX_GPIO_H | 12 | #ifndef __LINUX_GPIO_H |
3 | #define __LINUX_GPIO_H | 13 | #define __LINUX_GPIO_H |
4 | 14 | ||
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h index 7447d85dbe2f..dbd065963296 100644 --- a/include/linux/gpio/consumer.h +++ b/include/linux/gpio/consumer.h | |||
@@ -139,6 +139,7 @@ void gpiod_set_raw_array_value_cansleep(unsigned int array_size, | |||
139 | int *value_array); | 139 | int *value_array); |
140 | 140 | ||
141 | int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); | 141 | int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce); |
142 | int gpiod_set_transitory(struct gpio_desc *desc, bool transitory); | ||
142 | 143 | ||
143 | int gpiod_is_active_low(const struct gpio_desc *desc); | 144 | int gpiod_is_active_low(const struct gpio_desc *desc); |
144 | int gpiod_cansleep(const struct gpio_desc *desc); | 145 | int gpiod_cansleep(const struct gpio_desc *desc); |
@@ -150,8 +151,14 @@ struct gpio_desc *gpio_to_desc(unsigned gpio); | |||
150 | int desc_to_gpio(const struct gpio_desc *desc); | 151 | int desc_to_gpio(const struct gpio_desc *desc); |
151 | 152 | ||
152 | /* Child properties interface */ | 153 | /* Child properties interface */ |
154 | struct device_node; | ||
153 | struct fwnode_handle; | 155 | struct fwnode_handle; |
154 | 156 | ||
157 | struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, | ||
158 | struct device_node *node, | ||
159 | const char *propname, int index, | ||
160 | enum gpiod_flags dflags, | ||
161 | const char *label); | ||
155 | struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, | 162 | struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, |
156 | const char *propname, int index, | 163 | const char *propname, int index, |
157 | enum gpiod_flags dflags, | 164 | enum gpiod_flags dflags, |
@@ -431,6 +438,13 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) | |||
431 | return -ENOSYS; | 438 | return -ENOSYS; |
432 | } | 439 | } |
433 | 440 | ||
441 | static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory) | ||
442 | { | ||
443 | /* GPIO can never have been requested */ | ||
444 | WARN_ON(1); | ||
445 | return -ENOSYS; | ||
446 | } | ||
447 | |||
434 | static inline int gpiod_is_active_low(const struct gpio_desc *desc) | 448 | static inline int gpiod_is_active_low(const struct gpio_desc *desc) |
435 | { | 449 | { |
436 | /* GPIO can never have been requested */ | 450 | /* GPIO can never have been requested */ |
@@ -464,9 +478,20 @@ static inline int desc_to_gpio(const struct gpio_desc *desc) | |||
464 | } | 478 | } |
465 | 479 | ||
466 | /* Child properties interface */ | 480 | /* Child properties interface */ |
481 | struct device_node; | ||
467 | struct fwnode_handle; | 482 | struct fwnode_handle; |
468 | 483 | ||
469 | static inline | 484 | static inline |
485 | struct gpio_desc *devm_gpiod_get_from_of_node(struct device *dev, | ||
486 | struct device_node *node, | ||
487 | const char *propname, int index, | ||
488 | enum gpiod_flags dflags, | ||
489 | const char *label) | ||
490 | { | ||
491 | return ERR_PTR(-ENOSYS); | ||
492 | } | ||
493 | |||
494 | static inline | ||
470 | struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, | 495 | struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode, |
471 | const char *propname, int index, | 496 | const char *propname, int index, |
472 | enum gpiod_flags dflags, | 497 | enum gpiod_flags dflags, |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 7258cd676df4..1ba9a331ec51 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
@@ -436,6 +436,9 @@ int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip, | |||
436 | struct lock_class_key *lock_key, | 436 | struct lock_class_key *lock_key, |
437 | struct lock_class_key *request_key); | 437 | struct lock_class_key *request_key); |
438 | 438 | ||
439 | bool gpiochip_irqchip_irq_valid(const struct gpio_chip *gpiochip, | ||
440 | unsigned int offset); | ||
441 | |||
439 | #ifdef CONFIG_LOCKDEP | 442 | #ifdef CONFIG_LOCKDEP |
440 | 443 | ||
441 | /* | 444 | /* |
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index 846be7c69a52..b2f2dc638463 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h | |||
@@ -10,8 +10,8 @@ enum gpio_lookup_flags { | |||
10 | GPIO_ACTIVE_LOW = (1 << 0), | 10 | GPIO_ACTIVE_LOW = (1 << 0), |
11 | GPIO_OPEN_DRAIN = (1 << 1), | 11 | GPIO_OPEN_DRAIN = (1 << 1), |
12 | GPIO_OPEN_SOURCE = (1 << 2), | 12 | GPIO_OPEN_SOURCE = (1 << 2), |
13 | GPIO_SLEEP_MAINTAIN_VALUE = (0 << 3), | 13 | GPIO_PERSISTENT = (0 << 3), |
14 | GPIO_SLEEP_MAY_LOSE_VALUE = (1 << 3), | 14 | GPIO_TRANSITORY = (1 << 3), |
15 | }; | 15 | }; |
16 | 16 | ||
17 | /** | 17 | /** |
diff --git a/include/linux/hid.h b/include/linux/hid.h index d491027a7c22..091a81cf330f 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
@@ -281,6 +281,7 @@ struct hid_item { | |||
281 | 281 | ||
282 | #define HID_DG_DEVICECONFIG 0x000d000e | 282 | #define HID_DG_DEVICECONFIG 0x000d000e |
283 | #define HID_DG_DEVICESETTINGS 0x000d0023 | 283 | #define HID_DG_DEVICESETTINGS 0x000d0023 |
284 | #define HID_DG_AZIMUTH 0x000d003f | ||
284 | #define HID_DG_CONFIDENCE 0x000d0047 | 285 | #define HID_DG_CONFIDENCE 0x000d0047 |
285 | #define HID_DG_WIDTH 0x000d0048 | 286 | #define HID_DG_WIDTH 0x000d0048 |
286 | #define HID_DG_HEIGHT 0x000d0049 | 287 | #define HID_DG_HEIGHT 0x000d0049 |
@@ -342,6 +343,7 @@ struct hid_item { | |||
342 | #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 | 343 | #define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000 |
343 | #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 | 344 | #define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000 |
344 | #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 | 345 | #define HID_QUIRK_NO_OUTPUT_REPORTS_ON_INTR_EP 0x00040000 |
346 | #define HID_QUIRK_HAVE_SPECIAL_DRIVER 0x00080000 | ||
345 | #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 | 347 | #define HID_QUIRK_FULLSPEED_INTERVAL 0x10000000 |
346 | #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 | 348 | #define HID_QUIRK_NO_INIT_REPORTS 0x20000000 |
347 | #define HID_QUIRK_NO_IGNORE 0x40000000 | 349 | #define HID_QUIRK_NO_IGNORE 0x40000000 |
@@ -671,6 +673,7 @@ struct hid_usage_id { | |||
671 | * to be called) | 673 | * to be called) |
672 | * @dyn_list: list of dynamically added device ids | 674 | * @dyn_list: list of dynamically added device ids |
673 | * @dyn_lock: lock protecting @dyn_list | 675 | * @dyn_lock: lock protecting @dyn_list |
676 | * @match: check if the given device is handled by this driver | ||
674 | * @probe: new device inserted | 677 | * @probe: new device inserted |
675 | * @remove: device removed (NULL if not a hot-plug capable driver) | 678 | * @remove: device removed (NULL if not a hot-plug capable driver) |
676 | * @report_table: on which reports to call raw_event (NULL means all) | 679 | * @report_table: on which reports to call raw_event (NULL means all) |
@@ -683,6 +686,8 @@ struct hid_usage_id { | |||
683 | * @input_mapped: invoked on input registering after mapping an usage | 686 | * @input_mapped: invoked on input registering after mapping an usage |
684 | * @input_configured: invoked just before the device is registered | 687 | * @input_configured: invoked just before the device is registered |
685 | * @feature_mapping: invoked on feature registering | 688 | * @feature_mapping: invoked on feature registering |
689 | * @bus_add_driver: invoked when a HID driver is about to be added | ||
690 | * @bus_removed_driver: invoked when a HID driver has been removed | ||
686 | * @suspend: invoked on suspend (NULL means nop) | 691 | * @suspend: invoked on suspend (NULL means nop) |
687 | * @resume: invoked on resume if device was not reset (NULL means nop) | 692 | * @resume: invoked on resume if device was not reset (NULL means nop) |
688 | * @reset_resume: invoked on resume if device was reset (NULL means nop) | 693 | * @reset_resume: invoked on resume if device was reset (NULL means nop) |
@@ -711,6 +716,7 @@ struct hid_driver { | |||
711 | struct list_head dyn_list; | 716 | struct list_head dyn_list; |
712 | spinlock_t dyn_lock; | 717 | spinlock_t dyn_lock; |
713 | 718 | ||
719 | bool (*match)(struct hid_device *dev, bool ignore_special_driver); | ||
714 | int (*probe)(struct hid_device *dev, const struct hid_device_id *id); | 720 | int (*probe)(struct hid_device *dev, const struct hid_device_id *id); |
715 | void (*remove)(struct hid_device *dev); | 721 | void (*remove)(struct hid_device *dev); |
716 | 722 | ||
@@ -736,6 +742,8 @@ struct hid_driver { | |||
736 | void (*feature_mapping)(struct hid_device *hdev, | 742 | void (*feature_mapping)(struct hid_device *hdev, |
737 | struct hid_field *field, | 743 | struct hid_field *field, |
738 | struct hid_usage *usage); | 744 | struct hid_usage *usage); |
745 | void (*bus_add_driver)(struct hid_driver *driver); | ||
746 | void (*bus_removed_driver)(struct hid_driver *driver); | ||
739 | #ifdef CONFIG_PM | 747 | #ifdef CONFIG_PM |
740 | int (*suspend)(struct hid_device *hdev, pm_message_t message); | 748 | int (*suspend)(struct hid_device *hdev, pm_message_t message); |
741 | int (*resume)(struct hid_device *hdev); | 749 | int (*resume)(struct hid_device *hdev); |
@@ -814,6 +822,8 @@ extern bool hid_ignore(struct hid_device *); | |||
814 | extern int hid_add_device(struct hid_device *); | 822 | extern int hid_add_device(struct hid_device *); |
815 | extern void hid_destroy_device(struct hid_device *); | 823 | extern void hid_destroy_device(struct hid_device *); |
816 | 824 | ||
825 | extern struct bus_type hid_bus_type; | ||
826 | |||
817 | extern int __must_check __hid_register_driver(struct hid_driver *, | 827 | extern int __must_check __hid_register_driver(struct hid_driver *, |
818 | struct module *, const char *mod_name); | 828 | struct module *, const char *mod_name); |
819 | 829 | ||
@@ -860,8 +870,12 @@ int hid_open_report(struct hid_device *device); | |||
860 | int hid_check_keys_pressed(struct hid_device *hid); | 870 | int hid_check_keys_pressed(struct hid_device *hid); |
861 | int hid_connect(struct hid_device *hid, unsigned int connect_mask); | 871 | int hid_connect(struct hid_device *hid, unsigned int connect_mask); |
862 | void hid_disconnect(struct hid_device *hid); | 872 | void hid_disconnect(struct hid_device *hid); |
863 | const struct hid_device_id *hid_match_id(struct hid_device *hdev, | 873 | bool hid_match_one_id(const struct hid_device *hdev, |
874 | const struct hid_device_id *id); | ||
875 | const struct hid_device_id *hid_match_id(const struct hid_device *hdev, | ||
864 | const struct hid_device_id *id); | 876 | const struct hid_device_id *id); |
877 | const struct hid_device_id *hid_match_device(struct hid_device *hdev, | ||
878 | struct hid_driver *hdrv); | ||
865 | s32 hid_snto32(__u32 value, unsigned n); | 879 | s32 hid_snto32(__u32 value, unsigned n); |
866 | __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, | 880 | __u32 hid_field_extract(const struct hid_device *hid, __u8 *report, |
867 | unsigned offset, unsigned n); | 881 | unsigned offset, unsigned n); |
@@ -1098,9 +1112,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, | |||
1098 | int interrupt); | 1112 | int interrupt); |
1099 | 1113 | ||
1100 | /* HID quirks API */ | 1114 | /* HID quirks API */ |
1101 | u32 usbhid_lookup_quirk(const u16 idVendor, const u16 idProduct); | 1115 | unsigned long hid_lookup_quirk(const struct hid_device *hdev); |
1102 | int usbhid_quirks_init(char **quirks_param); | 1116 | int hid_quirks_init(char **quirks_param, __u16 bus, int count); |
1103 | void usbhid_quirks_exit(void); | 1117 | void hid_quirks_exit(__u16 bus); |
1104 | 1118 | ||
1105 | #ifdef CONFIG_HID_PID | 1119 | #ifdef CONFIG_HID_PID |
1106 | int hid_pidff_init(struct hid_device *hid); | 1120 | int hid_pidff_init(struct hid_device *hid); |
diff --git a/include/linux/hil_mlc.h b/include/linux/hil_mlc.h index 394a8405dd74..774f7d3b8f6a 100644 --- a/include/linux/hil_mlc.h +++ b/include/linux/hil_mlc.h | |||
@@ -144,12 +144,12 @@ struct hil_mlc { | |||
144 | hil_packet ipacket[16]; | 144 | hil_packet ipacket[16]; |
145 | hil_packet imatch; | 145 | hil_packet imatch; |
146 | int icount; | 146 | int icount; |
147 | struct timeval instart; | 147 | unsigned long instart; |
148 | suseconds_t intimeout; | 148 | unsigned long intimeout; |
149 | 149 | ||
150 | int ddi; /* Last operational device id */ | 150 | int ddi; /* Last operational device id */ |
151 | int lcv; /* LCV to throttle loops */ | 151 | int lcv; /* LCV to throttle loops */ |
152 | struct timeval lcv_tv; /* Time loop was started */ | 152 | time64_t lcv_time; /* Time loop was started */ |
153 | 153 | ||
154 | int di_map[7]; /* Maps below items to live devs */ | 154 | int di_map[7]; /* Maps below items to live devs */ |
155 | struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; | 155 | struct hil_mlc_devinfo di[HIL_MLC_DEVMEM]; |
diff --git a/include/linux/hp_sdc.h b/include/linux/hp_sdc.h index d392975d8887..6f1dee7e67e0 100644 --- a/include/linux/hp_sdc.h +++ b/include/linux/hp_sdc.h | |||
@@ -281,7 +281,7 @@ typedef struct { | |||
281 | hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ | 281 | hp_sdc_transaction *tq[HP_SDC_QUEUE_LEN]; /* All pending read/writes */ |
282 | 282 | ||
283 | int rcurr, rqty; /* Current read transact in process */ | 283 | int rcurr, rqty; /* Current read transact in process */ |
284 | struct timeval rtv; /* Time when current read started */ | 284 | ktime_t rtime; /* Time when current read started */ |
285 | int wcurr; /* Current write transact in process */ | 285 | int wcurr; /* Current write transact in process */ |
286 | 286 | ||
287 | int dev_err; /* carries status from registration */ | 287 | int dev_err; /* carries status from registration */ |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h index 012c37fdb688..c7902ca7c9f4 100644 --- a/include/linux/hrtimer.h +++ b/include/linux/hrtimer.h | |||
@@ -28,13 +28,29 @@ struct hrtimer_cpu_base; | |||
28 | 28 | ||
29 | /* | 29 | /* |
30 | * Mode arguments of xxx_hrtimer functions: | 30 | * Mode arguments of xxx_hrtimer functions: |
31 | * | ||
32 | * HRTIMER_MODE_ABS - Time value is absolute | ||
33 | * HRTIMER_MODE_REL - Time value is relative to now | ||
34 | * HRTIMER_MODE_PINNED - Timer is bound to CPU (is only considered | ||
35 | * when starting the timer) | ||
36 | * HRTIMER_MODE_SOFT - Timer callback function will be executed in | ||
37 | * soft irq context | ||
31 | */ | 38 | */ |
32 | enum hrtimer_mode { | 39 | enum hrtimer_mode { |
33 | HRTIMER_MODE_ABS = 0x0, /* Time value is absolute */ | 40 | HRTIMER_MODE_ABS = 0x00, |
34 | HRTIMER_MODE_REL = 0x1, /* Time value is relative to now */ | 41 | HRTIMER_MODE_REL = 0x01, |
35 | HRTIMER_MODE_PINNED = 0x02, /* Timer is bound to CPU */ | 42 | HRTIMER_MODE_PINNED = 0x02, |
36 | HRTIMER_MODE_ABS_PINNED = 0x02, | 43 | HRTIMER_MODE_SOFT = 0x04, |
37 | HRTIMER_MODE_REL_PINNED = 0x03, | 44 | |
45 | HRTIMER_MODE_ABS_PINNED = HRTIMER_MODE_ABS | HRTIMER_MODE_PINNED, | ||
46 | HRTIMER_MODE_REL_PINNED = HRTIMER_MODE_REL | HRTIMER_MODE_PINNED, | ||
47 | |||
48 | HRTIMER_MODE_ABS_SOFT = HRTIMER_MODE_ABS | HRTIMER_MODE_SOFT, | ||
49 | HRTIMER_MODE_REL_SOFT = HRTIMER_MODE_REL | HRTIMER_MODE_SOFT, | ||
50 | |||
51 | HRTIMER_MODE_ABS_PINNED_SOFT = HRTIMER_MODE_ABS_PINNED | HRTIMER_MODE_SOFT, | ||
52 | HRTIMER_MODE_REL_PINNED_SOFT = HRTIMER_MODE_REL_PINNED | HRTIMER_MODE_SOFT, | ||
53 | |||
38 | }; | 54 | }; |
39 | 55 | ||
40 | /* | 56 | /* |
@@ -87,6 +103,7 @@ enum hrtimer_restart { | |||
87 | * @base: pointer to the timer base (per cpu and per clock) | 103 | * @base: pointer to the timer base (per cpu and per clock) |
88 | * @state: state information (See bit values above) | 104 | * @state: state information (See bit values above) |
89 | * @is_rel: Set if the timer was armed relative | 105 | * @is_rel: Set if the timer was armed relative |
106 | * @is_soft: Set if hrtimer will be expired in soft interrupt context. | ||
90 | * | 107 | * |
91 | * The hrtimer structure must be initialized by hrtimer_init() | 108 | * The hrtimer structure must be initialized by hrtimer_init() |
92 | */ | 109 | */ |
@@ -97,6 +114,7 @@ struct hrtimer { | |||
97 | struct hrtimer_clock_base *base; | 114 | struct hrtimer_clock_base *base; |
98 | u8 state; | 115 | u8 state; |
99 | u8 is_rel; | 116 | u8 is_rel; |
117 | u8 is_soft; | ||
100 | }; | 118 | }; |
101 | 119 | ||
102 | /** | 120 | /** |
@@ -112,9 +130,9 @@ struct hrtimer_sleeper { | |||
112 | }; | 130 | }; |
113 | 131 | ||
114 | #ifdef CONFIG_64BIT | 132 | #ifdef CONFIG_64BIT |
115 | # define HRTIMER_CLOCK_BASE_ALIGN 64 | 133 | # define __hrtimer_clock_base_align ____cacheline_aligned |
116 | #else | 134 | #else |
117 | # define HRTIMER_CLOCK_BASE_ALIGN 32 | 135 | # define __hrtimer_clock_base_align |
118 | #endif | 136 | #endif |
119 | 137 | ||
120 | /** | 138 | /** |
@@ -123,48 +141,57 @@ struct hrtimer_sleeper { | |||
123 | * @index: clock type index for per_cpu support when moving a | 141 | * @index: clock type index for per_cpu support when moving a |
124 | * timer to a base on another cpu. | 142 | * timer to a base on another cpu. |
125 | * @clockid: clock id for per_cpu support | 143 | * @clockid: clock id for per_cpu support |
144 | * @seq: seqcount around __run_hrtimer | ||
145 | * @running: pointer to the currently running hrtimer | ||
126 | * @active: red black tree root node for the active timers | 146 | * @active: red black tree root node for the active timers |
127 | * @get_time: function to retrieve the current time of the clock | 147 | * @get_time: function to retrieve the current time of the clock |
128 | * @offset: offset of this clock to the monotonic base | 148 | * @offset: offset of this clock to the monotonic base |
129 | */ | 149 | */ |
130 | struct hrtimer_clock_base { | 150 | struct hrtimer_clock_base { |
131 | struct hrtimer_cpu_base *cpu_base; | 151 | struct hrtimer_cpu_base *cpu_base; |
132 | int index; | 152 | unsigned int index; |
133 | clockid_t clockid; | 153 | clockid_t clockid; |
154 | seqcount_t seq; | ||
155 | struct hrtimer *running; | ||
134 | struct timerqueue_head active; | 156 | struct timerqueue_head active; |
135 | ktime_t (*get_time)(void); | 157 | ktime_t (*get_time)(void); |
136 | ktime_t offset; | 158 | ktime_t offset; |
137 | } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); | 159 | } __hrtimer_clock_base_align; |
138 | 160 | ||
139 | enum hrtimer_base_type { | 161 | enum hrtimer_base_type { |
140 | HRTIMER_BASE_MONOTONIC, | 162 | HRTIMER_BASE_MONOTONIC, |
141 | HRTIMER_BASE_REALTIME, | 163 | HRTIMER_BASE_REALTIME, |
142 | HRTIMER_BASE_BOOTTIME, | 164 | HRTIMER_BASE_BOOTTIME, |
143 | HRTIMER_BASE_TAI, | 165 | HRTIMER_BASE_TAI, |
166 | HRTIMER_BASE_MONOTONIC_SOFT, | ||
167 | HRTIMER_BASE_REALTIME_SOFT, | ||
168 | HRTIMER_BASE_BOOTTIME_SOFT, | ||
169 | HRTIMER_BASE_TAI_SOFT, | ||
144 | HRTIMER_MAX_CLOCK_BASES, | 170 | HRTIMER_MAX_CLOCK_BASES, |
145 | }; | 171 | }; |
146 | 172 | ||
147 | /* | 173 | /** |
148 | * struct hrtimer_cpu_base - the per cpu clock bases | 174 | * struct hrtimer_cpu_base - the per cpu clock bases |
149 | * @lock: lock protecting the base and associated clock bases | 175 | * @lock: lock protecting the base and associated clock bases |
150 | * and timers | 176 | * and timers |
151 | * @seq: seqcount around __run_hrtimer | ||
152 | * @running: pointer to the currently running hrtimer | ||
153 | * @cpu: cpu number | 177 | * @cpu: cpu number |
154 | * @active_bases: Bitfield to mark bases with active timers | 178 | * @active_bases: Bitfield to mark bases with active timers |
155 | * @clock_was_set_seq: Sequence counter of clock was set events | 179 | * @clock_was_set_seq: Sequence counter of clock was set events |
156 | * @migration_enabled: The migration of hrtimers to other cpus is enabled | ||
157 | * @nohz_active: The nohz functionality is enabled | ||
158 | * @expires_next: absolute time of the next event which was scheduled | ||
159 | * via clock_set_next_event() | ||
160 | * @next_timer: Pointer to the first expiring timer | ||
161 | * @in_hrtirq: hrtimer_interrupt() is currently executing | ||
162 | * @hres_active: State of high resolution mode | 180 | * @hres_active: State of high resolution mode |
181 | * @in_hrtirq: hrtimer_interrupt() is currently executing | ||
163 | * @hang_detected: The last hrtimer interrupt detected a hang | 182 | * @hang_detected: The last hrtimer interrupt detected a hang |
183 | * @softirq_activated: displays, if the softirq is raised - update of softirq | ||
184 | * related settings is not required then. | ||
164 | * @nr_events: Total number of hrtimer interrupt events | 185 | * @nr_events: Total number of hrtimer interrupt events |
165 | * @nr_retries: Total number of hrtimer interrupt retries | 186 | * @nr_retries: Total number of hrtimer interrupt retries |
166 | * @nr_hangs: Total number of hrtimer interrupt hangs | 187 | * @nr_hangs: Total number of hrtimer interrupt hangs |
167 | * @max_hang_time: Maximum time spent in hrtimer_interrupt | 188 | * @max_hang_time: Maximum time spent in hrtimer_interrupt |
189 | * @expires_next: absolute time of the next event, is required for remote | ||
190 | * hrtimer enqueue; it is the total first expiry time (hard | ||
191 | * and soft hrtimer are taken into account) | ||
192 | * @next_timer: Pointer to the first expiring timer | ||
193 | * @softirq_expires_next: Time to check, if soft queues needs also to be expired | ||
194 | * @softirq_next_timer: Pointer to the first expiring softirq based timer | ||
168 | * @clock_base: array of clock bases for this cpu | 195 | * @clock_base: array of clock bases for this cpu |
169 | * | 196 | * |
170 | * Note: next_timer is just an optimization for __remove_hrtimer(). | 197 | * Note: next_timer is just an optimization for __remove_hrtimer(). |
@@ -173,31 +200,28 @@ enum hrtimer_base_type { | |||
173 | */ | 200 | */ |
174 | struct hrtimer_cpu_base { | 201 | struct hrtimer_cpu_base { |
175 | raw_spinlock_t lock; | 202 | raw_spinlock_t lock; |
176 | seqcount_t seq; | ||
177 | struct hrtimer *running; | ||
178 | unsigned int cpu; | 203 | unsigned int cpu; |
179 | unsigned int active_bases; | 204 | unsigned int active_bases; |
180 | unsigned int clock_was_set_seq; | 205 | unsigned int clock_was_set_seq; |
181 | bool migration_enabled; | 206 | unsigned int hres_active : 1, |
182 | bool nohz_active; | 207 | in_hrtirq : 1, |
208 | hang_detected : 1, | ||
209 | softirq_activated : 1; | ||
183 | #ifdef CONFIG_HIGH_RES_TIMERS | 210 | #ifdef CONFIG_HIGH_RES_TIMERS |
184 | unsigned int in_hrtirq : 1, | ||
185 | hres_active : 1, | ||
186 | hang_detected : 1; | ||
187 | ktime_t expires_next; | ||
188 | struct hrtimer *next_timer; | ||
189 | unsigned int nr_events; | 211 | unsigned int nr_events; |
190 | unsigned int nr_retries; | 212 | unsigned short nr_retries; |
191 | unsigned int nr_hangs; | 213 | unsigned short nr_hangs; |
192 | unsigned int max_hang_time; | 214 | unsigned int max_hang_time; |
193 | #endif | 215 | #endif |
216 | ktime_t expires_next; | ||
217 | struct hrtimer *next_timer; | ||
218 | ktime_t softirq_expires_next; | ||
219 | struct hrtimer *softirq_next_timer; | ||
194 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 220 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
195 | } ____cacheline_aligned; | 221 | } ____cacheline_aligned; |
196 | 222 | ||
197 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) | 223 | static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time) |
198 | { | 224 | { |
199 | BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN); | ||
200 | |||
201 | timer->node.expires = time; | 225 | timer->node.expires = time; |
202 | timer->_softexpires = time; | 226 | timer->_softexpires = time; |
203 | } | 227 | } |
@@ -266,16 +290,17 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | |||
266 | return timer->base->get_time(); | 290 | return timer->base->get_time(); |
267 | } | 291 | } |
268 | 292 | ||
293 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | ||
294 | { | ||
295 | return IS_ENABLED(CONFIG_HIGH_RES_TIMERS) ? | ||
296 | timer->base->cpu_base->hres_active : 0; | ||
297 | } | ||
298 | |||
269 | #ifdef CONFIG_HIGH_RES_TIMERS | 299 | #ifdef CONFIG_HIGH_RES_TIMERS |
270 | struct clock_event_device; | 300 | struct clock_event_device; |
271 | 301 | ||
272 | extern void hrtimer_interrupt(struct clock_event_device *dev); | 302 | extern void hrtimer_interrupt(struct clock_event_device *dev); |
273 | 303 | ||
274 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | ||
275 | { | ||
276 | return timer->base->cpu_base->hres_active; | ||
277 | } | ||
278 | |||
279 | /* | 304 | /* |
280 | * The resolution of the clocks. The resolution value is returned in | 305 | * The resolution of the clocks. The resolution value is returned in |
281 | * the clock_getres() system call to give application programmers an | 306 | * the clock_getres() system call to give application programmers an |
@@ -298,11 +323,6 @@ extern unsigned int hrtimer_resolution; | |||
298 | 323 | ||
299 | #define hrtimer_resolution (unsigned int)LOW_RES_NSEC | 324 | #define hrtimer_resolution (unsigned int)LOW_RES_NSEC |
300 | 325 | ||
301 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | ||
302 | { | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static inline void clock_was_set_delayed(void) { } | 326 | static inline void clock_was_set_delayed(void) { } |
307 | 327 | ||
308 | #endif | 328 | #endif |
@@ -365,11 +385,12 @@ extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, | |||
365 | u64 range_ns, const enum hrtimer_mode mode); | 385 | u64 range_ns, const enum hrtimer_mode mode); |
366 | 386 | ||
367 | /** | 387 | /** |
368 | * hrtimer_start - (re)start an hrtimer on the current CPU | 388 | * hrtimer_start - (re)start an hrtimer |
369 | * @timer: the timer to be added | 389 | * @timer: the timer to be added |
370 | * @tim: expiry time | 390 | * @tim: expiry time |
371 | * @mode: expiry mode: absolute (HRTIMER_MODE_ABS) or | 391 | * @mode: timer mode: absolute (HRTIMER_MODE_ABS) or |
372 | * relative (HRTIMER_MODE_REL) | 392 | * relative (HRTIMER_MODE_REL), and pinned (HRTIMER_MODE_PINNED); |
393 | * softirq based mode is considered for debug purpose only! | ||
373 | */ | 394 | */ |
374 | static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, | 395 | static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim, |
375 | const enum hrtimer_mode mode) | 396 | const enum hrtimer_mode mode) |
@@ -422,7 +443,7 @@ static inline int hrtimer_is_queued(struct hrtimer *timer) | |||
422 | */ | 443 | */ |
423 | static inline int hrtimer_callback_running(struct hrtimer *timer) | 444 | static inline int hrtimer_callback_running(struct hrtimer *timer) |
424 | { | 445 | { |
425 | return timer->base->cpu_base->running == timer; | 446 | return timer->base->running == timer; |
426 | } | 447 | } |
427 | 448 | ||
428 | /* Forward a hrtimer so it expires after now: */ | 449 | /* Forward a hrtimer so it expires after now: */ |
@@ -466,7 +487,7 @@ extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta, | |||
466 | extern int schedule_hrtimeout_range_clock(ktime_t *expires, | 487 | extern int schedule_hrtimeout_range_clock(ktime_t *expires, |
467 | u64 delta, | 488 | u64 delta, |
468 | const enum hrtimer_mode mode, | 489 | const enum hrtimer_mode mode, |
469 | int clock); | 490 | clockid_t clock_id); |
470 | extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); | 491 | extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode); |
471 | 492 | ||
472 | /* Soft interrupt function to run the hrtimer queues: */ | 493 | /* Soft interrupt function to run the hrtimer queues: */ |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 82a25880714a..36fa6a2a82e3 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -119,6 +119,7 @@ long hugetlb_unreserve_pages(struct inode *inode, long start, long end, | |||
119 | long freed); | 119 | long freed); |
120 | bool isolate_huge_page(struct page *page, struct list_head *list); | 120 | bool isolate_huge_page(struct page *page, struct list_head *list); |
121 | void putback_active_hugepage(struct page *page); | 121 | void putback_active_hugepage(struct page *page); |
122 | void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason); | ||
122 | void free_huge_page(struct page *page); | 123 | void free_huge_page(struct page *page); |
123 | void hugetlb_fix_reserve_counts(struct inode *inode); | 124 | void hugetlb_fix_reserve_counts(struct inode *inode); |
124 | extern struct mutex *hugetlb_fault_mutex_table; | 125 | extern struct mutex *hugetlb_fault_mutex_table; |
@@ -129,7 +130,6 @@ u32 hugetlb_fault_mutex_hash(struct hstate *h, struct mm_struct *mm, | |||
129 | 130 | ||
130 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); | 131 | pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud); |
131 | 132 | ||
132 | extern int hugepages_treat_as_movable; | ||
133 | extern int sysctl_hugetlb_shm_group; | 133 | extern int sysctl_hugetlb_shm_group; |
134 | extern struct list_head huge_boot_pages; | 134 | extern struct list_head huge_boot_pages; |
135 | 135 | ||
@@ -158,6 +158,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma, | |||
158 | unsigned long address, unsigned long end, pgprot_t newprot); | 158 | unsigned long address, unsigned long end, pgprot_t newprot); |
159 | 159 | ||
160 | bool is_hugetlb_entry_migration(pte_t pte); | 160 | bool is_hugetlb_entry_migration(pte_t pte); |
161 | |||
161 | #else /* !CONFIG_HUGETLB_PAGE */ | 162 | #else /* !CONFIG_HUGETLB_PAGE */ |
162 | 163 | ||
163 | static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) | 164 | static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) |
@@ -198,6 +199,7 @@ static inline bool isolate_huge_page(struct page *page, struct list_head *list) | |||
198 | return false; | 199 | return false; |
199 | } | 200 | } |
200 | #define putback_active_hugepage(p) do {} while (0) | 201 | #define putback_active_hugepage(p) do {} while (0) |
202 | #define move_hugetlb_state(old, new, reason) do {} while (0) | ||
201 | 203 | ||
202 | static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, | 204 | static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma, |
203 | unsigned long address, unsigned long end, pgprot_t newprot) | 205 | unsigned long address, unsigned long end, pgprot_t newprot) |
@@ -271,6 +273,17 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) | |||
271 | return sb->s_fs_info; | 273 | return sb->s_fs_info; |
272 | } | 274 | } |
273 | 275 | ||
276 | struct hugetlbfs_inode_info { | ||
277 | struct shared_policy policy; | ||
278 | struct inode vfs_inode; | ||
279 | unsigned int seals; | ||
280 | }; | ||
281 | |||
282 | static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) | ||
283 | { | ||
284 | return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); | ||
285 | } | ||
286 | |||
274 | extern const struct file_operations hugetlbfs_file_operations; | 287 | extern const struct file_operations hugetlbfs_file_operations; |
275 | extern const struct vm_operations_struct hugetlb_vm_ops; | 288 | extern const struct vm_operations_struct hugetlb_vm_ops; |
276 | struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, | 289 | struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, |
@@ -343,10 +356,10 @@ struct huge_bootmem_page { | |||
343 | struct page *alloc_huge_page(struct vm_area_struct *vma, | 356 | struct page *alloc_huge_page(struct vm_area_struct *vma, |
344 | unsigned long addr, int avoid_reserve); | 357 | unsigned long addr, int avoid_reserve); |
345 | struct page *alloc_huge_page_node(struct hstate *h, int nid); | 358 | struct page *alloc_huge_page_node(struct hstate *h, int nid); |
346 | struct page *alloc_huge_page_noerr(struct vm_area_struct *vma, | ||
347 | unsigned long addr, int avoid_reserve); | ||
348 | struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, | 359 | struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, |
349 | nodemask_t *nmask); | 360 | nodemask_t *nmask); |
361 | struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, | ||
362 | unsigned long address); | ||
350 | int huge_add_to_page_cache(struct page *page, struct address_space *mapping, | 363 | int huge_add_to_page_cache(struct page *page, struct address_space *mapping, |
351 | pgoff_t idx); | 364 | pgoff_t idx); |
352 | 365 | ||
@@ -524,7 +537,7 @@ struct hstate {}; | |||
524 | #define alloc_huge_page(v, a, r) NULL | 537 | #define alloc_huge_page(v, a, r) NULL |
525 | #define alloc_huge_page_node(h, nid) NULL | 538 | #define alloc_huge_page_node(h, nid) NULL |
526 | #define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL | 539 | #define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL |
527 | #define alloc_huge_page_noerr(v, a, r) NULL | 540 | #define alloc_huge_page_vma(h, vma, address) NULL |
528 | #define alloc_bootmem_huge_page(h) NULL | 541 | #define alloc_bootmem_huge_page(h) NULL |
529 | #define hstate_file(f) NULL | 542 | #define hstate_file(f) NULL |
530 | #define hstate_sizelog(s) NULL | 543 | #define hstate_sizelog(s) NULL |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index 6c9336626592..93bd6fcd6e62 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
@@ -127,28 +127,6 @@ struct hv_ring_buffer_info { | |||
127 | u32 priv_read_index; | 127 | u32 priv_read_index; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | /* | ||
131 | * | ||
132 | * hv_get_ringbuffer_availbytes() | ||
133 | * | ||
134 | * Get number of bytes available to read and to write to | ||
135 | * for the specified ring buffer | ||
136 | */ | ||
137 | static inline void | ||
138 | hv_get_ringbuffer_availbytes(const struct hv_ring_buffer_info *rbi, | ||
139 | u32 *read, u32 *write) | ||
140 | { | ||
141 | u32 read_loc, write_loc, dsize; | ||
142 | |||
143 | /* Capture the read/write indices before they changed */ | ||
144 | read_loc = rbi->ring_buffer->read_index; | ||
145 | write_loc = rbi->ring_buffer->write_index; | ||
146 | dsize = rbi->ring_datasize; | ||
147 | |||
148 | *write = write_loc >= read_loc ? dsize - (write_loc - read_loc) : | ||
149 | read_loc - write_loc; | ||
150 | *read = dsize - *write; | ||
151 | } | ||
152 | 130 | ||
153 | static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) | 131 | static inline u32 hv_get_bytes_to_read(const struct hv_ring_buffer_info *rbi) |
154 | { | 132 | { |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 0f774406fad0..419a38e7c315 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
@@ -55,7 +55,7 @@ typedef int (*i2c_slave_cb_t)(struct i2c_client *, enum i2c_slave_event, u8 *); | |||
55 | struct module; | 55 | struct module; |
56 | struct property_entry; | 56 | struct property_entry; |
57 | 57 | ||
58 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) | 58 | #if IS_ENABLED(CONFIG_I2C) |
59 | /* | 59 | /* |
60 | * The master routines are the ones normally used to transmit data to devices | 60 | * The master routines are the ones normally used to transmit data to devices |
61 | * on a bus (or read from them). Apart from two basic transfer functions to | 61 | * on a bus (or read from them). Apart from two basic transfer functions to |
@@ -63,10 +63,68 @@ struct property_entry; | |||
63 | * transmit an arbitrary number of messages without interruption. | 63 | * transmit an arbitrary number of messages without interruption. |
64 | * @count must be be less than 64k since msg.len is u16. | 64 | * @count must be be less than 64k since msg.len is u16. |
65 | */ | 65 | */ |
66 | extern int i2c_master_send(const struct i2c_client *client, const char *buf, | 66 | extern int i2c_transfer_buffer_flags(const struct i2c_client *client, |
67 | int count); | 67 | char *buf, int count, u16 flags); |
68 | extern int i2c_master_recv(const struct i2c_client *client, char *buf, | 68 | |
69 | int count); | 69 | /** |
70 | * i2c_master_recv - issue a single I2C message in master receive mode | ||
71 | * @client: Handle to slave device | ||
72 | * @buf: Where to store data read from slave | ||
73 | * @count: How many bytes to read, must be less than 64k since msg.len is u16 | ||
74 | * | ||
75 | * Returns negative errno, or else the number of bytes read. | ||
76 | */ | ||
77 | static inline int i2c_master_recv(const struct i2c_client *client, | ||
78 | char *buf, int count) | ||
79 | { | ||
80 | return i2c_transfer_buffer_flags(client, buf, count, I2C_M_RD); | ||
81 | }; | ||
82 | |||
83 | /** | ||
84 | * i2c_master_recv_dmasafe - issue a single I2C message in master receive mode | ||
85 | * using a DMA safe buffer | ||
86 | * @client: Handle to slave device | ||
87 | * @buf: Where to store data read from slave, must be safe to use with DMA | ||
88 | * @count: How many bytes to read, must be less than 64k since msg.len is u16 | ||
89 | * | ||
90 | * Returns negative errno, or else the number of bytes read. | ||
91 | */ | ||
92 | static inline int i2c_master_recv_dmasafe(const struct i2c_client *client, | ||
93 | char *buf, int count) | ||
94 | { | ||
95 | return i2c_transfer_buffer_flags(client, buf, count, | ||
96 | I2C_M_RD | I2C_M_DMA_SAFE); | ||
97 | }; | ||
98 | |||
99 | /** | ||
100 | * i2c_master_send - issue a single I2C message in master transmit mode | ||
101 | * @client: Handle to slave device | ||
102 | * @buf: Data that will be written to the slave | ||
103 | * @count: How many bytes to write, must be less than 64k since msg.len is u16 | ||
104 | * | ||
105 | * Returns negative errno, or else the number of bytes written. | ||
106 | */ | ||
107 | static inline int i2c_master_send(const struct i2c_client *client, | ||
108 | const char *buf, int count) | ||
109 | { | ||
110 | return i2c_transfer_buffer_flags(client, (char *)buf, count, 0); | ||
111 | }; | ||
112 | |||
113 | /** | ||
114 | * i2c_master_send_dmasafe - issue a single I2C message in master transmit mode | ||
115 | * using a DMA safe buffer | ||
116 | * @client: Handle to slave device | ||
117 | * @buf: Data that will be written to the slave, must be safe to use with DMA | ||
118 | * @count: How many bytes to write, must be less than 64k since msg.len is u16 | ||
119 | * | ||
120 | * Returns negative errno, or else the number of bytes written. | ||
121 | */ | ||
122 | static inline int i2c_master_send_dmasafe(const struct i2c_client *client, | ||
123 | const char *buf, int count) | ||
124 | { | ||
125 | return i2c_transfer_buffer_flags(client, (char *)buf, count, | ||
126 | I2C_M_DMA_SAFE); | ||
127 | }; | ||
70 | 128 | ||
71 | /* Transfer num messages. | 129 | /* Transfer num messages. |
72 | */ | 130 | */ |
@@ -354,7 +412,7 @@ struct i2c_board_info { | |||
354 | .type = dev_type, .addr = (dev_addr) | 412 | .type = dev_type, .addr = (dev_addr) |
355 | 413 | ||
356 | 414 | ||
357 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) | 415 | #if IS_ENABLED(CONFIG_I2C) |
358 | /* Add-on boards should register/unregister their devices; e.g. a board | 416 | /* Add-on boards should register/unregister their devices; e.g. a board |
359 | * with integrated I2C, a config eeprom, sensors, and a codec that's | 417 | * with integrated I2C, a config eeprom, sensors, and a codec that's |
360 | * used in conjunction with the primary hardware. | 418 | * used in conjunction with the primary hardware. |
@@ -485,40 +543,43 @@ struct i2c_timings { | |||
485 | /** | 543 | /** |
486 | * struct i2c_bus_recovery_info - I2C bus recovery information | 544 | * struct i2c_bus_recovery_info - I2C bus recovery information |
487 | * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or | 545 | * @recover_bus: Recover routine. Either pass driver's recover_bus() routine, or |
488 | * i2c_generic_scl_recovery() or i2c_generic_gpio_recovery(). | 546 | * i2c_generic_scl_recovery(). |
489 | * @get_scl: This gets current value of SCL line. Mandatory for generic SCL | 547 | * @get_scl: This gets current value of SCL line. Mandatory for generic SCL |
490 | * recovery. Used internally for generic GPIO recovery. | 548 | * recovery. Populated internally for generic GPIO recovery. |
491 | * @set_scl: This sets/clears SCL line. Mandatory for generic SCL recovery. Used | 549 | * @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery. |
492 | * internally for generic GPIO recovery. | 550 | * Populated internally for generic GPIO recovery. |
493 | * @get_sda: This gets current value of SDA line. Optional for generic SCL | 551 | * @get_sda: This gets current value of SDA line. Optional for generic SCL |
494 | * recovery. Used internally, if sda_gpio is a valid GPIO, for generic GPIO | 552 | * recovery. Populated internally, if sda_gpio is a valid GPIO, for generic |
495 | * recovery. | 553 | * GPIO recovery. |
554 | * @set_sda: This sets/clears the SDA line. Optional for generic SCL recovery. | ||
555 | * Populated internally, if sda_gpio is a valid GPIO, for generic GPIO | ||
556 | * recovery. | ||
496 | * @prepare_recovery: This will be called before starting recovery. Platform may | 557 | * @prepare_recovery: This will be called before starting recovery. Platform may |
497 | * configure padmux here for SDA/SCL line or something else they want. | 558 | * configure padmux here for SDA/SCL line or something else they want. |
498 | * @unprepare_recovery: This will be called after completing recovery. Platform | 559 | * @unprepare_recovery: This will be called after completing recovery. Platform |
499 | * may configure padmux here for SDA/SCL line or something else they want. | 560 | * may configure padmux here for SDA/SCL line or something else they want. |
500 | * @scl_gpio: gpio number of the SCL line. Only required for GPIO recovery. | 561 | * @scl_gpiod: gpiod of the SCL line. Only required for GPIO recovery. |
501 | * @sda_gpio: gpio number of the SDA line. Only required for GPIO recovery. | 562 | * @sda_gpiod: gpiod of the SDA line. Only required for GPIO recovery. |
502 | */ | 563 | */ |
503 | struct i2c_bus_recovery_info { | 564 | struct i2c_bus_recovery_info { |
504 | int (*recover_bus)(struct i2c_adapter *); | 565 | int (*recover_bus)(struct i2c_adapter *adap); |
505 | 566 | ||
506 | int (*get_scl)(struct i2c_adapter *); | 567 | int (*get_scl)(struct i2c_adapter *adap); |
507 | void (*set_scl)(struct i2c_adapter *, int val); | 568 | void (*set_scl)(struct i2c_adapter *adap, int val); |
508 | int (*get_sda)(struct i2c_adapter *); | 569 | int (*get_sda)(struct i2c_adapter *adap); |
570 | void (*set_sda)(struct i2c_adapter *adap, int val); | ||
509 | 571 | ||
510 | void (*prepare_recovery)(struct i2c_adapter *); | 572 | void (*prepare_recovery)(struct i2c_adapter *adap); |
511 | void (*unprepare_recovery)(struct i2c_adapter *); | 573 | void (*unprepare_recovery)(struct i2c_adapter *adap); |
512 | 574 | ||
513 | /* gpio recovery */ | 575 | /* gpio recovery */ |
514 | int scl_gpio; | 576 | struct gpio_desc *scl_gpiod; |
515 | int sda_gpio; | 577 | struct gpio_desc *sda_gpiod; |
516 | }; | 578 | }; |
517 | 579 | ||
518 | int i2c_recover_bus(struct i2c_adapter *adap); | 580 | int i2c_recover_bus(struct i2c_adapter *adap); |
519 | 581 | ||
520 | /* Generic recovery routines */ | 582 | /* Generic recovery routines */ |
521 | int i2c_generic_gpio_recovery(struct i2c_adapter *adap); | ||
522 | int i2c_generic_scl_recovery(struct i2c_adapter *adap); | 583 | int i2c_generic_scl_recovery(struct i2c_adapter *adap); |
523 | 584 | ||
524 | /** | 585 | /** |
@@ -706,7 +767,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter) | |||
706 | 767 | ||
707 | /* administration... | 768 | /* administration... |
708 | */ | 769 | */ |
709 | #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) | 770 | #if IS_ENABLED(CONFIG_I2C) |
710 | extern int i2c_add_adapter(struct i2c_adapter *); | 771 | extern int i2c_add_adapter(struct i2c_adapter *); |
711 | extern void i2c_del_adapter(struct i2c_adapter *); | 772 | extern void i2c_del_adapter(struct i2c_adapter *); |
712 | extern int i2c_add_numbered_adapter(struct i2c_adapter *); | 773 | extern int i2c_add_numbered_adapter(struct i2c_adapter *); |
@@ -769,6 +830,9 @@ static inline u8 i2c_8bit_addr_from_msg(const struct i2c_msg *msg) | |||
769 | return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); | 830 | return (msg->addr << 1) | (msg->flags & I2C_M_RD ? 1 : 0); |
770 | } | 831 | } |
771 | 832 | ||
833 | u8 *i2c_get_dma_safe_msg_buf(struct i2c_msg *msg, unsigned int threshold); | ||
834 | void i2c_release_dma_safe_msg_buf(struct i2c_msg *msg, u8 *buf); | ||
835 | |||
772 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); | 836 | int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr); |
773 | /** | 837 | /** |
774 | * module_i2c_driver() - Helper macro for registering a modular I2C driver | 838 | * module_i2c_driver() - Helper macro for registering a modular I2C driver |
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h deleted file mode 100644 index 4dbe651f71f5..000000000000 --- a/include/linux/i7300_idle.h +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifndef I7300_IDLE_H | ||
4 | #define I7300_IDLE_H | ||
5 | |||
6 | #include <linux/pci.h> | ||
7 | |||
8 | /* | ||
9 | * I/O AT controls (PCI bus 0 device 8 function 0) | ||
10 | * DIMM controls (PCI bus 0 device 16 function 1) | ||
11 | */ | ||
12 | #define IOAT_BUS 0 | ||
13 | #define IOAT_DEVFN PCI_DEVFN(8, 0) | ||
14 | #define MEMCTL_BUS 0 | ||
15 | #define MEMCTL_DEVFN PCI_DEVFN(16, 1) | ||
16 | |||
17 | struct fbd_ioat { | ||
18 | unsigned int vendor; | ||
19 | unsigned int ioat_dev; | ||
20 | unsigned int enabled; | ||
21 | }; | ||
22 | |||
23 | /* | ||
24 | * The i5000 chip-set has the same hooks as the i7300 | ||
25 | * but it is not enabled by default and must be manually | ||
26 | * manually enabled with "forceload=1" because it is | ||
27 | * only lightly validated. | ||
28 | */ | ||
29 | |||
30 | static const struct fbd_ioat fbd_ioat_list[] = { | ||
31 | {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1}, | ||
32 | {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0}, | ||
33 | {0, 0} | ||
34 | }; | ||
35 | |||
36 | /* table of devices that work with this driver */ | ||
37 | static const struct pci_device_id pci_tbl[] = { | ||
38 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) }, | ||
39 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) }, | ||
40 | { } /* Terminating entry */ | ||
41 | }; | ||
42 | |||
43 | /* Check for known platforms with I/O-AT */ | ||
44 | static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev, | ||
45 | struct pci_dev **ioat_dev, | ||
46 | int enable_all) | ||
47 | { | ||
48 | int i; | ||
49 | struct pci_dev *memdev, *dmadev; | ||
50 | |||
51 | memdev = pci_get_bus_and_slot(MEMCTL_BUS, MEMCTL_DEVFN); | ||
52 | if (!memdev) | ||
53 | return -ENODEV; | ||
54 | |||
55 | for (i = 0; pci_tbl[i].vendor != 0; i++) { | ||
56 | if (memdev->vendor == pci_tbl[i].vendor && | ||
57 | memdev->device == pci_tbl[i].device) { | ||
58 | break; | ||
59 | } | ||
60 | } | ||
61 | if (pci_tbl[i].vendor == 0) | ||
62 | return -ENODEV; | ||
63 | |||
64 | dmadev = pci_get_bus_and_slot(IOAT_BUS, IOAT_DEVFN); | ||
65 | if (!dmadev) | ||
66 | return -ENODEV; | ||
67 | |||
68 | for (i = 0; fbd_ioat_list[i].vendor != 0; i++) { | ||
69 | if (dmadev->vendor == fbd_ioat_list[i].vendor && | ||
70 | dmadev->device == fbd_ioat_list[i].ioat_dev) { | ||
71 | if (!(fbd_ioat_list[i].enabled || enable_all)) | ||
72 | continue; | ||
73 | if (fbd_dev) | ||
74 | *fbd_dev = memdev; | ||
75 | if (ioat_dev) | ||
76 | *ioat_dev = dmadev; | ||
77 | |||
78 | return 0; | ||
79 | } | ||
80 | } | ||
81 | return -ENODEV; | ||
82 | } | ||
83 | |||
84 | #endif | ||
diff --git a/include/linux/idr.h b/include/linux/idr.h index fa14f834e4ed..7d6a6313f0ab 100644 --- a/include/linux/idr.h +++ b/include/linux/idr.h | |||
@@ -15,10 +15,10 @@ | |||
15 | #include <linux/radix-tree.h> | 15 | #include <linux/radix-tree.h> |
16 | #include <linux/gfp.h> | 16 | #include <linux/gfp.h> |
17 | #include <linux/percpu.h> | 17 | #include <linux/percpu.h> |
18 | #include <linux/bug.h> | ||
19 | 18 | ||
20 | struct idr { | 19 | struct idr { |
21 | struct radix_tree_root idr_rt; | 20 | struct radix_tree_root idr_rt; |
21 | unsigned int idr_base; | ||
22 | unsigned int idr_next; | 22 | unsigned int idr_next; |
23 | }; | 23 | }; |
24 | 24 | ||
@@ -31,10 +31,26 @@ struct idr { | |||
31 | /* Set the IDR flag and the IDR_FREE tag */ | 31 | /* Set the IDR flag and the IDR_FREE tag */ |
32 | #define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) | 32 | #define IDR_RT_MARKER ((__force gfp_t)(3 << __GFP_BITS_SHIFT)) |
33 | 33 | ||
34 | #define IDR_INIT \ | 34 | #define IDR_INIT_BASE(base) { \ |
35 | { \ | 35 | .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER), \ |
36 | .idr_rt = RADIX_TREE_INIT(IDR_RT_MARKER) \ | 36 | .idr_base = (base), \ |
37 | .idr_next = 0, \ | ||
37 | } | 38 | } |
39 | |||
40 | /** | ||
41 | * IDR_INIT() - Initialise an IDR. | ||
42 | * | ||
43 | * A freshly-initialised IDR contains no IDs. | ||
44 | */ | ||
45 | #define IDR_INIT IDR_INIT_BASE(0) | ||
46 | |||
47 | /** | ||
48 | * DEFINE_IDR() - Define a statically-allocated IDR | ||
49 | * @name: Name of IDR | ||
50 | * | ||
51 | * An IDR defined using this macro is ready for use with no additional | ||
52 | * initialisation required. It contains no IDs. | ||
53 | */ | ||
38 | #define DEFINE_IDR(name) struct idr name = IDR_INIT | 54 | #define DEFINE_IDR(name) struct idr name = IDR_INIT |
39 | 55 | ||
40 | /** | 56 | /** |
@@ -82,80 +98,52 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val) | |||
82 | 98 | ||
83 | void idr_preload(gfp_t gfp_mask); | 99 | void idr_preload(gfp_t gfp_mask); |
84 | 100 | ||
85 | int idr_alloc_cmn(struct idr *idr, void *ptr, unsigned long *index, | 101 | int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t); |
86 | unsigned long start, unsigned long end, gfp_t gfp, | 102 | int __must_check idr_alloc_u32(struct idr *, void *ptr, u32 *id, |
87 | bool ext); | 103 | unsigned long max, gfp_t); |
88 | 104 | int idr_alloc_cyclic(struct idr *, void *ptr, int start, int end, gfp_t); | |
89 | /** | 105 | void *idr_remove(struct idr *, unsigned long id); |
90 | * idr_alloc - allocate an id | 106 | void *idr_find(const struct idr *, unsigned long id); |
91 | * @idr: idr handle | ||
92 | * @ptr: pointer to be associated with the new id | ||
93 | * @start: the minimum id (inclusive) | ||
94 | * @end: the maximum id (exclusive) | ||
95 | * @gfp: memory allocation flags | ||
96 | * | ||
97 | * Allocates an unused ID in the range [start, end). Returns -ENOSPC | ||
98 | * if there are no unused IDs in that range. | ||
99 | * | ||
100 | * Note that @end is treated as max when <= 0. This is to always allow | ||
101 | * using @start + N as @end as long as N is inside integer range. | ||
102 | * | ||
103 | * Simultaneous modifications to the @idr are not allowed and should be | ||
104 | * prevented by the user, usually with a lock. idr_alloc() may be called | ||
105 | * concurrently with read-only accesses to the @idr, such as idr_find() and | ||
106 | * idr_for_each_entry(). | ||
107 | */ | ||
108 | static inline int idr_alloc(struct idr *idr, void *ptr, | ||
109 | int start, int end, gfp_t gfp) | ||
110 | { | ||
111 | unsigned long id; | ||
112 | int ret; | ||
113 | |||
114 | if (WARN_ON_ONCE(start < 0)) | ||
115 | return -EINVAL; | ||
116 | |||
117 | ret = idr_alloc_cmn(idr, ptr, &id, start, end, gfp, false); | ||
118 | |||
119 | if (ret) | ||
120 | return ret; | ||
121 | |||
122 | return id; | ||
123 | } | ||
124 | |||
125 | static inline int idr_alloc_ext(struct idr *idr, void *ptr, | ||
126 | unsigned long *index, | ||
127 | unsigned long start, | ||
128 | unsigned long end, | ||
129 | gfp_t gfp) | ||
130 | { | ||
131 | return idr_alloc_cmn(idr, ptr, index, start, end, gfp, true); | ||
132 | } | ||
133 | |||
134 | int idr_alloc_cyclic(struct idr *, void *entry, int start, int end, gfp_t); | ||
135 | int idr_for_each(const struct idr *, | 107 | int idr_for_each(const struct idr *, |
136 | int (*fn)(int id, void *p, void *data), void *data); | 108 | int (*fn)(int id, void *p, void *data), void *data); |
137 | void *idr_get_next(struct idr *, int *nextid); | 109 | void *idr_get_next(struct idr *, int *nextid); |
138 | void *idr_get_next_ext(struct idr *idr, unsigned long *nextid); | 110 | void *idr_get_next_ul(struct idr *, unsigned long *nextid); |
139 | void *idr_replace(struct idr *, void *, int id); | 111 | void *idr_replace(struct idr *, void *, unsigned long id); |
140 | void *idr_replace_ext(struct idr *idr, void *ptr, unsigned long id); | ||
141 | void idr_destroy(struct idr *); | 112 | void idr_destroy(struct idr *); |
142 | 113 | ||
143 | static inline void *idr_remove_ext(struct idr *idr, unsigned long id) | 114 | /** |
144 | { | 115 | * idr_init_base() - Initialise an IDR. |
145 | return radix_tree_delete_item(&idr->idr_rt, id, NULL); | 116 | * @idr: IDR handle. |
146 | } | 117 | * @base: The base value for the IDR. |
147 | 118 | * | |
148 | static inline void *idr_remove(struct idr *idr, int id) | 119 | * This variation of idr_init() creates an IDR which will allocate IDs |
120 | * starting at %base. | ||
121 | */ | ||
122 | static inline void idr_init_base(struct idr *idr, int base) | ||
149 | { | 123 | { |
150 | return idr_remove_ext(idr, id); | 124 | INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); |
125 | idr->idr_base = base; | ||
126 | idr->idr_next = 0; | ||
151 | } | 127 | } |
152 | 128 | ||
129 | /** | ||
130 | * idr_init() - Initialise an IDR. | ||
131 | * @idr: IDR handle. | ||
132 | * | ||
133 | * Initialise a dynamically allocated IDR. To initialise a | ||
134 | * statically allocated IDR, use DEFINE_IDR(). | ||
135 | */ | ||
153 | static inline void idr_init(struct idr *idr) | 136 | static inline void idr_init(struct idr *idr) |
154 | { | 137 | { |
155 | INIT_RADIX_TREE(&idr->idr_rt, IDR_RT_MARKER); | 138 | idr_init_base(idr, 0); |
156 | idr->idr_next = 0; | ||
157 | } | 139 | } |
158 | 140 | ||
141 | /** | ||
142 | * idr_is_empty() - Are there any IDs allocated? | ||
143 | * @idr: IDR handle. | ||
144 | * | ||
145 | * Return: %true if any IDs have been allocated from this IDR. | ||
146 | */ | ||
159 | static inline bool idr_is_empty(const struct idr *idr) | 147 | static inline bool idr_is_empty(const struct idr *idr) |
160 | { | 148 | { |
161 | return radix_tree_empty(&idr->idr_rt) && | 149 | return radix_tree_empty(&idr->idr_rt) && |
@@ -174,50 +162,38 @@ static inline void idr_preload_end(void) | |||
174 | } | 162 | } |
175 | 163 | ||
176 | /** | 164 | /** |
177 | * idr_find - return pointer for given id | 165 | * idr_for_each_entry() - Iterate over an IDR's elements of a given type. |
178 | * @idr: idr handle | 166 | * @idr: IDR handle. |
179 | * @id: lookup key | 167 | * @entry: The type * to use as cursor |
180 | * | 168 | * @id: Entry ID. |
181 | * Return the pointer given the id it has been registered with. A %NULL | ||
182 | * return indicates that @id is not valid or you passed %NULL in | ||
183 | * idr_get_new(). | ||
184 | * | 169 | * |
185 | * This function can be called under rcu_read_lock(), given that the leaf | 170 | * @entry and @id do not need to be initialized before the loop, and |
186 | * pointers lifetimes are correctly managed. | 171 | * after normal termination @entry is left with the value NULL. This |
172 | * is convenient for a "not found" value. | ||
187 | */ | 173 | */ |
188 | static inline void *idr_find_ext(const struct idr *idr, unsigned long id) | 174 | #define idr_for_each_entry(idr, entry, id) \ |
189 | { | 175 | for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) |
190 | return radix_tree_lookup(&idr->idr_rt, id); | ||
191 | } | ||
192 | |||
193 | static inline void *idr_find(const struct idr *idr, int id) | ||
194 | { | ||
195 | return idr_find_ext(idr, id); | ||
196 | } | ||
197 | 176 | ||
198 | /** | 177 | /** |
199 | * idr_for_each_entry - iterate over an idr's elements of a given type | 178 | * idr_for_each_entry_ul() - Iterate over an IDR's elements of a given type. |
200 | * @idr: idr handle | 179 | * @idr: IDR handle. |
201 | * @entry: the type * to use as cursor | 180 | * @entry: The type * to use as cursor. |
202 | * @id: id entry's key | 181 | * @id: Entry ID. |
203 | * | 182 | * |
204 | * @entry and @id do not need to be initialized before the loop, and | 183 | * @entry and @id do not need to be initialized before the loop, and |
205 | * after normal terminatinon @entry is left with the value NULL. This | 184 | * after normal termination @entry is left with the value NULL. This |
206 | * is convenient for a "not found" value. | 185 | * is convenient for a "not found" value. |
207 | */ | 186 | */ |
208 | #define idr_for_each_entry(idr, entry, id) \ | 187 | #define idr_for_each_entry_ul(idr, entry, id) \ |
209 | for (id = 0; ((entry) = idr_get_next(idr, &(id))) != NULL; ++id) | 188 | for (id = 0; ((entry) = idr_get_next_ul(idr, &(id))) != NULL; ++id) |
210 | #define idr_for_each_entry_ext(idr, entry, id) \ | ||
211 | for (id = 0; ((entry) = idr_get_next_ext(idr, &(id))) != NULL; ++id) | ||
212 | 189 | ||
213 | /** | 190 | /** |
214 | * idr_for_each_entry_continue - continue iteration over an idr's elements of a given type | 191 | * idr_for_each_entry_continue() - Continue iteration over an IDR's elements of a given type |
215 | * @idr: idr handle | 192 | * @idr: IDR handle. |
216 | * @entry: the type * to use as cursor | 193 | * @entry: The type * to use as a cursor. |
217 | * @id: id entry's key | 194 | * @id: Entry ID. |
218 | * | 195 | * |
219 | * Continue to iterate over list of given type, continuing after | 196 | * Continue to iterate over entries, continuing after the current position. |
220 | * the current position. | ||
221 | */ | 197 | */ |
222 | #define idr_for_each_entry_continue(idr, entry, id) \ | 198 | #define idr_for_each_entry_continue(idr, entry, id) \ |
223 | for ((entry) = idr_get_next((idr), &(id)); \ | 199 | for ((entry) = idr_get_next((idr), &(id)); \ |
diff --git a/include/linux/if_link.h b/include/linux/if_link.h index 4c54611e03e9..622658dfbf0a 100644 --- a/include/linux/if_link.h +++ b/include/linux/if_link.h | |||
@@ -13,6 +13,8 @@ struct ifla_vf_stats { | |||
13 | __u64 tx_bytes; | 13 | __u64 tx_bytes; |
14 | __u64 broadcast; | 14 | __u64 broadcast; |
15 | __u64 multicast; | 15 | __u64 multicast; |
16 | __u64 rx_dropped; | ||
17 | __u64 tx_dropped; | ||
16 | }; | 18 | }; |
17 | 19 | ||
18 | struct ifla_vf_info { | 20 | struct ifla_vf_info { |
diff --git a/include/linux/if_macvlan.h b/include/linux/if_macvlan.h index bedf54b6f943..4cb7aeeafce0 100644 --- a/include/linux/if_macvlan.h +++ b/include/linux/if_macvlan.h | |||
@@ -30,10 +30,10 @@ struct macvlan_dev { | |||
30 | enum macvlan_mode mode; | 30 | enum macvlan_mode mode; |
31 | u16 flags; | 31 | u16 flags; |
32 | int nest_level; | 32 | int nest_level; |
33 | unsigned int macaddr_count; | ||
33 | #ifdef CONFIG_NET_POLL_CONTROLLER | 34 | #ifdef CONFIG_NET_POLL_CONTROLLER |
34 | struct netpoll *netpoll; | 35 | struct netpoll *netpoll; |
35 | #endif | 36 | #endif |
36 | unsigned int macaddr_count; | ||
37 | }; | 37 | }; |
38 | 38 | ||
39 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, | 39 | static inline void macvlan_count_rx(const struct macvlan_dev *vlan, |
diff --git a/include/linux/if_tap.h b/include/linux/if_tap.h index 3ecef57c31e3..8e66866c11be 100644 --- a/include/linux/if_tap.h +++ b/include/linux/if_tap.h | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | #if IS_ENABLED(CONFIG_TAP) | 5 | #if IS_ENABLED(CONFIG_TAP) |
6 | struct socket *tap_get_socket(struct file *); | 6 | struct socket *tap_get_socket(struct file *); |
7 | struct skb_array *tap_get_skb_array(struct file *file); | 7 | struct ptr_ring *tap_get_ptr_ring(struct file *file); |
8 | #else | 8 | #else |
9 | #include <linux/err.h> | 9 | #include <linux/err.h> |
10 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
@@ -14,7 +14,7 @@ static inline struct socket *tap_get_socket(struct file *f) | |||
14 | { | 14 | { |
15 | return ERR_PTR(-EINVAL); | 15 | return ERR_PTR(-EINVAL); |
16 | } | 16 | } |
17 | static inline struct skb_array *tap_get_skb_array(struct file *f) | 17 | static inline struct ptr_ring *tap_get_ptr_ring(struct file *f) |
18 | { | 18 | { |
19 | return ERR_PTR(-EINVAL); | 19 | return ERR_PTR(-EINVAL); |
20 | } | 20 | } |
@@ -70,7 +70,7 @@ struct tap_queue { | |||
70 | u16 queue_index; | 70 | u16 queue_index; |
71 | bool enabled; | 71 | bool enabled; |
72 | struct list_head next; | 72 | struct list_head next; |
73 | struct skb_array skb_array; | 73 | struct ptr_ring ring; |
74 | }; | 74 | }; |
75 | 75 | ||
76 | rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); | 76 | rx_handler_result_t tap_handle_frame(struct sk_buff **pskb); |
diff --git a/include/linux/if_tun.h b/include/linux/if_tun.h index bf9bdf42d577..c5b0a75a7812 100644 --- a/include/linux/if_tun.h +++ b/include/linux/if_tun.h | |||
@@ -17,9 +17,14 @@ | |||
17 | 17 | ||
18 | #include <uapi/linux/if_tun.h> | 18 | #include <uapi/linux/if_tun.h> |
19 | 19 | ||
20 | #define TUN_XDP_FLAG 0x1UL | ||
21 | |||
20 | #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) | 22 | #if defined(CONFIG_TUN) || defined(CONFIG_TUN_MODULE) |
21 | struct socket *tun_get_socket(struct file *); | 23 | struct socket *tun_get_socket(struct file *); |
22 | struct skb_array *tun_get_skb_array(struct file *file); | 24 | struct ptr_ring *tun_get_tx_ring(struct file *file); |
25 | bool tun_is_xdp_buff(void *ptr); | ||
26 | void *tun_xdp_to_ptr(void *ptr); | ||
27 | void *tun_ptr_to_xdp(void *ptr); | ||
23 | #else | 28 | #else |
24 | #include <linux/err.h> | 29 | #include <linux/err.h> |
25 | #include <linux/errno.h> | 30 | #include <linux/errno.h> |
@@ -29,9 +34,21 @@ static inline struct socket *tun_get_socket(struct file *f) | |||
29 | { | 34 | { |
30 | return ERR_PTR(-EINVAL); | 35 | return ERR_PTR(-EINVAL); |
31 | } | 36 | } |
32 | static inline struct skb_array *tun_get_skb_array(struct file *f) | 37 | static inline struct ptr_ring *tun_get_tx_ring(struct file *f) |
33 | { | 38 | { |
34 | return ERR_PTR(-EINVAL); | 39 | return ERR_PTR(-EINVAL); |
35 | } | 40 | } |
41 | static inline bool tun_is_xdp_buff(void *ptr) | ||
42 | { | ||
43 | return false; | ||
44 | } | ||
45 | static inline void *tun_xdp_to_ptr(void *ptr) | ||
46 | { | ||
47 | return NULL; | ||
48 | } | ||
49 | static inline void *tun_ptr_to_xdp(void *ptr) | ||
50 | { | ||
51 | return NULL; | ||
52 | } | ||
36 | #endif /* CONFIG_TUN */ | 53 | #endif /* CONFIG_TUN */ |
37 | #endif /* __IF_TUN_H */ | 54 | #endif /* __IF_TUN_H */ |
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h index f12a61be1ede..11579fd4126e 100644 --- a/include/linux/iio/iio.h +++ b/include/linux/iio/iio.h | |||
@@ -578,8 +578,8 @@ const struct iio_chan_spec | |||
578 | * iio_device_register() - register a device with the IIO subsystem | 578 | * iio_device_register() - register a device with the IIO subsystem |
579 | * @indio_dev: Device structure filled by the device driver | 579 | * @indio_dev: Device structure filled by the device driver |
580 | **/ | 580 | **/ |
581 | #define iio_device_register(iio_dev) \ | 581 | #define iio_device_register(indio_dev) \ |
582 | __iio_device_register((iio_dev), THIS_MODULE) | 582 | __iio_device_register((indio_dev), THIS_MODULE) |
583 | int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); | 583 | int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod); |
584 | void iio_device_unregister(struct iio_dev *indio_dev); | 584 | void iio_device_unregister(struct iio_dev *indio_dev); |
585 | /** | 585 | /** |
diff --git a/include/linux/iio/machine.h b/include/linux/iio/machine.h index 1601a2a63a72..5e1cfa75f652 100644 --- a/include/linux/iio/machine.h +++ b/include/linux/iio/machine.h | |||
@@ -28,4 +28,11 @@ struct iio_map { | |||
28 | void *consumer_data; | 28 | void *consumer_data; |
29 | }; | 29 | }; |
30 | 30 | ||
31 | #define IIO_MAP(_provider_channel, _consumer_dev_name, _consumer_channel) \ | ||
32 | { \ | ||
33 | .adc_channel_label = _provider_channel, \ | ||
34 | .consumer_dev_name = _consumer_dev_name, \ | ||
35 | .consumer_channel = _consumer_channel, \ | ||
36 | } | ||
37 | |||
31 | #endif | 38 | #endif |
diff --git a/include/linux/iio/trigger.h b/include/linux/iio/trigger.h index 7d5e44518379..b19b7204ef84 100644 --- a/include/linux/iio/trigger.h +++ b/include/linux/iio/trigger.h | |||
@@ -43,12 +43,13 @@ struct iio_trigger_ops { | |||
43 | /** | 43 | /** |
44 | * struct iio_trigger - industrial I/O trigger device | 44 | * struct iio_trigger - industrial I/O trigger device |
45 | * @ops: [DRIVER] operations structure | 45 | * @ops: [DRIVER] operations structure |
46 | * @owner: [INTERN] owner of this driver module | ||
46 | * @id: [INTERN] unique id number | 47 | * @id: [INTERN] unique id number |
47 | * @name: [DRIVER] unique name | 48 | * @name: [DRIVER] unique name |
48 | * @dev: [DRIVER] associated device (if relevant) | 49 | * @dev: [DRIVER] associated device (if relevant) |
49 | * @list: [INTERN] used in maintenance of global trigger list | 50 | * @list: [INTERN] used in maintenance of global trigger list |
50 | * @alloc_list: [DRIVER] used for driver specific trigger list | 51 | * @alloc_list: [DRIVER] used for driver specific trigger list |
51 | * @use_count: use count for the trigger | 52 | * @use_count: [INTERN] use count for the trigger. |
52 | * @subirq_chip: [INTERN] associate 'virtual' irq chip. | 53 | * @subirq_chip: [INTERN] associate 'virtual' irq chip. |
53 | * @subirq_base: [INTERN] base number for irqs provided by trigger. | 54 | * @subirq_base: [INTERN] base number for irqs provided by trigger. |
54 | * @subirqs: [INTERN] information about the 'child' irqs. | 55 | * @subirqs: [INTERN] information about the 'child' irqs. |
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h index 1ac5bf95bfdd..e16fe7d44a71 100644 --- a/include/linux/inetdevice.h +++ b/include/linux/inetdevice.h | |||
@@ -173,7 +173,7 @@ static inline struct net_device *ip_dev_find(struct net *net, __be32 addr) | |||
173 | } | 173 | } |
174 | 174 | ||
175 | int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); | 175 | int inet_addr_onlink(struct in_device *in_dev, __be32 a, __be32 b); |
176 | int devinet_ioctl(struct net *net, unsigned int cmd, void __user *); | 176 | int devinet_ioctl(struct net *net, unsigned int cmd, struct ifreq *); |
177 | void devinet_init(void); | 177 | void devinet_init(void); |
178 | struct in_device *inetdev_by_index(struct net *, int); | 178 | struct in_device *inetdev_by_index(struct net *, int); |
179 | __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); | 179 | __be32 inet_select_addr(const struct net_device *dev, __be32 dst, int scope); |
diff --git a/include/linux/init.h b/include/linux/init.h index ea1b31101d9e..506a98151131 100644 --- a/include/linux/init.h +++ b/include/linux/init.h | |||
@@ -5,6 +5,13 @@ | |||
5 | #include <linux/compiler.h> | 5 | #include <linux/compiler.h> |
6 | #include <linux/types.h> | 6 | #include <linux/types.h> |
7 | 7 | ||
8 | /* Built-in __init functions needn't be compiled with retpoline */ | ||
9 | #if defined(RETPOLINE) && !defined(MODULE) | ||
10 | #define __noretpoline __attribute__((indirect_branch("keep"))) | ||
11 | #else | ||
12 | #define __noretpoline | ||
13 | #endif | ||
14 | |||
8 | /* These macros are used to mark some functions or | 15 | /* These macros are used to mark some functions or |
9 | * initialized data (doesn't apply to uninitialized data) | 16 | * initialized data (doesn't apply to uninitialized data) |
10 | * as `initialization' functions. The kernel can take this | 17 | * as `initialization' functions. The kernel can take this |
@@ -40,7 +47,7 @@ | |||
40 | 47 | ||
41 | /* These are for everybody (although not all archs will actually | 48 | /* These are for everybody (although not all archs will actually |
42 | discard it in modules) */ | 49 | discard it in modules) */ |
43 | #define __init __section(.init.text) __cold __latent_entropy | 50 | #define __init __section(.init.text) __cold __latent_entropy __noretpoline |
44 | #define __initdata __section(.init.data) | 51 | #define __initdata __section(.init.data) |
45 | #define __initconst __section(.init.rodata) | 52 | #define __initconst __section(.init.rodata) |
46 | #define __exitdata __section(.exit.data) | 53 | #define __exitdata __section(.exit.data) |
diff --git a/include/linux/input/gpio_tilt.h b/include/linux/input/gpio_tilt.h deleted file mode 100644 index f9d932476a80..000000000000 --- a/include/linux/input/gpio_tilt.h +++ /dev/null | |||
@@ -1,74 +0,0 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef _INPUT_GPIO_TILT_H | ||
3 | #define _INPUT_GPIO_TILT_H | ||
4 | |||
5 | /** | ||
6 | * struct gpio_tilt_axis - Axis used by the tilt switch | ||
7 | * @axis: Constant describing the axis, e.g. ABS_X | ||
8 | * @min: minimum value for abs_param | ||
9 | * @max: maximum value for abs_param | ||
10 | * @fuzz: fuzz value for abs_param | ||
11 | * @flat: flat value for abs_param | ||
12 | */ | ||
13 | struct gpio_tilt_axis { | ||
14 | int axis; | ||
15 | int min; | ||
16 | int max; | ||
17 | int fuzz; | ||
18 | int flat; | ||
19 | }; | ||
20 | |||
21 | /** | ||
22 | * struct gpio_tilt_state - state description | ||
23 | * @gpios: bitfield of gpio target-states for the value | ||
24 | * @axes: array containing the axes settings for the gpio state | ||
25 | * The array indizes must correspond to the axes defined | ||
26 | * in platform_data | ||
27 | * | ||
28 | * This structure describes a supported axis settings | ||
29 | * and the necessary gpio-state which represent it. | ||
30 | * | ||
31 | * The n-th bit in the bitfield describes the state of the n-th GPIO | ||
32 | * from the gpios-array defined in gpio_regulator_config below. | ||
33 | */ | ||
34 | struct gpio_tilt_state { | ||
35 | int gpios; | ||
36 | int *axes; | ||
37 | }; | ||
38 | |||
39 | /** | ||
40 | * struct gpio_tilt_platform_data | ||
41 | * @gpios: Array containing the gpios determining the tilt state | ||
42 | * @nr_gpios: Number of gpios | ||
43 | * @axes: Array of gpio_tilt_axis descriptions | ||
44 | * @nr_axes: Number of axes | ||
45 | * @states: Array of gpio_tilt_state entries describing | ||
46 | * the gpio state for specific tilts | ||
47 | * @nr_states: Number of states available | ||
48 | * @debounce_interval: debounce ticks interval in msecs | ||
49 | * @poll_interval: polling interval in msecs - for polling driver only | ||
50 | * @enable: callback to enable the tilt switch | ||
51 | * @disable: callback to disable the tilt switch | ||
52 | * | ||
53 | * This structure contains gpio-tilt-switch configuration | ||
54 | * information that must be passed by platform code to the | ||
55 | * gpio-tilt input driver. | ||
56 | */ | ||
57 | struct gpio_tilt_platform_data { | ||
58 | struct gpio *gpios; | ||
59 | int nr_gpios; | ||
60 | |||
61 | struct gpio_tilt_axis *axes; | ||
62 | int nr_axes; | ||
63 | |||
64 | struct gpio_tilt_state *states; | ||
65 | int nr_states; | ||
66 | |||
67 | int debounce_interval; | ||
68 | |||
69 | unsigned int poll_interval; | ||
70 | int (*enable)(struct device *dev); | ||
71 | void (*disable)(struct device *dev); | ||
72 | }; | ||
73 | |||
74 | #endif | ||
diff --git a/include/linux/integrity.h b/include/linux/integrity.h index c2d6082a1a4c..858d3f4a2241 100644 --- a/include/linux/integrity.h +++ b/include/linux/integrity.h | |||
@@ -14,6 +14,7 @@ | |||
14 | 14 | ||
15 | enum integrity_status { | 15 | enum integrity_status { |
16 | INTEGRITY_PASS = 0, | 16 | INTEGRITY_PASS = 0, |
17 | INTEGRITY_PASS_IMMUTABLE, | ||
17 | INTEGRITY_FAIL, | 18 | INTEGRITY_FAIL, |
18 | INTEGRITY_NOLABEL, | 19 | INTEGRITY_NOLABEL, |
19 | INTEGRITY_NOXATTRS, | 20 | INTEGRITY_NOXATTRS, |
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index f3274d9f46a2..8dad3dd26eae 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
@@ -83,7 +83,9 @@ | |||
83 | /* | 83 | /* |
84 | * Decoding Capability Register | 84 | * Decoding Capability Register |
85 | */ | 85 | */ |
86 | #define cap_5lp_support(c) (((c) >> 60) & 1) | ||
86 | #define cap_pi_support(c) (((c) >> 59) & 1) | 87 | #define cap_pi_support(c) (((c) >> 59) & 1) |
88 | #define cap_fl1gp_support(c) (((c) >> 56) & 1) | ||
87 | #define cap_read_drain(c) (((c) >> 55) & 1) | 89 | #define cap_read_drain(c) (((c) >> 55) & 1) |
88 | #define cap_write_drain(c) (((c) >> 54) & 1) | 90 | #define cap_write_drain(c) (((c) >> 54) & 1) |
89 | #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) | 91 | #define cap_max_amask_val(c) (((c) >> 48) & 0x3f) |
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 93b4183cf53d..da0ebaec25f0 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h | |||
@@ -265,7 +265,7 @@ extern struct resource * __devm_request_region(struct device *dev, | |||
265 | extern void __devm_release_region(struct device *dev, struct resource *parent, | 265 | extern void __devm_release_region(struct device *dev, struct resource *parent, |
266 | resource_size_t start, resource_size_t n); | 266 | resource_size_t start, resource_size_t n); |
267 | extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); | 267 | extern int iomem_map_sanity_check(resource_size_t addr, unsigned long size); |
268 | extern int iomem_is_exclusive(u64 addr); | 268 | extern bool iomem_is_exclusive(u64 addr); |
269 | 269 | ||
270 | extern int | 270 | extern int |
271 | walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, | 271 | walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, |
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 0e81035b678f..b11fcdfd0770 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h | |||
@@ -13,10 +13,13 @@ | |||
13 | * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed | 13 | * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #define IRQ_WORK_PENDING 1UL | 16 | #define IRQ_WORK_PENDING BIT(0) |
17 | #define IRQ_WORK_BUSY 2UL | 17 | #define IRQ_WORK_BUSY BIT(1) |
18 | #define IRQ_WORK_FLAGS 3UL | 18 | |
19 | #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ | 19 | /* Doesn't want IPI, wait for tick: */ |
20 | #define IRQ_WORK_LAZY BIT(2) | ||
21 | |||
22 | #define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY) | ||
20 | 23 | ||
21 | struct irq_work { | 24 | struct irq_work { |
22 | unsigned long flags; | 25 | unsigned long flags; |
diff --git a/include/linux/iversion.h b/include/linux/iversion.h new file mode 100644 index 000000000000..be50ef7cedab --- /dev/null +++ b/include/linux/iversion.h | |||
@@ -0,0 +1,337 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef _LINUX_IVERSION_H | ||
3 | #define _LINUX_IVERSION_H | ||
4 | |||
5 | #include <linux/fs.h> | ||
6 | |||
7 | /* | ||
8 | * The inode->i_version field: | ||
9 | * --------------------------- | ||
10 | * The change attribute (i_version) is mandated by NFSv4 and is mostly for | ||
11 | * knfsd, but is also used for other purposes (e.g. IMA). The i_version must | ||
12 | * appear different to observers if there was a change to the inode's data or | ||
13 | * metadata since it was last queried. | ||
14 | * | ||
15 | * Observers see the i_version as a 64-bit number that never decreases. If it | ||
16 | * remains the same since it was last checked, then nothing has changed in the | ||
17 | * inode. If it's different then something has changed. Observers cannot infer | ||
18 | * anything about the nature or magnitude of the changes from the value, only | ||
19 | * that the inode has changed in some fashion. | ||
20 | * | ||
21 | * Not all filesystems properly implement the i_version counter. Subsystems that | ||
22 | * want to use i_version field on an inode should first check whether the | ||
23 | * filesystem sets the SB_I_VERSION flag (usually via the IS_I_VERSION macro). | ||
24 | * | ||
25 | * Those that set SB_I_VERSION will automatically have their i_version counter | ||
26 | * incremented on writes to normal files. If the SB_I_VERSION is not set, then | ||
27 | * the VFS will not touch it on writes, and the filesystem can use it how it | ||
28 | * wishes. Note that the filesystem is always responsible for updating the | ||
29 | * i_version on namespace changes in directories (mkdir, rmdir, unlink, etc.). | ||
30 | * We consider these sorts of filesystems to have a kernel-managed i_version. | ||
31 | * | ||
32 | * It may be impractical for filesystems to keep i_version updates atomic with | ||
33 | * respect to the changes that cause them. They should, however, guarantee | ||
34 | * that i_version updates are never visible before the changes that caused | ||
35 | * them. Also, i_version updates should never be delayed longer than it takes | ||
36 | * the original change to reach disk. | ||
37 | * | ||
38 | * This implementation uses the low bit in the i_version field as a flag to | ||
39 | * track when the value has been queried. If it has not been queried since it | ||
40 | * was last incremented, we can skip the increment in most cases. | ||
41 | * | ||
42 | * In the event that we're updating the ctime, we will usually go ahead and | ||
43 | * bump the i_version anyway. Since that has to go to stable storage in some | ||
44 | * fashion, we might as well increment it as well. | ||
45 | * | ||
46 | * With this implementation, the value should always appear to observers to | ||
47 | * increase over time if the file has changed. It's recommended to use | ||
48 | * inode_eq_iversion() helper to compare values. | ||
49 | * | ||
50 | * Note that some filesystems (e.g. NFS and AFS) just use the field to store | ||
51 | * a server-provided value (for the most part). For that reason, those | ||
52 | * filesystems do not set SB_I_VERSION. These filesystems are considered to | ||
53 | * have a self-managed i_version. | ||
54 | * | ||
55 | * Persistently storing the i_version | ||
56 | * ---------------------------------- | ||
57 | * Queries of the i_version field are not gated on them hitting the backing | ||
58 | * store. It's always possible that the host could crash after allowing | ||
59 | * a query of the value but before it has made it to disk. | ||
60 | * | ||
61 | * To mitigate this problem, filesystems should always use | ||
62 | * inode_set_iversion_queried when loading an existing inode from disk. This | ||
63 | * ensures that the next attempted inode increment will result in the value | ||
64 | * changing. | ||
65 | * | ||
66 | * Storing the value to disk therefore does not count as a query, so those | ||
67 | * filesystems should use inode_peek_iversion to grab the value to be stored. | ||
68 | * There is no need to flag the value as having been queried in that case. | ||
69 | */ | ||
70 | |||
71 | /* | ||
72 | * We borrow the lowest bit in the i_version to use as a flag to tell whether | ||
73 | * it has been queried since we last incremented it. If it has, then we must | ||
74 | * increment it on the next change. After that, we can clear the flag and | ||
75 | * avoid incrementing it again until it has again been queried. | ||
76 | */ | ||
77 | #define I_VERSION_QUERIED_SHIFT (1) | ||
78 | #define I_VERSION_QUERIED (1ULL << (I_VERSION_QUERIED_SHIFT - 1)) | ||
79 | #define I_VERSION_INCREMENT (1ULL << I_VERSION_QUERIED_SHIFT) | ||
80 | |||
81 | /** | ||
82 | * inode_set_iversion_raw - set i_version to the specified raw value | ||
83 | * @inode: inode to set | ||
84 | * @val: new i_version value to set | ||
85 | * | ||
86 | * Set @inode's i_version field to @val. This function is for use by | ||
87 | * filesystems that self-manage the i_version. | ||
88 | * | ||
89 | * For example, the NFS client stores its NFSv4 change attribute in this way, | ||
90 | * and the AFS client stores the data_version from the server here. | ||
91 | */ | ||
92 | static inline void | ||
93 | inode_set_iversion_raw(struct inode *inode, u64 val) | ||
94 | { | ||
95 | atomic64_set(&inode->i_version, val); | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * inode_peek_iversion_raw - grab a "raw" iversion value | ||
100 | * @inode: inode from which i_version should be read | ||
101 | * | ||
102 | * Grab a "raw" inode->i_version value and return it. The i_version is not | ||
103 | * flagged or converted in any way. This is mostly used to access a self-managed | ||
104 | * i_version. | ||
105 | * | ||
106 | * With those filesystems, we want to treat the i_version as an entirely | ||
107 | * opaque value. | ||
108 | */ | ||
109 | static inline u64 | ||
110 | inode_peek_iversion_raw(const struct inode *inode) | ||
111 | { | ||
112 | return atomic64_read(&inode->i_version); | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * inode_set_iversion - set i_version to a particular value | ||
117 | * @inode: inode to set | ||
118 | * @val: new i_version value to set | ||
119 | * | ||
120 | * Set @inode's i_version field to @val. This function is for filesystems with | ||
121 | * a kernel-managed i_version, for initializing a newly-created inode from | ||
122 | * scratch. | ||
123 | * | ||
124 | * In this case, we do not set the QUERIED flag since we know that this value | ||
125 | * has never been queried. | ||
126 | */ | ||
127 | static inline void | ||
128 | inode_set_iversion(struct inode *inode, u64 val) | ||
129 | { | ||
130 | inode_set_iversion_raw(inode, val << I_VERSION_QUERIED_SHIFT); | ||
131 | } | ||
132 | |||
133 | /** | ||
 134 | * inode_set_iversion_queried - set i_version to a particular value as queried | ||
135 | * @inode: inode to set | ||
136 | * @val: new i_version value to set | ||
137 | * | ||
138 | * Set @inode's i_version field to @val, and flag it for increment on the next | ||
139 | * change. | ||
140 | * | ||
141 | * Filesystems that persistently store the i_version on disk should use this | ||
142 | * when loading an existing inode from disk. | ||
143 | * | ||
144 | * When loading in an i_version value from a backing store, we can't be certain | ||
145 | * that it wasn't previously viewed before being stored. Thus, we must assume | ||
146 | * that it was, to ensure that we don't end up handing out the same value for | ||
147 | * different versions of the same inode. | ||
148 | */ | ||
149 | static inline void | ||
150 | inode_set_iversion_queried(struct inode *inode, u64 val) | ||
151 | { | ||
152 | inode_set_iversion_raw(inode, (val << I_VERSION_QUERIED_SHIFT) | | ||
153 | I_VERSION_QUERIED); | ||
154 | } | ||
155 | |||
156 | /** | ||
157 | * inode_maybe_inc_iversion - increments i_version | ||
158 | * @inode: inode with the i_version that should be updated | ||
159 | * @force: increment the counter even if it's not necessary? | ||
160 | * | ||
161 | * Every time the inode is modified, the i_version field must be seen to have | ||
162 | * changed by any observer. | ||
163 | * | ||
164 | * If "force" is set or the QUERIED flag is set, then ensure that we increment | ||
165 | * the value, and clear the queried flag. | ||
166 | * | ||
167 | * In the common case where neither is set, then we can return "false" without | ||
168 | * updating i_version. | ||
169 | * | ||
170 | * If this function returns false, and no other metadata has changed, then we | ||
171 | * can avoid logging the metadata. | ||
172 | */ | ||
173 | static inline bool | ||
174 | inode_maybe_inc_iversion(struct inode *inode, bool force) | ||
175 | { | ||
176 | u64 cur, old, new; | ||
177 | |||
178 | /* | ||
179 | * The i_version field is not strictly ordered with any other inode | ||
180 | * information, but the legacy inode_inc_iversion code used a spinlock | ||
181 | * to serialize increments. | ||
182 | * | ||
183 | * Here, we add full memory barriers to ensure that any de-facto | ||
184 | * ordering with other info is preserved. | ||
185 | * | ||
186 | * This barrier pairs with the barrier in inode_query_iversion() | ||
187 | */ | ||
188 | smp_mb(); | ||
189 | cur = inode_peek_iversion_raw(inode); | ||
190 | for (;;) { | ||
191 | /* If flag is clear then we needn't do anything */ | ||
192 | if (!force && !(cur & I_VERSION_QUERIED)) | ||
193 | return false; | ||
194 | |||
195 | /* Since lowest bit is flag, add 2 to avoid it */ | ||
196 | new = (cur & ~I_VERSION_QUERIED) + I_VERSION_INCREMENT; | ||
197 | |||
198 | old = atomic64_cmpxchg(&inode->i_version, cur, new); | ||
199 | if (likely(old == cur)) | ||
200 | break; | ||
201 | cur = old; | ||
202 | } | ||
203 | return true; | ||
204 | } | ||
205 | |||
206 | |||
207 | /** | ||
208 | * inode_inc_iversion - forcibly increment i_version | ||
209 | * @inode: inode that needs to be updated | ||
210 | * | ||
 211 | * Forcibly increment the i_version field. This always results in a change to | ||
212 | * the observable value. | ||
213 | */ | ||
214 | static inline void | ||
215 | inode_inc_iversion(struct inode *inode) | ||
216 | { | ||
217 | inode_maybe_inc_iversion(inode, true); | ||
218 | } | ||
219 | |||
220 | /** | ||
221 | * inode_iversion_need_inc - is the i_version in need of being incremented? | ||
222 | * @inode: inode to check | ||
223 | * | ||
224 | * Returns whether the inode->i_version counter needs incrementing on the next | ||
225 | * change. Just fetch the value and check the QUERIED flag. | ||
226 | */ | ||
227 | static inline bool | ||
228 | inode_iversion_need_inc(struct inode *inode) | ||
229 | { | ||
230 | return inode_peek_iversion_raw(inode) & I_VERSION_QUERIED; | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * inode_inc_iversion_raw - forcibly increment raw i_version | ||
235 | * @inode: inode that needs to be updated | ||
236 | * | ||
 237 | * Forcibly increment the raw i_version field. This always results in a change | ||
238 | * to the raw value. | ||
239 | * | ||
240 | * NFS will use the i_version field to store the value from the server. It | ||
241 | * mostly treats it as opaque, but in the case where it holds a write | ||
242 | * delegation, it must increment the value itself. This function does that. | ||
243 | */ | ||
244 | static inline void | ||
245 | inode_inc_iversion_raw(struct inode *inode) | ||
246 | { | ||
247 | atomic64_inc(&inode->i_version); | ||
248 | } | ||
249 | |||
250 | /** | ||
251 | * inode_peek_iversion - read i_version without flagging it to be incremented | ||
252 | * @inode: inode from which i_version should be read | ||
253 | * | ||
254 | * Read the inode i_version counter for an inode without registering it as a | ||
255 | * query. | ||
256 | * | ||
257 | * This is typically used by local filesystems that need to store an i_version | ||
258 | * on disk. In that situation, it's not necessary to flag it as having been | ||
259 | * viewed, as the result won't be used to gauge changes from that point. | ||
260 | */ | ||
261 | static inline u64 | ||
262 | inode_peek_iversion(const struct inode *inode) | ||
263 | { | ||
264 | return inode_peek_iversion_raw(inode) >> I_VERSION_QUERIED_SHIFT; | ||
265 | } | ||
266 | |||
267 | /** | ||
268 | * inode_query_iversion - read i_version for later use | ||
269 | * @inode: inode from which i_version should be read | ||
270 | * | ||
271 | * Read the inode i_version counter. This should be used by callers that wish | ||
272 | * to store the returned i_version for later comparison. This will guarantee | ||
273 | * that a later query of the i_version will result in a different value if | ||
274 | * anything has changed. | ||
275 | * | ||
276 | * In this implementation, we fetch the current value, set the QUERIED flag and | ||
277 | * then try to swap it into place with a cmpxchg, if it wasn't already set. If | ||
278 | * that fails, we try again with the newly fetched value from the cmpxchg. | ||
279 | */ | ||
280 | static inline u64 | ||
281 | inode_query_iversion(struct inode *inode) | ||
282 | { | ||
283 | u64 cur, old, new; | ||
284 | |||
285 | cur = inode_peek_iversion_raw(inode); | ||
286 | for (;;) { | ||
287 | /* If flag is already set, then no need to swap */ | ||
288 | if (cur & I_VERSION_QUERIED) { | ||
289 | /* | ||
290 | * This barrier (and the implicit barrier in the | ||
291 | * cmpxchg below) pairs with the barrier in | ||
292 | * inode_maybe_inc_iversion(). | ||
293 | */ | ||
294 | smp_mb(); | ||
295 | break; | ||
296 | } | ||
297 | |||
298 | new = cur | I_VERSION_QUERIED; | ||
299 | old = atomic64_cmpxchg(&inode->i_version, cur, new); | ||
300 | if (likely(old == cur)) | ||
301 | break; | ||
302 | cur = old; | ||
303 | } | ||
304 | return cur >> I_VERSION_QUERIED_SHIFT; | ||
305 | } | ||
306 | |||
307 | /** | ||
308 | * inode_eq_iversion_raw - check whether the raw i_version counter has changed | ||
309 | * @inode: inode to check | ||
310 | * @old: old value to check against its i_version | ||
311 | * | ||
312 | * Compare the current raw i_version counter with a previous one. Returns true | ||
313 | * if they are the same or false if they are different. | ||
314 | */ | ||
315 | static inline bool | ||
316 | inode_eq_iversion_raw(const struct inode *inode, u64 old) | ||
317 | { | ||
318 | return inode_peek_iversion_raw(inode) == old; | ||
319 | } | ||
320 | |||
321 | /** | ||
322 | * inode_eq_iversion - check whether the i_version counter has changed | ||
323 | * @inode: inode to check | ||
324 | * @old: old value to check against its i_version | ||
325 | * | ||
326 | * Compare an i_version counter with a previous one. Returns true if they are | ||
327 | * the same, and false if they are different. | ||
328 | * | ||
329 | * Note that we don't need to set the QUERIED flag in this case, as the value | ||
330 | * in the inode is not being recorded for later use. | ||
331 | */ | ||
332 | static inline bool | ||
333 | inode_eq_iversion(const struct inode *inode, u64 old) | ||
334 | { | ||
335 | return inode_peek_iversion(inode) == old; | ||
336 | } | ||
337 | #endif | ||
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index 296d1e0ea87b..b708e5169d1d 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
@@ -418,26 +418,41 @@ static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) | |||
418 | #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) | 418 | #define JI_WAIT_DATA (1 << __JI_WAIT_DATA) |
419 | 419 | ||
420 | /** | 420 | /** |
421 | * struct jbd_inode is the structure linking inodes in ordered mode | 421 | * struct jbd2_inode - The jbd2_inode type is the structure linking inodes in |
422 | * present in a transaction so that we can sync them during commit. | 422 | * ordered mode present in a transaction so that we can sync them during commit. |
423 | */ | 423 | */ |
424 | struct jbd2_inode { | 424 | struct jbd2_inode { |
425 | /* Which transaction does this inode belong to? Either the running | 425 | /** |
426 | * transaction or the committing one. [j_list_lock] */ | 426 | * @i_transaction: |
427 | * | ||
428 | * Which transaction does this inode belong to? Either the running | ||
429 | * transaction or the committing one. [j_list_lock] | ||
430 | */ | ||
427 | transaction_t *i_transaction; | 431 | transaction_t *i_transaction; |
428 | 432 | ||
429 | /* Pointer to the running transaction modifying inode's data in case | 433 | /** |
430 | * there is already a committing transaction touching it. [j_list_lock] */ | 434 | * @i_next_transaction: |
435 | * | ||
436 | * Pointer to the running transaction modifying inode's data in case | ||
437 | * there is already a committing transaction touching it. [j_list_lock] | ||
438 | */ | ||
431 | transaction_t *i_next_transaction; | 439 | transaction_t *i_next_transaction; |
432 | 440 | ||
433 | /* List of inodes in the i_transaction [j_list_lock] */ | 441 | /** |
442 | * @i_list: List of inodes in the i_transaction [j_list_lock] | ||
443 | */ | ||
434 | struct list_head i_list; | 444 | struct list_head i_list; |
435 | 445 | ||
436 | /* VFS inode this inode belongs to [constant during the lifetime | 446 | /** |
437 | * of the structure] */ | 447 | * @i_vfs_inode: |
448 | * | ||
449 | * VFS inode this inode belongs to [constant for lifetime of structure] | ||
450 | */ | ||
438 | struct inode *i_vfs_inode; | 451 | struct inode *i_vfs_inode; |
439 | 452 | ||
440 | /* Flags of inode [j_list_lock] */ | 453 | /** |
454 | * @i_flags: Flags of inode [j_list_lock] | ||
455 | */ | ||
441 | unsigned long i_flags; | 456 | unsigned long i_flags; |
442 | }; | 457 | }; |
443 | 458 | ||
@@ -447,12 +462,20 @@ struct jbd2_revoke_table_s; | |||
447 | * struct handle_s - The handle_s type is the concrete type associated with | 462 | * struct handle_s - The handle_s type is the concrete type associated with |
448 | * handle_t. | 463 | * handle_t. |
449 | * @h_transaction: Which compound transaction is this update a part of? | 464 | * @h_transaction: Which compound transaction is this update a part of? |
465 | * @h_journal: Which journal handle belongs to - used iff h_reserved set. | ||
466 | * @h_rsv_handle: Handle reserved for finishing the logical operation. | ||
450 | * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. | 467 | * @h_buffer_credits: Number of remaining buffers we are allowed to dirty. |
451 | * @h_ref: Reference count on this handle | 468 | * @h_ref: Reference count on this handle. |
452 | * @h_err: Field for caller's use to track errors through large fs operations | 469 | * @h_err: Field for caller's use to track errors through large fs operations. |
453 | * @h_sync: flag for sync-on-close | 470 | * @h_sync: Flag for sync-on-close. |
454 | * @h_jdata: flag to force data journaling | 471 | * @h_jdata: Flag to force data journaling. |
455 | * @h_aborted: flag indicating fatal error on handle | 472 | * @h_reserved: Flag for handle for reserved credits. |
473 | * @h_aborted: Flag indicating fatal error on handle. | ||
474 | * @h_type: For handle statistics. | ||
475 | * @h_line_no: For handle statistics. | ||
476 | * @h_start_jiffies: Handle Start time. | ||
477 | * @h_requested_credits: Holds @h_buffer_credits after handle is started. | ||
478 | * @saved_alloc_context: Saved context while transaction is open. | ||
456 | **/ | 479 | **/ |
457 | 480 | ||
458 | /* Docbook can't yet cope with the bit fields, but will leave the documentation | 481 | /* Docbook can't yet cope with the bit fields, but will leave the documentation |
@@ -462,32 +485,23 @@ struct jbd2_revoke_table_s; | |||
462 | struct jbd2_journal_handle | 485 | struct jbd2_journal_handle |
463 | { | 486 | { |
464 | union { | 487 | union { |
465 | /* Which compound transaction is this update a part of? */ | ||
466 | transaction_t *h_transaction; | 488 | transaction_t *h_transaction; |
467 | /* Which journal handle belongs to - used iff h_reserved set */ | 489 | /* Which journal handle belongs to - used iff h_reserved set */ |
468 | journal_t *h_journal; | 490 | journal_t *h_journal; |
469 | }; | 491 | }; |
470 | 492 | ||
471 | /* Handle reserved for finishing the logical operation */ | ||
472 | handle_t *h_rsv_handle; | 493 | handle_t *h_rsv_handle; |
473 | |||
474 | /* Number of remaining buffers we are allowed to dirty: */ | ||
475 | int h_buffer_credits; | 494 | int h_buffer_credits; |
476 | |||
477 | /* Reference count on this handle */ | ||
478 | int h_ref; | 495 | int h_ref; |
479 | |||
480 | /* Field for caller's use to track errors through large fs */ | ||
481 | /* operations */ | ||
482 | int h_err; | 496 | int h_err; |
483 | 497 | ||
484 | /* Flags [no locking] */ | 498 | /* Flags [no locking] */ |
485 | unsigned int h_sync: 1; /* sync-on-close */ | 499 | unsigned int h_sync: 1; |
486 | unsigned int h_jdata: 1; /* force data journaling */ | 500 | unsigned int h_jdata: 1; |
487 | unsigned int h_reserved: 1; /* handle with reserved credits */ | 501 | unsigned int h_reserved: 1; |
488 | unsigned int h_aborted: 1; /* fatal error on handle */ | 502 | unsigned int h_aborted: 1; |
489 | unsigned int h_type: 8; /* for handle statistics */ | 503 | unsigned int h_type: 8; |
490 | unsigned int h_line_no: 16; /* for handle statistics */ | 504 | unsigned int h_line_no: 16; |
491 | 505 | ||
492 | unsigned long h_start_jiffies; | 506 | unsigned long h_start_jiffies; |
493 | unsigned int h_requested_credits; | 507 | unsigned int h_requested_credits; |
@@ -729,228 +743,253 @@ jbd2_time_diff(unsigned long start, unsigned long end) | |||
729 | /** | 743 | /** |
730 | * struct journal_s - The journal_s type is the concrete type associated with | 744 | * struct journal_s - The journal_s type is the concrete type associated with |
731 | * journal_t. | 745 | * journal_t. |
732 | * @j_flags: General journaling state flags | ||
733 | * @j_errno: Is there an outstanding uncleared error on the journal (from a | ||
734 | * prior abort)? | ||
735 | * @j_sb_buffer: First part of superblock buffer | ||
736 | * @j_superblock: Second part of superblock buffer | ||
737 | * @j_format_version: Version of the superblock format | ||
738 | * @j_state_lock: Protect the various scalars in the journal | ||
739 | * @j_barrier_count: Number of processes waiting to create a barrier lock | ||
740 | * @j_barrier: The barrier lock itself | ||
741 | * @j_running_transaction: The current running transaction.. | ||
742 | * @j_committing_transaction: the transaction we are pushing to disk | ||
743 | * @j_checkpoint_transactions: a linked circular list of all transactions | ||
744 | * waiting for checkpointing | ||
745 | * @j_wait_transaction_locked: Wait queue for waiting for a locked transaction | ||
746 | * to start committing, or for a barrier lock to be released | ||
747 | * @j_wait_done_commit: Wait queue for waiting for commit to complete | ||
748 | * @j_wait_commit: Wait queue to trigger commit | ||
749 | * @j_wait_updates: Wait queue to wait for updates to complete | ||
750 | * @j_wait_reserved: Wait queue to wait for reserved buffer credits to drop | ||
751 | * @j_checkpoint_mutex: Mutex for locking against concurrent checkpoints | ||
752 | * @j_head: Journal head - identifies the first unused block in the journal | ||
753 | * @j_tail: Journal tail - identifies the oldest still-used block in the | ||
754 | * journal. | ||
755 | * @j_free: Journal free - how many free blocks are there in the journal? | ||
756 | * @j_first: The block number of the first usable block | ||
757 | * @j_last: The block number one beyond the last usable block | ||
758 | * @j_dev: Device where we store the journal | ||
759 | * @j_blocksize: blocksize for the location where we store the journal. | ||
760 | * @j_blk_offset: starting block offset for into the device where we store the | ||
761 | * journal | ||
762 | * @j_fs_dev: Device which holds the client fs. For internal journal this will | ||
763 | * be equal to j_dev | ||
764 | * @j_reserved_credits: Number of buffers reserved from the running transaction | ||
765 | * @j_maxlen: Total maximum capacity of the journal region on disk. | ||
766 | * @j_list_lock: Protects the buffer lists and internal buffer state. | ||
767 | * @j_inode: Optional inode where we store the journal. If present, all journal | ||
768 | * block numbers are mapped into this inode via bmap(). | ||
769 | * @j_tail_sequence: Sequence number of the oldest transaction in the log | ||
770 | * @j_transaction_sequence: Sequence number of the next transaction to grant | ||
771 | * @j_commit_sequence: Sequence number of the most recently committed | ||
772 | * transaction | ||
773 | * @j_commit_request: Sequence number of the most recent transaction wanting | ||
774 | * commit | ||
775 | * @j_uuid: Uuid of client object. | ||
776 | * @j_task: Pointer to the current commit thread for this journal | ||
777 | * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a | ||
778 | * single compound commit transaction | ||
779 | * @j_commit_interval: What is the maximum transaction lifetime before we begin | ||
780 | * a commit? | ||
781 | * @j_commit_timer: The timer used to wakeup the commit thread | ||
782 | * @j_revoke_lock: Protect the revoke table | ||
783 | * @j_revoke: The revoke table - maintains the list of revoked blocks in the | ||
784 | * current transaction. | ||
785 | * @j_revoke_table: alternate revoke tables for j_revoke | ||
786 | * @j_wbuf: array of buffer_heads for jbd2_journal_commit_transaction | ||
787 | * @j_wbufsize: maximum number of buffer_heads allowed in j_wbuf, the | ||
788 | * number that will fit in j_blocksize | ||
789 | * @j_last_sync_writer: most recent pid which did a synchronous write | ||
790 | * @j_history_lock: Protect the transactions statistics history | ||
791 | * @j_proc_entry: procfs entry for the jbd statistics directory | ||
792 | * @j_stats: Overall statistics | ||
793 | * @j_private: An opaque pointer to fs-private information. | ||
794 | * @j_trans_commit_map: Lockdep entity to track transaction commit dependencies | ||
795 | */ | 746 | */ |
796 | |||
797 | struct journal_s | 747 | struct journal_s |
798 | { | 748 | { |
799 | /* General journaling state flags [j_state_lock] */ | 749 | /** |
750 | * @j_flags: General journaling state flags [j_state_lock] | ||
751 | */ | ||
800 | unsigned long j_flags; | 752 | unsigned long j_flags; |
801 | 753 | ||
802 | /* | 754 | /** |
755 | * @j_errno: | ||
756 | * | ||
803 | * Is there an outstanding uncleared error on the journal (from a prior | 757 | * Is there an outstanding uncleared error on the journal (from a prior |
804 | * abort)? [j_state_lock] | 758 | * abort)? [j_state_lock] |
805 | */ | 759 | */ |
806 | int j_errno; | 760 | int j_errno; |
807 | 761 | ||
808 | /* The superblock buffer */ | 762 | /** |
763 | * @j_sb_buffer: The first part of the superblock buffer. | ||
764 | */ | ||
809 | struct buffer_head *j_sb_buffer; | 765 | struct buffer_head *j_sb_buffer; |
766 | |||
767 | /** | ||
768 | * @j_superblock: The second part of the superblock buffer. | ||
769 | */ | ||
810 | journal_superblock_t *j_superblock; | 770 | journal_superblock_t *j_superblock; |
811 | 771 | ||
812 | /* Version of the superblock format */ | 772 | /** |
773 | * @j_format_version: Version of the superblock format. | ||
774 | */ | ||
813 | int j_format_version; | 775 | int j_format_version; |
814 | 776 | ||
815 | /* | 777 | /** |
816 | * Protect the various scalars in the journal | 778 | * @j_state_lock: Protect the various scalars in the journal. |
817 | */ | 779 | */ |
818 | rwlock_t j_state_lock; | 780 | rwlock_t j_state_lock; |
819 | 781 | ||
820 | /* | 782 | /** |
783 | * @j_barrier_count: | ||
784 | * | ||
821 | * Number of processes waiting to create a barrier lock [j_state_lock] | 785 | * Number of processes waiting to create a barrier lock [j_state_lock] |
822 | */ | 786 | */ |
823 | int j_barrier_count; | 787 | int j_barrier_count; |
824 | 788 | ||
825 | /* The barrier lock itself */ | 789 | /** |
790 | * @j_barrier: The barrier lock itself. | ||
791 | */ | ||
826 | struct mutex j_barrier; | 792 | struct mutex j_barrier; |
827 | 793 | ||
828 | /* | 794 | /** |
795 | * @j_running_transaction: | ||
796 | * | ||
829 | * Transactions: The current running transaction... | 797 | * Transactions: The current running transaction... |
830 | * [j_state_lock] [caller holding open handle] | 798 | * [j_state_lock] [caller holding open handle] |
831 | */ | 799 | */ |
832 | transaction_t *j_running_transaction; | 800 | transaction_t *j_running_transaction; |
833 | 801 | ||
834 | /* | 802 | /** |
803 | * @j_committing_transaction: | ||
804 | * | ||
835 | * the transaction we are pushing to disk | 805 | * the transaction we are pushing to disk |
836 | * [j_state_lock] [caller holding open handle] | 806 | * [j_state_lock] [caller holding open handle] |
837 | */ | 807 | */ |
838 | transaction_t *j_committing_transaction; | 808 | transaction_t *j_committing_transaction; |
839 | 809 | ||
840 | /* | 810 | /** |
811 | * @j_checkpoint_transactions: | ||
812 | * | ||
841 | * ... and a linked circular list of all transactions waiting for | 813 | * ... and a linked circular list of all transactions waiting for |
842 | * checkpointing. [j_list_lock] | 814 | * checkpointing. [j_list_lock] |
843 | */ | 815 | */ |
844 | transaction_t *j_checkpoint_transactions; | 816 | transaction_t *j_checkpoint_transactions; |
845 | 817 | ||
846 | /* | 818 | /** |
819 | * @j_wait_transaction_locked: | ||
820 | * | ||
847 | * Wait queue for waiting for a locked transaction to start committing, | 821 | * Wait queue for waiting for a locked transaction to start committing, |
848 | * or for a barrier lock to be released | 822 | * or for a barrier lock to be released. |
849 | */ | 823 | */ |
850 | wait_queue_head_t j_wait_transaction_locked; | 824 | wait_queue_head_t j_wait_transaction_locked; |
851 | 825 | ||
852 | /* Wait queue for waiting for commit to complete */ | 826 | /** |
827 | * @j_wait_done_commit: Wait queue for waiting for commit to complete. | ||
828 | */ | ||
853 | wait_queue_head_t j_wait_done_commit; | 829 | wait_queue_head_t j_wait_done_commit; |
854 | 830 | ||
855 | /* Wait queue to trigger commit */ | 831 | /** |
832 | * @j_wait_commit: Wait queue to trigger commit. | ||
833 | */ | ||
856 | wait_queue_head_t j_wait_commit; | 834 | wait_queue_head_t j_wait_commit; |
857 | 835 | ||
858 | /* Wait queue to wait for updates to complete */ | 836 | /** |
837 | * @j_wait_updates: Wait queue to wait for updates to complete. | ||
838 | */ | ||
859 | wait_queue_head_t j_wait_updates; | 839 | wait_queue_head_t j_wait_updates; |
860 | 840 | ||
861 | /* Wait queue to wait for reserved buffer credits to drop */ | 841 | /** |
842 | * @j_wait_reserved: | ||
843 | * | ||
844 | * Wait queue to wait for reserved buffer credits to drop. | ||
845 | */ | ||
862 | wait_queue_head_t j_wait_reserved; | 846 | wait_queue_head_t j_wait_reserved; |
863 | 847 | ||
864 | /* Semaphore for locking against concurrent checkpoints */ | 848 | /** |
849 | * @j_checkpoint_mutex: | ||
850 | * | ||
851 | * Semaphore for locking against concurrent checkpoints. | ||
852 | */ | ||
865 | struct mutex j_checkpoint_mutex; | 853 | struct mutex j_checkpoint_mutex; |
866 | 854 | ||
867 | /* | 855 | /** |
856 | * @j_chkpt_bhs: | ||
857 | * | ||
868 | * List of buffer heads used by the checkpoint routine. This | 858 | * List of buffer heads used by the checkpoint routine. This |
869 | * was moved from jbd2_log_do_checkpoint() to reduce stack | 859 | * was moved from jbd2_log_do_checkpoint() to reduce stack |
870 | * usage. Access to this array is controlled by the | 860 | * usage. Access to this array is controlled by the |
871 | * j_checkpoint_mutex. [j_checkpoint_mutex] | 861 | * @j_checkpoint_mutex. [j_checkpoint_mutex] |
872 | */ | 862 | */ |
873 | struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; | 863 | struct buffer_head *j_chkpt_bhs[JBD2_NR_BATCH]; |
874 | 864 | ||
875 | /* | 865 | /** |
866 | * @j_head: | ||
867 | * | ||
876 | * Journal head: identifies the first unused block in the journal. | 868 | * Journal head: identifies the first unused block in the journal. |
877 | * [j_state_lock] | 869 | * [j_state_lock] |
878 | */ | 870 | */ |
879 | unsigned long j_head; | 871 | unsigned long j_head; |
880 | 872 | ||
881 | /* | 873 | /** |
874 | * @j_tail: | ||
875 | * | ||
882 | * Journal tail: identifies the oldest still-used block in the journal. | 876 | * Journal tail: identifies the oldest still-used block in the journal. |
883 | * [j_state_lock] | 877 | * [j_state_lock] |
884 | */ | 878 | */ |
885 | unsigned long j_tail; | 879 | unsigned long j_tail; |
886 | 880 | ||
887 | /* | 881 | /** |
882 | * @j_free: | ||
883 | * | ||
888 | * Journal free: how many free blocks are there in the journal? | 884 | * Journal free: how many free blocks are there in the journal? |
889 | * [j_state_lock] | 885 | * [j_state_lock] |
890 | */ | 886 | */ |
891 | unsigned long j_free; | 887 | unsigned long j_free; |
892 | 888 | ||
893 | /* | 889 | /** |
894 | * Journal start and end: the block numbers of the first usable block | 890 | * @j_first: |
895 | * and one beyond the last usable block in the journal. [j_state_lock] | 891 | * |
892 | * The block number of the first usable block in the journal | ||
893 | * [j_state_lock]. | ||
896 | */ | 894 | */ |
897 | unsigned long j_first; | 895 | unsigned long j_first; |
896 | |||
897 | /** | ||
898 | * @j_last: | ||
899 | * | ||
900 | * The block number one beyond the last usable block in the journal | ||
901 | * [j_state_lock]. | ||
902 | */ | ||
898 | unsigned long j_last; | 903 | unsigned long j_last; |
899 | 904 | ||
900 | /* | 905 | /** |
901 | * Device, blocksize and starting block offset for the location where we | 906 | * @j_dev: Device where we store the journal. |
902 | * store the journal. | ||
903 | */ | 907 | */ |
904 | struct block_device *j_dev; | 908 | struct block_device *j_dev; |
909 | |||
910 | /** | ||
911 | * @j_blocksize: Block size for the location where we store the journal. | ||
912 | */ | ||
905 | int j_blocksize; | 913 | int j_blocksize; |
914 | |||
915 | /** | ||
916 | * @j_blk_offset: | ||
917 | * | ||
918 | * Starting block offset into the device where we store the journal. | ||
919 | */ | ||
906 | unsigned long long j_blk_offset; | 920 | unsigned long long j_blk_offset; |
921 | |||
922 | /** | ||
923 | * @j_devname: Journal device name. | ||
924 | */ | ||
907 | char j_devname[BDEVNAME_SIZE+24]; | 925 | char j_devname[BDEVNAME_SIZE+24]; |
908 | 926 | ||
909 | /* | 927 | /** |
928 | * @j_fs_dev: | ||
929 | * | ||
910 | * Device which holds the client fs. For internal journal this will be | 930 | * Device which holds the client fs. For internal journal this will be |
911 | * equal to j_dev. | 931 | * equal to j_dev. |
912 | */ | 932 | */ |
913 | struct block_device *j_fs_dev; | 933 | struct block_device *j_fs_dev; |
914 | 934 | ||
915 | /* Total maximum capacity of the journal region on disk. */ | 935 | /** |
936 | * @j_maxlen: Total maximum capacity of the journal region on disk. | ||
937 | */ | ||
916 | unsigned int j_maxlen; | 938 | unsigned int j_maxlen; |
917 | 939 | ||
918 | /* Number of buffers reserved from the running transaction */ | 940 | /** |
941 | * @j_reserved_credits: | ||
942 | * | ||
943 | * Number of buffers reserved from the running transaction. | ||
944 | */ | ||
919 | atomic_t j_reserved_credits; | 945 | atomic_t j_reserved_credits; |
920 | 946 | ||
921 | /* | 947 | /** |
922 | * Protects the buffer lists and internal buffer state. | 948 | * @j_list_lock: Protects the buffer lists and internal buffer state. |
923 | */ | 949 | */ |
924 | spinlock_t j_list_lock; | 950 | spinlock_t j_list_lock; |
925 | 951 | ||
926 | /* Optional inode where we store the journal. If present, all */ | 952 | /** |
927 | /* journal block numbers are mapped into this inode via */ | 953 | * @j_inode: |
928 | /* bmap(). */ | 954 | * |
955 | * Optional inode where we store the journal. If present, all | ||
956 | * journal block numbers are mapped into this inode via bmap(). | ||
957 | */ | ||
929 | struct inode *j_inode; | 958 | struct inode *j_inode; |
930 | 959 | ||
931 | /* | 960 | /** |
961 | * @j_tail_sequence: | ||
962 | * | ||
932 | * Sequence number of the oldest transaction in the log [j_state_lock] | 963 | * Sequence number of the oldest transaction in the log [j_state_lock] |
933 | */ | 964 | */ |
934 | tid_t j_tail_sequence; | 965 | tid_t j_tail_sequence; |
935 | 966 | ||
936 | /* | 967 | /** |
968 | * @j_transaction_sequence: | ||
969 | * | ||
937 | * Sequence number of the next transaction to grant [j_state_lock] | 970 | * Sequence number of the next transaction to grant [j_state_lock] |
938 | */ | 971 | */ |
939 | tid_t j_transaction_sequence; | 972 | tid_t j_transaction_sequence; |
940 | 973 | ||
941 | /* | 974 | /** |
975 | * @j_commit_sequence: | ||
976 | * | ||
942 | * Sequence number of the most recently committed transaction | 977 | * Sequence number of the most recently committed transaction |
943 | * [j_state_lock]. | 978 | * [j_state_lock]. |
944 | */ | 979 | */ |
945 | tid_t j_commit_sequence; | 980 | tid_t j_commit_sequence; |
946 | 981 | ||
947 | /* | 982 | /** |
983 | * @j_commit_request: | ||
984 | * | ||
948 | * Sequence number of the most recent transaction wanting commit | 985 | * Sequence number of the most recent transaction wanting commit |
949 | * [j_state_lock] | 986 | * [j_state_lock] |
950 | */ | 987 | */ |
951 | tid_t j_commit_request; | 988 | tid_t j_commit_request; |
952 | 989 | ||
953 | /* | 990 | /** |
991 | * @j_uuid: | ||
992 | * | ||
954 | * Journal uuid: identifies the object (filesystem, LVM volume etc) | 993 | * Journal uuid: identifies the object (filesystem, LVM volume etc) |
955 | * backed by this journal. This will eventually be replaced by an array | 994 | * backed by this journal. This will eventually be replaced by an array |
956 | * of uuids, allowing us to index multiple devices within a single | 995 | * of uuids, allowing us to index multiple devices within a single |
@@ -958,85 +997,151 @@ struct journal_s | |||
958 | */ | 997 | */ |
959 | __u8 j_uuid[16]; | 998 | __u8 j_uuid[16]; |
960 | 999 | ||
961 | /* Pointer to the current commit thread for this journal */ | 1000 | /** |
1001 | * @j_task: Pointer to the current commit thread for this journal. | ||
1002 | */ | ||
962 | struct task_struct *j_task; | 1003 | struct task_struct *j_task; |
963 | 1004 | ||
964 | /* | 1005 | /** |
1006 | * @j_max_transaction_buffers: | ||
1007 | * | ||
965 | * Maximum number of metadata buffers to allow in a single compound | 1008 | * Maximum number of metadata buffers to allow in a single compound |
966 | * commit transaction | 1009 | * commit transaction. |
967 | */ | 1010 | */ |
968 | int j_max_transaction_buffers; | 1011 | int j_max_transaction_buffers; |
969 | 1012 | ||
970 | /* | 1013 | /** |
1014 | * @j_commit_interval: | ||
1015 | * | ||
971 | * What is the maximum transaction lifetime before we begin a commit? | 1016 | * What is the maximum transaction lifetime before we begin a commit? |
972 | */ | 1017 | */ |
973 | unsigned long j_commit_interval; | 1018 | unsigned long j_commit_interval; |
974 | 1019 | ||
975 | /* The timer used to wakeup the commit thread: */ | 1020 | /** |
1021 | * @j_commit_timer: The timer used to wakeup the commit thread. | ||
1022 | */ | ||
976 | struct timer_list j_commit_timer; | 1023 | struct timer_list j_commit_timer; |
977 | 1024 | ||
978 | /* | 1025 | /** |
979 | * The revoke table: maintains the list of revoked blocks in the | 1026 | * @j_revoke_lock: Protect the revoke table. |
980 | * current transaction. [j_revoke_lock] | ||
981 | */ | 1027 | */ |
982 | spinlock_t j_revoke_lock; | 1028 | spinlock_t j_revoke_lock; |
1029 | |||
1030 | /** | ||
1031 | * @j_revoke: | ||
1032 | * | ||
1033 | * The revoke table - maintains the list of revoked blocks in the | ||
1034 | * current transaction. | ||
1035 | */ | ||
983 | struct jbd2_revoke_table_s *j_revoke; | 1036 | struct jbd2_revoke_table_s *j_revoke; |
1037 | |||
1038 | /** | ||
1039 | * @j_revoke_table: Alternate revoke tables for j_revoke. | ||
1040 | */ | ||
984 | struct jbd2_revoke_table_s *j_revoke_table[2]; | 1041 | struct jbd2_revoke_table_s *j_revoke_table[2]; |
985 | 1042 | ||
986 | /* | 1043 | /** |
987 | * array of bhs for jbd2_journal_commit_transaction | 1044 | * @j_wbuf: Array of bhs for jbd2_journal_commit_transaction. |
988 | */ | 1045 | */ |
989 | struct buffer_head **j_wbuf; | 1046 | struct buffer_head **j_wbuf; |
1047 | |||
1048 | /** | ||
1049 | * @j_wbufsize: | ||
1050 | * | ||
1051 | * Size of @j_wbuf array. | ||
1052 | */ | ||
990 | int j_wbufsize; | 1053 | int j_wbufsize; |
991 | 1054 | ||
992 | /* | 1055 | /** |
993 | * this is the pid of hte last person to run a synchronous operation | 1056 | * @j_last_sync_writer: |
994 | * through the journal | 1057 | * |
1058 | * The pid of the last person to run a synchronous operation | ||
1059 | * through the journal. | ||
995 | */ | 1060 | */ |
996 | pid_t j_last_sync_writer; | 1061 | pid_t j_last_sync_writer; |
997 | 1062 | ||
998 | /* | 1063 | /** |
999 | * the average amount of time in nanoseconds it takes to commit a | 1064 | * @j_average_commit_time: |
1065 | * | ||
1066 | * The average amount of time in nanoseconds it takes to commit a | ||
1000 | * transaction to disk. [j_state_lock] | 1067 | * transaction to disk. [j_state_lock] |
1001 | */ | 1068 | */ |
1002 | u64 j_average_commit_time; | 1069 | u64 j_average_commit_time; |
1003 | 1070 | ||
1004 | /* | 1071 | /** |
1005 | * minimum and maximum times that we should wait for | 1072 | * @j_min_batch_time: |
1006 | * additional filesystem operations to get batched into a | 1073 | * |
1007 | * synchronous handle in microseconds | 1074 | * Minimum time that we should wait for additional filesystem operations |
1075 | * to get batched into a synchronous handle in microseconds. | ||
1008 | */ | 1076 | */ |
1009 | u32 j_min_batch_time; | 1077 | u32 j_min_batch_time; |
1078 | |||
1079 | /** | ||
1080 | * @j_max_batch_time: | ||
1081 | * | ||
1082 | * Maximum time that we should wait for additional filesystem operations | ||
1083 | * to get batched into a synchronous handle in microseconds. | ||
1084 | */ | ||
1010 | u32 j_max_batch_time; | 1085 | u32 j_max_batch_time; |
1011 | 1086 | ||
1012 | /* This function is called when a transaction is closed */ | 1087 | /** |
1088 | * @j_commit_callback: | ||
1089 | * | ||
1090 | * This function is called when a transaction is closed. | ||
1091 | */ | ||
1013 | void (*j_commit_callback)(journal_t *, | 1092 | void (*j_commit_callback)(journal_t *, |
1014 | transaction_t *); | 1093 | transaction_t *); |
1015 | 1094 | ||
1016 | /* | 1095 | /* |
1017 | * Journal statistics | 1096 | * Journal statistics |
1018 | */ | 1097 | */ |
1098 | |||
1099 | /** | ||
1100 | * @j_history_lock: Protect the transactions statistics history. | ||
1101 | */ | ||
1019 | spinlock_t j_history_lock; | 1102 | spinlock_t j_history_lock; |
1103 | |||
1104 | /** | ||
1105 | * @j_proc_entry: procfs entry for the jbd statistics directory. | ||
1106 | */ | ||
1020 | struct proc_dir_entry *j_proc_entry; | 1107 | struct proc_dir_entry *j_proc_entry; |
1108 | |||
1109 | /** | ||
1110 | * @j_stats: Overall statistics. | ||
1111 | */ | ||
1021 | struct transaction_stats_s j_stats; | 1112 | struct transaction_stats_s j_stats; |
1022 | 1113 | ||
1023 | /* Failed journal commit ID */ | 1114 | /** |
1115 | * @j_failed_commit: Failed journal commit ID. | ||
1116 | */ | ||
1024 | unsigned int j_failed_commit; | 1117 | unsigned int j_failed_commit; |
1025 | 1118 | ||
1026 | /* | 1119 | /** |
1120 | * @j_private: | ||
1121 | * | ||
1027 | * An opaque pointer to fs-private information. ext3 puts its | 1122 | * An opaque pointer to fs-private information. ext3 puts its |
1028 | * superblock pointer here | 1123 | * superblock pointer here. |
1029 | */ | 1124 | */ |
1030 | void *j_private; | 1125 | void *j_private; |
1031 | 1126 | ||
1032 | /* Reference to checksum algorithm driver via cryptoapi */ | 1127 | /** |
1128 | * @j_chksum_driver: | ||
1129 | * | ||
1130 | * Reference to checksum algorithm driver via cryptoapi. | ||
1131 | */ | ||
1033 | struct crypto_shash *j_chksum_driver; | 1132 | struct crypto_shash *j_chksum_driver; |
1034 | 1133 | ||
1035 | /* Precomputed journal UUID checksum for seeding other checksums */ | 1134 | /** |
1135 | * @j_csum_seed: | ||
1136 | * | ||
1137 | * Precomputed journal UUID checksum for seeding other checksums. | ||
1138 | */ | ||
1036 | __u32 j_csum_seed; | 1139 | __u32 j_csum_seed; |
1037 | 1140 | ||
1038 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 1141 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
1039 | /* | 1142 | /** |
1143 | * @j_trans_commit_map: | ||
1144 | * | ||
1040 | * Lockdep entity to track transaction commit dependencies. Handles | 1145 | * Lockdep entity to track transaction commit dependencies. Handles |
1041 | * hold this "lock" for read, when we wait for commit, we acquire the | 1146 | * hold this "lock" for read, when we wait for commit, we acquire the |
1042 | * "lock" for writing. This matches the properties of jbd2 journalling | 1147 | * "lock" for writing. This matches the properties of jbd2 journalling |
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h index e0340ca08d98..b6a29c126cc4 100644 --- a/include/linux/jump_label.h +++ b/include/linux/jump_label.h | |||
@@ -393,7 +393,7 @@ extern bool ____wrong_branch_error(void); | |||
393 | branch = !arch_static_branch_jump(&(x)->key, true); \ | 393 | branch = !arch_static_branch_jump(&(x)->key, true); \ |
394 | else \ | 394 | else \ |
395 | branch = ____wrong_branch_error(); \ | 395 | branch = ____wrong_branch_error(); \ |
396 | branch; \ | 396 | likely(branch); \ |
397 | }) | 397 | }) |
398 | 398 | ||
399 | #define static_branch_unlikely(x) \ | 399 | #define static_branch_unlikely(x) \ |
@@ -405,7 +405,7 @@ extern bool ____wrong_branch_error(void); | |||
405 | branch = arch_static_branch(&(x)->key, false); \ | 405 | branch = arch_static_branch(&(x)->key, false); \ |
406 | else \ | 406 | else \ |
407 | branch = ____wrong_branch_error(); \ | 407 | branch = ____wrong_branch_error(); \ |
408 | branch; \ | 408 | unlikely(branch); \ |
409 | }) | 409 | }) |
410 | 410 | ||
411 | #else /* !HAVE_JUMP_LABEL */ | 411 | #else /* !HAVE_JUMP_LABEL */ |
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h index bd118a6c60cb..657a83b943f0 100644 --- a/include/linux/kallsyms.h +++ b/include/linux/kallsyms.h | |||
@@ -9,6 +9,10 @@ | |||
9 | #include <linux/errno.h> | 9 | #include <linux/errno.h> |
10 | #include <linux/kernel.h> | 10 | #include <linux/kernel.h> |
11 | #include <linux/stddef.h> | 11 | #include <linux/stddef.h> |
12 | #include <linux/mm.h> | ||
13 | #include <linux/module.h> | ||
14 | |||
15 | #include <asm/sections.h> | ||
12 | 16 | ||
13 | #define KSYM_NAME_LEN 128 | 17 | #define KSYM_NAME_LEN 128 |
14 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ | 18 | #define KSYM_SYMBOL_LEN (sizeof("%s+%#lx/%#lx [%s]") + (KSYM_NAME_LEN - 1) + \ |
@@ -16,6 +20,56 @@ | |||
16 | 20 | ||
17 | struct module; | 21 | struct module; |
18 | 22 | ||
23 | static inline int is_kernel_inittext(unsigned long addr) | ||
24 | { | ||
25 | if (addr >= (unsigned long)_sinittext | ||
26 | && addr <= (unsigned long)_einittext) | ||
27 | return 1; | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | static inline int is_kernel_text(unsigned long addr) | ||
32 | { | ||
33 | if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || | ||
34 | arch_is_kernel_text(addr)) | ||
35 | return 1; | ||
36 | return in_gate_area_no_mm(addr); | ||
37 | } | ||
38 | |||
39 | static inline int is_kernel(unsigned long addr) | ||
40 | { | ||
41 | if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) | ||
42 | return 1; | ||
43 | return in_gate_area_no_mm(addr); | ||
44 | } | ||
45 | |||
46 | static inline int is_ksym_addr(unsigned long addr) | ||
47 | { | ||
48 | if (IS_ENABLED(CONFIG_KALLSYMS_ALL)) | ||
49 | return is_kernel(addr); | ||
50 | |||
51 | return is_kernel_text(addr) || is_kernel_inittext(addr); | ||
52 | } | ||
53 | |||
54 | static inline void *dereference_symbol_descriptor(void *ptr) | ||
55 | { | ||
56 | #ifdef HAVE_DEREFERENCE_FUNCTION_DESCRIPTOR | ||
57 | struct module *mod; | ||
58 | |||
59 | ptr = dereference_kernel_function_descriptor(ptr); | ||
60 | if (is_ksym_addr((unsigned long)ptr)) | ||
61 | return ptr; | ||
62 | |||
63 | preempt_disable(); | ||
64 | mod = __module_address((unsigned long)ptr); | ||
65 | preempt_enable(); | ||
66 | |||
67 | if (mod) | ||
68 | ptr = dereference_module_function_descriptor(mod, ptr); | ||
69 | #endif | ||
70 | return ptr; | ||
71 | } | ||
72 | |||
19 | #ifdef CONFIG_KALLSYMS | 73 | #ifdef CONFIG_KALLSYMS |
20 | /* Lookup the address for a symbol. Returns 0 if not found. */ | 74 | /* Lookup the address for a symbol. Returns 0 if not found. */ |
21 | unsigned long kallsyms_lookup_name(const char *name); | 75 | unsigned long kallsyms_lookup_name(const char *name); |
@@ -40,9 +94,6 @@ extern int sprint_symbol(char *buffer, unsigned long address); | |||
40 | extern int sprint_symbol_no_offset(char *buffer, unsigned long address); | 94 | extern int sprint_symbol_no_offset(char *buffer, unsigned long address); |
41 | extern int sprint_backtrace(char *buffer, unsigned long address); | 95 | extern int sprint_backtrace(char *buffer, unsigned long address); |
42 | 96 | ||
43 | /* Look up a kernel symbol and print it to the kernel messages. */ | ||
44 | extern void __print_symbol(const char *fmt, unsigned long address); | ||
45 | |||
46 | int lookup_symbol_name(unsigned long addr, char *symname); | 97 | int lookup_symbol_name(unsigned long addr, char *symname); |
47 | int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); | 98 | int lookup_symbol_attrs(unsigned long addr, unsigned long *size, unsigned long *offset, char *modname, char *name); |
48 | 99 | ||
@@ -112,26 +163,11 @@ static inline int kallsyms_show_value(void) | |||
112 | return false; | 163 | return false; |
113 | } | 164 | } |
114 | 165 | ||
115 | /* Stupid that this does nothing, but I didn't create this mess. */ | ||
116 | #define __print_symbol(fmt, addr) | ||
117 | #endif /*CONFIG_KALLSYMS*/ | 166 | #endif /*CONFIG_KALLSYMS*/ |
118 | 167 | ||
119 | /* This macro allows us to keep printk typechecking */ | ||
120 | static __printf(1, 2) | ||
121 | void __check_printsym_format(const char *fmt, ...) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | static inline void print_symbol(const char *fmt, unsigned long addr) | ||
126 | { | ||
127 | __check_printsym_format(fmt, ""); | ||
128 | __print_symbol(fmt, (unsigned long) | ||
129 | __builtin_extract_return_addr((void *)addr)); | ||
130 | } | ||
131 | |||
132 | static inline void print_ip_sym(unsigned long ip) | 168 | static inline void print_ip_sym(unsigned long ip) |
133 | { | 169 | { |
134 | printk("[<%p>] %pS\n", (void *) ip, (void *) ip); | 170 | printk("[<%px>] %pS\n", (void *) ip, (void *) ip); |
135 | } | 171 | } |
136 | 172 | ||
137 | #endif /*_LINUX_KALLSYMS_H*/ | 173 | #endif /*_LINUX_KALLSYMS_H*/ |
diff --git a/include/linux/kasan.h b/include/linux/kasan.h index e3eb834c9a35..adc13474a53b 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h | |||
@@ -11,8 +11,6 @@ struct task_struct; | |||
11 | 11 | ||
12 | #ifdef CONFIG_KASAN | 12 | #ifdef CONFIG_KASAN |
13 | 13 | ||
14 | #define KASAN_SHADOW_SCALE_SHIFT 3 | ||
15 | |||
16 | #include <asm/kasan.h> | 14 | #include <asm/kasan.h> |
17 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
18 | 16 | ||
@@ -56,14 +54,14 @@ void kasan_poison_object_data(struct kmem_cache *cache, void *object); | |||
56 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); | 54 | void kasan_init_slab_obj(struct kmem_cache *cache, const void *object); |
57 | 55 | ||
58 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); | 56 | void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags); |
59 | void kasan_kfree_large(const void *ptr); | 57 | void kasan_kfree_large(void *ptr, unsigned long ip); |
60 | void kasan_poison_kfree(void *ptr); | 58 | void kasan_poison_kfree(void *ptr, unsigned long ip); |
61 | void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, | 59 | void kasan_kmalloc(struct kmem_cache *s, const void *object, size_t size, |
62 | gfp_t flags); | 60 | gfp_t flags); |
63 | void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); | 61 | void kasan_krealloc(const void *object, size_t new_size, gfp_t flags); |
64 | 62 | ||
65 | void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); | 63 | void kasan_slab_alloc(struct kmem_cache *s, void *object, gfp_t flags); |
66 | bool kasan_slab_free(struct kmem_cache *s, void *object); | 64 | bool kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip); |
67 | 65 | ||
68 | struct kasan_cache { | 66 | struct kasan_cache { |
69 | int alloc_meta_offset; | 67 | int alloc_meta_offset; |
@@ -108,8 +106,8 @@ static inline void kasan_init_slab_obj(struct kmem_cache *cache, | |||
108 | const void *object) {} | 106 | const void *object) {} |
109 | 107 | ||
110 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} | 108 | static inline void kasan_kmalloc_large(void *ptr, size_t size, gfp_t flags) {} |
111 | static inline void kasan_kfree_large(const void *ptr) {} | 109 | static inline void kasan_kfree_large(void *ptr, unsigned long ip) {} |
112 | static inline void kasan_poison_kfree(void *ptr) {} | 110 | static inline void kasan_poison_kfree(void *ptr, unsigned long ip) {} |
113 | static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, | 111 | static inline void kasan_kmalloc(struct kmem_cache *s, const void *object, |
114 | size_t size, gfp_t flags) {} | 112 | size_t size, gfp_t flags) {} |
115 | static inline void kasan_krealloc(const void *object, size_t new_size, | 113 | static inline void kasan_krealloc(const void *object, size_t new_size, |
@@ -117,7 +115,8 @@ static inline void kasan_krealloc(const void *object, size_t new_size, | |||
117 | 115 | ||
118 | static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, | 116 | static inline void kasan_slab_alloc(struct kmem_cache *s, void *object, |
119 | gfp_t flags) {} | 117 | gfp_t flags) {} |
120 | static inline bool kasan_slab_free(struct kmem_cache *s, void *object) | 118 | static inline bool kasan_slab_free(struct kmem_cache *s, void *object, |
119 | unsigned long ip) | ||
121 | { | 120 | { |
122 | return false; | 121 | return false; |
123 | } | 122 | } |
diff --git a/include/linux/kfifo.h b/include/linux/kfifo.h index 7b45959ebd92..e251533a5939 100644 --- a/include/linux/kfifo.h +++ b/include/linux/kfifo.h | |||
@@ -113,7 +113,8 @@ struct kfifo_rec_ptr_2 __STRUCT_KFIFO_PTR(unsigned char, 2, void); | |||
113 | * array is a part of the structure and the fifo type where the array is | 113 | * array is a part of the structure and the fifo type where the array is |
114 | * outside of the fifo structure. | 114 | * outside of the fifo structure. |
115 | */ | 115 | */ |
116 | #define __is_kfifo_ptr(fifo) (sizeof(*fifo) == sizeof(struct __kfifo)) | 116 | #define __is_kfifo_ptr(fifo) \ |
117 | (sizeof(*fifo) == sizeof(STRUCT_KFIFO_PTR(typeof(*(fifo)->type)))) | ||
117 | 118 | ||
118 | /** | 119 | /** |
119 | * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object | 120 | * DECLARE_KFIFO_PTR - macro to declare a fifo pointer object |
diff --git a/include/linux/kobject.h b/include/linux/kobject.h index e0a6205caa71..7f6f93c3df9c 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* | 2 | /* |
2 | * kobject.h - generic kernel object infrastructure. | 3 | * kobject.h - generic kernel object infrastructure. |
3 | * | 4 | * |
@@ -6,8 +7,6 @@ | |||
6 | * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> | 7 | * Copyright (c) 2006-2008 Greg Kroah-Hartman <greg@kroah.com> |
7 | * Copyright (c) 2006-2008 Novell Inc. | 8 | * Copyright (c) 2006-2008 Novell Inc. |
8 | * | 9 | * |
9 | * This file is released under the GPLv2. | ||
10 | * | ||
11 | * Please read Documentation/kobject.txt before using the kobject | 10 | * Please read Documentation/kobject.txt before using the kobject |
12 | * interface, ESPECIALLY the parts about reference counts and object | 11 | * interface, ESPECIALLY the parts about reference counts and object |
13 | * destructors. | 12 | * destructors. |
diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h index df32d2508290..069aa2ebef90 100644 --- a/include/linux/kobject_ns.h +++ b/include/linux/kobject_ns.h | |||
@@ -1,3 +1,4 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
1 | /* Kernel object name space definitions | 2 | /* Kernel object name space definitions |
2 | * | 3 | * |
3 | * Copyright (c) 2002-2003 Patrick Mochel | 4 | * Copyright (c) 2002-2003 Patrick Mochel |
@@ -7,8 +8,6 @@ | |||
7 | * | 8 | * |
8 | * Split from kobject.h by David Howells (dhowells@redhat.com) | 9 | * Split from kobject.h by David Howells (dhowells@redhat.com) |
9 | * | 10 | * |
10 | * This file is released under the GPLv2. | ||
11 | * | ||
12 | * Please read Documentation/kobject.txt before using the kobject | 11 | * Please read Documentation/kobject.txt before using the kobject |
13 | * interface, ESPECIALLY the parts about reference counts and object | 12 | * interface, ESPECIALLY the parts about reference counts and object |
14 | * destructors. | 13 | * destructors. |
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h index e97966d1fb8d..700efaa9e115 100644 --- a/include/linux/led-class-flash.h +++ b/include/linux/led-class-flash.h | |||
@@ -121,6 +121,8 @@ extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev); | |||
121 | static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, | 121 | static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, |
122 | bool state) | 122 | bool state) |
123 | { | 123 | { |
124 | if (!fled_cdev) | ||
125 | return -EINVAL; | ||
124 | return fled_cdev->ops->strobe_set(fled_cdev, state); | 126 | return fled_cdev->ops->strobe_set(fled_cdev, state); |
125 | } | 127 | } |
126 | 128 | ||
@@ -136,6 +138,8 @@ static inline int led_set_flash_strobe(struct led_classdev_flash *fled_cdev, | |||
136 | static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev, | 138 | static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev, |
137 | bool *state) | 139 | bool *state) |
138 | { | 140 | { |
141 | if (!fled_cdev) | ||
142 | return -EINVAL; | ||
139 | if (fled_cdev->ops->strobe_get) | 143 | if (fled_cdev->ops->strobe_get) |
140 | return fled_cdev->ops->strobe_get(fled_cdev, state); | 144 | return fled_cdev->ops->strobe_get(fled_cdev, state); |
141 | 145 | ||
diff --git a/include/linux/libfdt.h b/include/linux/libfdt.h index 27ba06e5d117..90ed4ebfa692 100644 --- a/include/linux/libfdt.h +++ b/include/linux/libfdt.h | |||
@@ -3,7 +3,6 @@ | |||
3 | #define _INCLUDE_LIBFDT_H_ | 3 | #define _INCLUDE_LIBFDT_H_ |
4 | 4 | ||
5 | #include <linux/libfdt_env.h> | 5 | #include <linux/libfdt_env.h> |
6 | #include "../../scripts/dtc/libfdt/fdt.h" | ||
7 | #include "../../scripts/dtc/libfdt/libfdt.h" | 6 | #include "../../scripts/dtc/libfdt/libfdt.h" |
8 | 7 | ||
9 | #endif /* _INCLUDE_LIBFDT_H_ */ | 8 | #endif /* _INCLUDE_LIBFDT_H_ */ |
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index f8109ddb5ef1..ff855ed965fb 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
@@ -47,6 +47,17 @@ enum { | |||
47 | 47 | ||
48 | /* region flag indicating to direct-map persistent memory by default */ | 48 | /* region flag indicating to direct-map persistent memory by default */ |
49 | ND_REGION_PAGEMAP = 0, | 49 | ND_REGION_PAGEMAP = 0, |
50 | /* | ||
51 | * Platform ensures entire CPU store data path is flushed to pmem on | ||
52 | * system power loss. | ||
53 | */ | ||
54 | ND_REGION_PERSIST_CACHE = 1, | ||
55 | /* | ||
56 | * Platform provides mechanisms to automatically flush outstanding | ||
57 | * write data from memory controler to pmem on system power loss. | ||
58 | * (ADR) | ||
59 | */ | ||
60 | ND_REGION_PERSIST_MEMCTRL = 2, | ||
50 | 61 | ||
51 | /* mark newly adjusted resources as requiring a label update */ | 62 | /* mark newly adjusted resources as requiring a label update */ |
52 | DPA_RESOURCE_ADJUSTED = 1 << 0, | 63 | DPA_RESOURCE_ADJUSTED = 1 << 0, |
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h index 2d1d9de06728..7f4b60abdf27 100644 --- a/include/linux/lightnvm.h +++ b/include/linux/lightnvm.h | |||
@@ -50,10 +50,7 @@ struct nvm_id; | |||
50 | struct nvm_dev; | 50 | struct nvm_dev; |
51 | struct nvm_tgt_dev; | 51 | struct nvm_tgt_dev; |
52 | 52 | ||
53 | typedef int (nvm_l2p_update_fn)(u64, u32, __le64 *, void *); | ||
54 | typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); | 53 | typedef int (nvm_id_fn)(struct nvm_dev *, struct nvm_id *); |
55 | typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32, | ||
56 | nvm_l2p_update_fn *, void *); | ||
57 | typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); | 54 | typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *); |
58 | typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); | 55 | typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int); |
59 | typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); | 56 | typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *); |
@@ -66,7 +63,6 @@ typedef void (nvm_dev_dma_free_fn)(void *, void*, dma_addr_t); | |||
66 | 63 | ||
67 | struct nvm_dev_ops { | 64 | struct nvm_dev_ops { |
68 | nvm_id_fn *identity; | 65 | nvm_id_fn *identity; |
69 | nvm_get_l2p_tbl_fn *get_l2p_tbl; | ||
70 | nvm_op_bb_tbl_fn *get_bb_tbl; | 66 | nvm_op_bb_tbl_fn *get_bb_tbl; |
71 | nvm_op_set_bb_fn *set_bb_tbl; | 67 | nvm_op_set_bb_fn *set_bb_tbl; |
72 | 68 | ||
@@ -112,8 +108,6 @@ enum { | |||
112 | NVM_RSP_WARN_HIGHECC = 0x4700, | 108 | NVM_RSP_WARN_HIGHECC = 0x4700, |
113 | 109 | ||
114 | /* Device opcodes */ | 110 | /* Device opcodes */ |
115 | NVM_OP_HBREAD = 0x02, | ||
116 | NVM_OP_HBWRITE = 0x81, | ||
117 | NVM_OP_PWRITE = 0x91, | 111 | NVM_OP_PWRITE = 0x91, |
118 | NVM_OP_PREAD = 0x92, | 112 | NVM_OP_PREAD = 0x92, |
119 | NVM_OP_ERASE = 0x90, | 113 | NVM_OP_ERASE = 0x90, |
@@ -165,12 +159,16 @@ struct nvm_id_group { | |||
165 | u8 fmtype; | 159 | u8 fmtype; |
166 | u8 num_ch; | 160 | u8 num_ch; |
167 | u8 num_lun; | 161 | u8 num_lun; |
168 | u8 num_pln; | 162 | u16 num_chk; |
169 | u16 num_blk; | 163 | u16 clba; |
170 | u16 num_pg; | ||
171 | u16 fpg_sz; | ||
172 | u16 csecs; | 164 | u16 csecs; |
173 | u16 sos; | 165 | u16 sos; |
166 | |||
167 | u16 ws_min; | ||
168 | u16 ws_opt; | ||
169 | u16 ws_seq; | ||
170 | u16 ws_per_chk; | ||
171 | |||
174 | u32 trdt; | 172 | u32 trdt; |
175 | u32 trdm; | 173 | u32 trdm; |
176 | u32 tprt; | 174 | u32 tprt; |
@@ -181,7 +179,10 @@ struct nvm_id_group { | |||
181 | u32 mccap; | 179 | u32 mccap; |
182 | u16 cpar; | 180 | u16 cpar; |
183 | 181 | ||
184 | struct nvm_id_lp_tbl lptbl; | 182 | /* 1.2 compatibility */ |
183 | u8 num_pln; | ||
184 | u16 num_pg; | ||
185 | u16 fpg_sz; | ||
185 | }; | 186 | }; |
186 | 187 | ||
187 | struct nvm_addr_format { | 188 | struct nvm_addr_format { |
@@ -217,6 +218,10 @@ struct nvm_target { | |||
217 | 218 | ||
218 | #define ADDR_EMPTY (~0ULL) | 219 | #define ADDR_EMPTY (~0ULL) |
219 | 220 | ||
221 | #define NVM_TARGET_DEFAULT_OP (101) | ||
222 | #define NVM_TARGET_MIN_OP (3) | ||
223 | #define NVM_TARGET_MAX_OP (80) | ||
224 | |||
220 | #define NVM_VERSION_MAJOR 1 | 225 | #define NVM_VERSION_MAJOR 1 |
221 | #define NVM_VERSION_MINOR 0 | 226 | #define NVM_VERSION_MINOR 0 |
222 | #define NVM_VERSION_PATCH 0 | 227 | #define NVM_VERSION_PATCH 0 |
@@ -239,7 +244,6 @@ struct nvm_rq { | |||
239 | void *meta_list; | 244 | void *meta_list; |
240 | dma_addr_t dma_meta_list; | 245 | dma_addr_t dma_meta_list; |
241 | 246 | ||
242 | struct completion *wait; | ||
243 | nvm_end_io_fn *end_io; | 247 | nvm_end_io_fn *end_io; |
244 | 248 | ||
245 | uint8_t opcode; | 249 | uint8_t opcode; |
@@ -268,31 +272,38 @@ enum { | |||
268 | NVM_BLK_ST_BAD = 0x8, /* Bad block */ | 272 | NVM_BLK_ST_BAD = 0x8, /* Bad block */ |
269 | }; | 273 | }; |
270 | 274 | ||
275 | |||
271 | /* Device generic information */ | 276 | /* Device generic information */ |
272 | struct nvm_geo { | 277 | struct nvm_geo { |
278 | /* generic geometry */ | ||
273 | int nr_chnls; | 279 | int nr_chnls; |
274 | int nr_luns; | 280 | int all_luns; /* across channels */ |
275 | int luns_per_chnl; /* -1 if channels are not symmetric */ | 281 | int nr_luns; /* per channel */ |
276 | int nr_planes; | 282 | int nr_chks; /* per lun */ |
277 | int sec_per_pg; /* only sectors for a single page */ | 283 | |
278 | int pgs_per_blk; | ||
279 | int blks_per_lun; | ||
280 | int fpg_size; | ||
281 | int pfpg_size; /* size of buffer if all pages are to be read */ | ||
282 | int sec_size; | 284 | int sec_size; |
283 | int oob_size; | 285 | int oob_size; |
284 | int mccap; | 286 | int mccap; |
285 | struct nvm_addr_format ppaf; | ||
286 | 287 | ||
287 | /* Calculated/Cached values. These do not reflect the actual usable | 288 | int sec_per_chk; |
288 | * blocks at run-time. | 289 | int sec_per_lun; |
289 | */ | 290 | |
291 | int ws_min; | ||
292 | int ws_opt; | ||
293 | int ws_seq; | ||
294 | int ws_per_chk; | ||
295 | |||
290 | int max_rq_size; | 296 | int max_rq_size; |
291 | int plane_mode; /* drive device in single, double or quad mode */ | ||
292 | 297 | ||
298 | int op; | ||
299 | |||
300 | struct nvm_addr_format ppaf; | ||
301 | |||
302 | /* Legacy 1.2 specific geometry */ | ||
303 | int plane_mode; /* drive device in single, double or quad mode */ | ||
304 | int nr_planes; | ||
305 | int sec_per_pg; /* only sectors for a single page */ | ||
293 | int sec_per_pl; /* all sectors across planes */ | 306 | int sec_per_pl; /* all sectors across planes */ |
294 | int sec_per_blk; | ||
295 | int sec_per_lun; | ||
296 | }; | 307 | }; |
297 | 308 | ||
298 | /* sub-device structure */ | 309 | /* sub-device structure */ |
@@ -320,10 +331,6 @@ struct nvm_dev { | |||
320 | /* Device information */ | 331 | /* Device information */ |
321 | struct nvm_geo geo; | 332 | struct nvm_geo geo; |
322 | 333 | ||
323 | /* lower page table */ | ||
324 | int lps_per_blk; | ||
325 | int *lptbl; | ||
326 | |||
327 | unsigned long total_secs; | 334 | unsigned long total_secs; |
328 | 335 | ||
329 | unsigned long *lun_map; | 336 | unsigned long *lun_map; |
@@ -346,36 +353,6 @@ struct nvm_dev { | |||
346 | struct list_head targets; | 353 | struct list_head targets; |
347 | }; | 354 | }; |
348 | 355 | ||
349 | static inline struct ppa_addr linear_to_generic_addr(struct nvm_geo *geo, | ||
350 | u64 pba) | ||
351 | { | ||
352 | struct ppa_addr l; | ||
353 | int secs, pgs, blks, luns; | ||
354 | sector_t ppa = pba; | ||
355 | |||
356 | l.ppa = 0; | ||
357 | |||
358 | div_u64_rem(ppa, geo->sec_per_pg, &secs); | ||
359 | l.g.sec = secs; | ||
360 | |||
361 | sector_div(ppa, geo->sec_per_pg); | ||
362 | div_u64_rem(ppa, geo->pgs_per_blk, &pgs); | ||
363 | l.g.pg = pgs; | ||
364 | |||
365 | sector_div(ppa, geo->pgs_per_blk); | ||
366 | div_u64_rem(ppa, geo->blks_per_lun, &blks); | ||
367 | l.g.blk = blks; | ||
368 | |||
369 | sector_div(ppa, geo->blks_per_lun); | ||
370 | div_u64_rem(ppa, geo->luns_per_chnl, &luns); | ||
371 | l.g.lun = luns; | ||
372 | |||
373 | sector_div(ppa, geo->luns_per_chnl); | ||
374 | l.g.ch = ppa; | ||
375 | |||
376 | return l; | ||
377 | } | ||
378 | |||
379 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, | 356 | static inline struct ppa_addr generic_to_dev_addr(struct nvm_tgt_dev *tgt_dev, |
380 | struct ppa_addr r) | 357 | struct ppa_addr r) |
381 | { | 358 | { |
@@ -418,25 +395,6 @@ static inline struct ppa_addr dev_to_generic_addr(struct nvm_tgt_dev *tgt_dev, | |||
418 | return l; | 395 | return l; |
419 | } | 396 | } |
420 | 397 | ||
421 | static inline int ppa_empty(struct ppa_addr ppa_addr) | ||
422 | { | ||
423 | return (ppa_addr.ppa == ADDR_EMPTY); | ||
424 | } | ||
425 | |||
426 | static inline void ppa_set_empty(struct ppa_addr *ppa_addr) | ||
427 | { | ||
428 | ppa_addr->ppa = ADDR_EMPTY; | ||
429 | } | ||
430 | |||
431 | static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2) | ||
432 | { | ||
433 | if (ppa_empty(ppa1) || ppa_empty(ppa2)) | ||
434 | return 0; | ||
435 | |||
436 | return ((ppa1.g.ch == ppa2.g.ch) && (ppa1.g.lun == ppa2.g.lun) && | ||
437 | (ppa1.g.blk == ppa2.g.blk)); | ||
438 | } | ||
439 | |||
440 | typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); | 398 | typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *); |
441 | typedef sector_t (nvm_tgt_capacity_fn)(void *); | 399 | typedef sector_t (nvm_tgt_capacity_fn)(void *); |
442 | typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, | 400 | typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *, |
@@ -481,17 +439,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *, | |||
481 | extern int nvm_max_phys_sects(struct nvm_tgt_dev *); | 439 | extern int nvm_max_phys_sects(struct nvm_tgt_dev *); |
482 | extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); | 440 | extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *); |
483 | extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); | 441 | extern int nvm_submit_io_sync(struct nvm_tgt_dev *, struct nvm_rq *); |
484 | extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int); | ||
485 | extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *, | ||
486 | void *); | ||
487 | extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t); | ||
488 | extern void nvm_put_area(struct nvm_tgt_dev *, sector_t); | ||
489 | extern void nvm_end_io(struct nvm_rq *); | 442 | extern void nvm_end_io(struct nvm_rq *); |
490 | extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); | 443 | extern int nvm_bb_tbl_fold(struct nvm_dev *, u8 *, int); |
491 | extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); | 444 | extern int nvm_get_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr, u8 *); |
492 | 445 | ||
493 | extern void nvm_part_to_tgt(struct nvm_dev *, sector_t *, int); | ||
494 | |||
495 | #else /* CONFIG_NVM */ | 446 | #else /* CONFIG_NVM */ |
496 | struct nvm_dev_ops; | 447 | struct nvm_dev_ops; |
497 | 448 | ||
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index fc5c1be3f6f4..4754f01c1abb 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h | |||
@@ -40,7 +40,6 @@ | |||
40 | * @new_func: pointer to the patched function code | 40 | * @new_func: pointer to the patched function code |
41 | * @old_sympos: a hint indicating which symbol position the old function | 41 | * @old_sympos: a hint indicating which symbol position the old function |
42 | * can be found (optional) | 42 | * can be found (optional) |
43 | * @immediate: patch the func immediately, bypassing safety mechanisms | ||
44 | * @old_addr: the address of the function being patched | 43 | * @old_addr: the address of the function being patched |
45 | * @kobj: kobject for sysfs resources | 44 | * @kobj: kobject for sysfs resources |
46 | * @stack_node: list node for klp_ops func_stack list | 45 | * @stack_node: list node for klp_ops func_stack list |
@@ -76,7 +75,6 @@ struct klp_func { | |||
76 | * in kallsyms for the given object is used. | 75 | * in kallsyms for the given object is used. |
77 | */ | 76 | */ |
78 | unsigned long old_sympos; | 77 | unsigned long old_sympos; |
79 | bool immediate; | ||
80 | 78 | ||
81 | /* internal */ | 79 | /* internal */ |
82 | unsigned long old_addr; | 80 | unsigned long old_addr; |
@@ -137,7 +135,6 @@ struct klp_object { | |||
137 | * struct klp_patch - patch structure for live patching | 135 | * struct klp_patch - patch structure for live patching |
138 | * @mod: reference to the live patch module | 136 | * @mod: reference to the live patch module |
139 | * @objs: object entries for kernel objects to be patched | 137 | * @objs: object entries for kernel objects to be patched |
140 | * @immediate: patch all funcs immediately, bypassing safety mechanisms | ||
141 | * @list: list node for global list of registered patches | 138 | * @list: list node for global list of registered patches |
142 | * @kobj: kobject for sysfs resources | 139 | * @kobj: kobject for sysfs resources |
143 | * @enabled: the patch is enabled (but operation may be incomplete) | 140 | * @enabled: the patch is enabled (but operation may be incomplete) |
@@ -147,7 +144,6 @@ struct klp_patch { | |||
147 | /* external */ | 144 | /* external */ |
148 | struct module *mod; | 145 | struct module *mod; |
149 | struct klp_object *objs; | 146 | struct klp_object *objs; |
150 | bool immediate; | ||
151 | 147 | ||
152 | /* internal */ | 148 | /* internal */ |
153 | struct list_head list; | 149 | struct list_head list; |
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h index d7d313fb9cd4..4fd95dbeb52f 100644 --- a/include/linux/lockd/lockd.h +++ b/include/linux/lockd/lockd.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <net/ipv6.h> | 17 | #include <net/ipv6.h> |
18 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
19 | #include <linux/kref.h> | 19 | #include <linux/kref.h> |
20 | #include <linux/refcount.h> | ||
20 | #include <linux/utsname.h> | 21 | #include <linux/utsname.h> |
21 | #include <linux/lockd/bind.h> | 22 | #include <linux/lockd/bind.h> |
22 | #include <linux/lockd/xdr.h> | 23 | #include <linux/lockd/xdr.h> |
@@ -58,7 +59,7 @@ struct nlm_host { | |||
58 | u32 h_state; /* pseudo-state counter */ | 59 | u32 h_state; /* pseudo-state counter */ |
59 | u32 h_nsmstate; /* true remote NSM state */ | 60 | u32 h_nsmstate; /* true remote NSM state */ |
60 | u32 h_pidcount; /* Pseudopids */ | 61 | u32 h_pidcount; /* Pseudopids */ |
61 | atomic_t h_count; /* reference count */ | 62 | refcount_t h_count; /* reference count */ |
62 | struct mutex h_mutex; /* mutex for pmap binding */ | 63 | struct mutex h_mutex; /* mutex for pmap binding */ |
63 | unsigned long h_nextrebind; /* next portmap call */ | 64 | unsigned long h_nextrebind; /* next portmap call */ |
64 | unsigned long h_expires; /* eligible for GC */ | 65 | unsigned long h_expires; /* eligible for GC */ |
@@ -83,7 +84,7 @@ struct nlm_host { | |||
83 | 84 | ||
84 | struct nsm_handle { | 85 | struct nsm_handle { |
85 | struct list_head sm_link; | 86 | struct list_head sm_link; |
86 | atomic_t sm_count; | 87 | refcount_t sm_count; |
87 | char *sm_mon_name; | 88 | char *sm_mon_name; |
88 | char *sm_name; | 89 | char *sm_name; |
89 | struct sockaddr_storage sm_addr; | 90 | struct sockaddr_storage sm_addr; |
@@ -122,7 +123,7 @@ static inline struct sockaddr *nlm_srcaddr(const struct nlm_host *host) | |||
122 | */ | 123 | */ |
123 | struct nlm_lockowner { | 124 | struct nlm_lockowner { |
124 | struct list_head list; | 125 | struct list_head list; |
125 | atomic_t count; | 126 | refcount_t count; |
126 | 127 | ||
127 | struct nlm_host *host; | 128 | struct nlm_host *host; |
128 | fl_owner_t owner; | 129 | fl_owner_t owner; |
@@ -136,7 +137,7 @@ struct nlm_wait; | |||
136 | */ | 137 | */ |
137 | #define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) | 138 | #define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u) |
138 | struct nlm_rqst { | 139 | struct nlm_rqst { |
139 | atomic_t a_count; | 140 | refcount_t a_count; |
140 | unsigned int a_flags; /* initial RPC task flags */ | 141 | unsigned int a_flags; /* initial RPC task flags */ |
141 | struct nlm_host * a_host; /* host handle */ | 142 | struct nlm_host * a_host; /* host handle */ |
142 | struct nlm_args a_args; /* arguments */ | 143 | struct nlm_args a_args; /* arguments */ |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index 795634ee5aa5..6fc77d4dbdcd 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
@@ -337,9 +337,9 @@ extern void lock_release(struct lockdep_map *lock, int nested, | |||
337 | /* | 337 | /* |
338 | * Same "read" as for lock_acquire(), except -1 means any. | 338 | * Same "read" as for lock_acquire(), except -1 means any. |
339 | */ | 339 | */ |
340 | extern int lock_is_held_type(struct lockdep_map *lock, int read); | 340 | extern int lock_is_held_type(const struct lockdep_map *lock, int read); |
341 | 341 | ||
342 | static inline int lock_is_held(struct lockdep_map *lock) | 342 | static inline int lock_is_held(const struct lockdep_map *lock) |
343 | { | 343 | { |
344 | return lock_is_held_type(lock, -1); | 344 | return lock_is_held_type(lock, -1); |
345 | } | 345 | } |
diff --git a/include/linux/lockref.h b/include/linux/lockref.h index ef3c9342e119..2eac32095113 100644 --- a/include/linux/lockref.h +++ b/include/linux/lockref.h | |||
@@ -44,7 +44,7 @@ extern void lockref_mark_dead(struct lockref *); | |||
44 | extern int lockref_get_not_dead(struct lockref *); | 44 | extern int lockref_get_not_dead(struct lockref *); |
45 | 45 | ||
46 | /* Must be called under spinlock for reliable results */ | 46 | /* Must be called under spinlock for reliable results */ |
47 | static inline int __lockref_is_dead(const struct lockref *l) | 47 | static inline bool __lockref_is_dead(const struct lockref *l) |
48 | { | 48 | { |
49 | return ((int)l->count < 0); | 49 | return ((int)l->count < 0); |
50 | } | 50 | } |
diff --git a/include/linux/mdio.h b/include/linux/mdio.h index ca08ab16ecdc..2cfffe586885 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <uapi/linux/mdio.h> | 12 | #include <uapi/linux/mdio.h> |
13 | #include <linux/mod_devicetable.h> | 13 | #include <linux/mod_devicetable.h> |
14 | 14 | ||
15 | struct gpio_desc; | ||
15 | struct mii_bus; | 16 | struct mii_bus; |
16 | 17 | ||
17 | /* Multiple levels of nesting are possible. However typically this is | 18 | /* Multiple levels of nesting are possible. However typically this is |
@@ -39,6 +40,9 @@ struct mdio_device { | |||
39 | /* Bus address of the MDIO device (0-31) */ | 40 | /* Bus address of the MDIO device (0-31) */ |
40 | int addr; | 41 | int addr; |
41 | int flags; | 42 | int flags; |
43 | struct gpio_desc *reset; | ||
44 | unsigned int reset_assert_delay; | ||
45 | unsigned int reset_deassert_delay; | ||
42 | }; | 46 | }; |
43 | #define to_mdio_device(d) container_of(d, struct mdio_device, dev) | 47 | #define to_mdio_device(d) container_of(d, struct mdio_device, dev) |
44 | 48 | ||
@@ -71,6 +75,7 @@ void mdio_device_free(struct mdio_device *mdiodev); | |||
71 | struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); | 75 | struct mdio_device *mdio_device_create(struct mii_bus *bus, int addr); |
72 | int mdio_device_register(struct mdio_device *mdiodev); | 76 | int mdio_device_register(struct mdio_device *mdiodev); |
73 | void mdio_device_remove(struct mdio_device *mdiodev); | 77 | void mdio_device_remove(struct mdio_device *mdiodev); |
78 | void mdio_device_reset(struct mdio_device *mdiodev, int value); | ||
74 | int mdio_driver_register(struct mdio_driver *drv); | 79 | int mdio_driver_register(struct mdio_driver *drv); |
75 | void mdio_driver_unregister(struct mdio_driver *drv); | 80 | void mdio_driver_unregister(struct mdio_driver *drv); |
76 | int mdio_device_bus_match(struct device *dev, struct device_driver *drv); | 81 | int mdio_device_bus_match(struct device *dev, struct device_driver *drv); |
@@ -257,6 +262,9 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) | |||
257 | return reg; | 262 | return reg; |
258 | } | 263 | } |
259 | 264 | ||
265 | int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); | ||
266 | int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); | ||
267 | |||
260 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); | 268 | int mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); |
261 | int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); | 269 | int mdiobus_read_nested(struct mii_bus *bus, int addr, u32 regnum); |
262 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); | 270 | int mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 7ed0f7782d16..8be5077efb5f 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
@@ -332,8 +332,8 @@ void memblock_enforce_memory_limit(phys_addr_t memory_limit); | |||
332 | void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); | 332 | void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size); |
333 | void memblock_mem_limit_remove_map(phys_addr_t limit); | 333 | void memblock_mem_limit_remove_map(phys_addr_t limit); |
334 | bool memblock_is_memory(phys_addr_t addr); | 334 | bool memblock_is_memory(phys_addr_t addr); |
335 | int memblock_is_map_memory(phys_addr_t addr); | 335 | bool memblock_is_map_memory(phys_addr_t addr); |
336 | int memblock_is_region_memory(phys_addr_t base, phys_addr_t size); | 336 | bool memblock_is_region_memory(phys_addr_t base, phys_addr_t size); |
337 | bool memblock_is_reserved(phys_addr_t addr); | 337 | bool memblock_is_reserved(phys_addr_t addr); |
338 | bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); | 338 | bool memblock_is_region_reserved(phys_addr_t base, phys_addr_t size); |
339 | 339 | ||
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 69966c461d1c..882046863581 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -108,7 +108,10 @@ struct lruvec_stat { | |||
108 | */ | 108 | */ |
109 | struct mem_cgroup_per_node { | 109 | struct mem_cgroup_per_node { |
110 | struct lruvec lruvec; | 110 | struct lruvec lruvec; |
111 | struct lruvec_stat __percpu *lruvec_stat; | 111 | |
112 | struct lruvec_stat __percpu *lruvec_stat_cpu; | ||
113 | atomic_long_t lruvec_stat[NR_VM_NODE_STAT_ITEMS]; | ||
114 | |||
112 | unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; | 115 | unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS]; |
113 | 116 | ||
114 | struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; | 117 | struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1]; |
@@ -227,10 +230,10 @@ struct mem_cgroup { | |||
227 | spinlock_t move_lock; | 230 | spinlock_t move_lock; |
228 | struct task_struct *move_lock_task; | 231 | struct task_struct *move_lock_task; |
229 | unsigned long move_lock_flags; | 232 | unsigned long move_lock_flags; |
230 | /* | 233 | |
231 | * percpu counter. | 234 | struct mem_cgroup_stat_cpu __percpu *stat_cpu; |
232 | */ | 235 | atomic_long_t stat[MEMCG_NR_STAT]; |
233 | struct mem_cgroup_stat_cpu __percpu *stat; | 236 | atomic_long_t events[MEMCG_NR_EVENTS]; |
234 | 237 | ||
235 | unsigned long socket_pressure; | 238 | unsigned long socket_pressure; |
236 | 239 | ||
@@ -265,6 +268,12 @@ struct mem_cgroup { | |||
265 | /* WARNING: nodeinfo must be the last member here */ | 268 | /* WARNING: nodeinfo must be the last member here */ |
266 | }; | 269 | }; |
267 | 270 | ||
271 | /* | ||
272 | * size of first charge trial. "32" comes from vmscan.c's magic value. | ||
273 | * TODO: maybe necessary to use big numbers in big irons. | ||
274 | */ | ||
275 | #define MEMCG_CHARGE_BATCH 32U | ||
276 | |||
268 | extern struct mem_cgroup *root_mem_cgroup; | 277 | extern struct mem_cgroup *root_mem_cgroup; |
269 | 278 | ||
270 | static inline bool mem_cgroup_disabled(void) | 279 | static inline bool mem_cgroup_disabled(void) |
@@ -272,13 +281,6 @@ static inline bool mem_cgroup_disabled(void) | |||
272 | return !cgroup_subsys_enabled(memory_cgrp_subsys); | 281 | return !cgroup_subsys_enabled(memory_cgrp_subsys); |
273 | } | 282 | } |
274 | 283 | ||
275 | static inline void mem_cgroup_event(struct mem_cgroup *memcg, | ||
276 | enum memcg_event_item event) | ||
277 | { | ||
278 | this_cpu_inc(memcg->stat->events[event]); | ||
279 | cgroup_file_notify(&memcg->events_file); | ||
280 | } | ||
281 | |||
282 | bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); | 284 | bool mem_cgroup_low(struct mem_cgroup *root, struct mem_cgroup *memcg); |
283 | 285 | ||
284 | int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, | 286 | int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm, |
@@ -492,32 +494,38 @@ void unlock_page_memcg(struct page *page); | |||
492 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, | 494 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, |
493 | int idx) | 495 | int idx) |
494 | { | 496 | { |
495 | long val = 0; | 497 | long x = atomic_long_read(&memcg->stat[idx]); |
496 | int cpu; | 498 | #ifdef CONFIG_SMP |
497 | 499 | if (x < 0) | |
498 | for_each_possible_cpu(cpu) | 500 | x = 0; |
499 | val += per_cpu(memcg->stat->count[idx], cpu); | 501 | #endif |
500 | 502 | return x; | |
501 | if (val < 0) | ||
502 | val = 0; | ||
503 | |||
504 | return val; | ||
505 | } | 503 | } |
506 | 504 | ||
507 | /* idx can be of type enum memcg_stat_item or node_stat_item */ | 505 | /* idx can be of type enum memcg_stat_item or node_stat_item */ |
508 | static inline void __mod_memcg_state(struct mem_cgroup *memcg, | 506 | static inline void __mod_memcg_state(struct mem_cgroup *memcg, |
509 | int idx, int val) | 507 | int idx, int val) |
510 | { | 508 | { |
511 | if (!mem_cgroup_disabled()) | 509 | long x; |
512 | __this_cpu_add(memcg->stat->count[idx], val); | 510 | |
511 | if (mem_cgroup_disabled()) | ||
512 | return; | ||
513 | |||
514 | x = val + __this_cpu_read(memcg->stat_cpu->count[idx]); | ||
515 | if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { | ||
516 | atomic_long_add(x, &memcg->stat[idx]); | ||
517 | x = 0; | ||
518 | } | ||
519 | __this_cpu_write(memcg->stat_cpu->count[idx], x); | ||
513 | } | 520 | } |
514 | 521 | ||
515 | /* idx can be of type enum memcg_stat_item or node_stat_item */ | 522 | /* idx can be of type enum memcg_stat_item or node_stat_item */ |
516 | static inline void mod_memcg_state(struct mem_cgroup *memcg, | 523 | static inline void mod_memcg_state(struct mem_cgroup *memcg, |
517 | int idx, int val) | 524 | int idx, int val) |
518 | { | 525 | { |
519 | if (!mem_cgroup_disabled()) | 526 | preempt_disable(); |
520 | this_cpu_add(memcg->stat->count[idx], val); | 527 | __mod_memcg_state(memcg, idx, val); |
528 | preempt_enable(); | ||
521 | } | 529 | } |
522 | 530 | ||
523 | /** | 531 | /** |
@@ -555,87 +563,108 @@ static inline unsigned long lruvec_page_state(struct lruvec *lruvec, | |||
555 | enum node_stat_item idx) | 563 | enum node_stat_item idx) |
556 | { | 564 | { |
557 | struct mem_cgroup_per_node *pn; | 565 | struct mem_cgroup_per_node *pn; |
558 | long val = 0; | 566 | long x; |
559 | int cpu; | ||
560 | 567 | ||
561 | if (mem_cgroup_disabled()) | 568 | if (mem_cgroup_disabled()) |
562 | return node_page_state(lruvec_pgdat(lruvec), idx); | 569 | return node_page_state(lruvec_pgdat(lruvec), idx); |
563 | 570 | ||
564 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); | 571 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
565 | for_each_possible_cpu(cpu) | 572 | x = atomic_long_read(&pn->lruvec_stat[idx]); |
566 | val += per_cpu(pn->lruvec_stat->count[idx], cpu); | 573 | #ifdef CONFIG_SMP |
567 | 574 | if (x < 0) | |
568 | if (val < 0) | 575 | x = 0; |
569 | val = 0; | 576 | #endif |
570 | 577 | return x; | |
571 | return val; | ||
572 | } | 578 | } |
573 | 579 | ||
574 | static inline void __mod_lruvec_state(struct lruvec *lruvec, | 580 | static inline void __mod_lruvec_state(struct lruvec *lruvec, |
575 | enum node_stat_item idx, int val) | 581 | enum node_stat_item idx, int val) |
576 | { | 582 | { |
577 | struct mem_cgroup_per_node *pn; | 583 | struct mem_cgroup_per_node *pn; |
584 | long x; | ||
578 | 585 | ||
586 | /* Update node */ | ||
579 | __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); | 587 | __mod_node_page_state(lruvec_pgdat(lruvec), idx, val); |
588 | |||
580 | if (mem_cgroup_disabled()) | 589 | if (mem_cgroup_disabled()) |
581 | return; | 590 | return; |
591 | |||
582 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); | 592 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); |
593 | |||
594 | /* Update memcg */ | ||
583 | __mod_memcg_state(pn->memcg, idx, val); | 595 | __mod_memcg_state(pn->memcg, idx, val); |
584 | __this_cpu_add(pn->lruvec_stat->count[idx], val); | 596 | |
597 | /* Update lruvec */ | ||
598 | x = val + __this_cpu_read(pn->lruvec_stat_cpu->count[idx]); | ||
599 | if (unlikely(abs(x) > MEMCG_CHARGE_BATCH)) { | ||
600 | atomic_long_add(x, &pn->lruvec_stat[idx]); | ||
601 | x = 0; | ||
602 | } | ||
603 | __this_cpu_write(pn->lruvec_stat_cpu->count[idx], x); | ||
585 | } | 604 | } |
586 | 605 | ||
587 | static inline void mod_lruvec_state(struct lruvec *lruvec, | 606 | static inline void mod_lruvec_state(struct lruvec *lruvec, |
588 | enum node_stat_item idx, int val) | 607 | enum node_stat_item idx, int val) |
589 | { | 608 | { |
590 | struct mem_cgroup_per_node *pn; | 609 | preempt_disable(); |
591 | 610 | __mod_lruvec_state(lruvec, idx, val); | |
592 | mod_node_page_state(lruvec_pgdat(lruvec), idx, val); | 611 | preempt_enable(); |
593 | if (mem_cgroup_disabled()) | ||
594 | return; | ||
595 | pn = container_of(lruvec, struct mem_cgroup_per_node, lruvec); | ||
596 | mod_memcg_state(pn->memcg, idx, val); | ||
597 | this_cpu_add(pn->lruvec_stat->count[idx], val); | ||
598 | } | 612 | } |
599 | 613 | ||
600 | static inline void __mod_lruvec_page_state(struct page *page, | 614 | static inline void __mod_lruvec_page_state(struct page *page, |
601 | enum node_stat_item idx, int val) | 615 | enum node_stat_item idx, int val) |
602 | { | 616 | { |
603 | struct mem_cgroup_per_node *pn; | 617 | pg_data_t *pgdat = page_pgdat(page); |
618 | struct lruvec *lruvec; | ||
604 | 619 | ||
605 | __mod_node_page_state(page_pgdat(page), idx, val); | 620 | /* Untracked pages have no memcg, no lruvec. Update only the node */ |
606 | if (mem_cgroup_disabled() || !page->mem_cgroup) | 621 | if (!page->mem_cgroup) { |
622 | __mod_node_page_state(pgdat, idx, val); | ||
607 | return; | 623 | return; |
608 | __mod_memcg_state(page->mem_cgroup, idx, val); | 624 | } |
609 | pn = page->mem_cgroup->nodeinfo[page_to_nid(page)]; | 625 | |
610 | __this_cpu_add(pn->lruvec_stat->count[idx], val); | 626 | lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup); |
627 | __mod_lruvec_state(lruvec, idx, val); | ||
611 | } | 628 | } |
612 | 629 | ||
613 | static inline void mod_lruvec_page_state(struct page *page, | 630 | static inline void mod_lruvec_page_state(struct page *page, |
614 | enum node_stat_item idx, int val) | 631 | enum node_stat_item idx, int val) |
615 | { | 632 | { |
616 | struct mem_cgroup_per_node *pn; | 633 | preempt_disable(); |
617 | 634 | __mod_lruvec_page_state(page, idx, val); | |
618 | mod_node_page_state(page_pgdat(page), idx, val); | 635 | preempt_enable(); |
619 | if (mem_cgroup_disabled() || !page->mem_cgroup) | ||
620 | return; | ||
621 | mod_memcg_state(page->mem_cgroup, idx, val); | ||
622 | pn = page->mem_cgroup->nodeinfo[page_to_nid(page)]; | ||
623 | this_cpu_add(pn->lruvec_stat->count[idx], val); | ||
624 | } | 636 | } |
625 | 637 | ||
626 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, | 638 | unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order, |
627 | gfp_t gfp_mask, | 639 | gfp_t gfp_mask, |
628 | unsigned long *total_scanned); | 640 | unsigned long *total_scanned); |
629 | 641 | ||
642 | /* idx can be of type enum memcg_event_item or vm_event_item */ | ||
643 | static inline void __count_memcg_events(struct mem_cgroup *memcg, | ||
644 | int idx, unsigned long count) | ||
645 | { | ||
646 | unsigned long x; | ||
647 | |||
648 | if (mem_cgroup_disabled()) | ||
649 | return; | ||
650 | |||
651 | x = count + __this_cpu_read(memcg->stat_cpu->events[idx]); | ||
652 | if (unlikely(x > MEMCG_CHARGE_BATCH)) { | ||
653 | atomic_long_add(x, &memcg->events[idx]); | ||
654 | x = 0; | ||
655 | } | ||
656 | __this_cpu_write(memcg->stat_cpu->events[idx], x); | ||
657 | } | ||
658 | |||
630 | static inline void count_memcg_events(struct mem_cgroup *memcg, | 659 | static inline void count_memcg_events(struct mem_cgroup *memcg, |
631 | enum vm_event_item idx, | 660 | int idx, unsigned long count) |
632 | unsigned long count) | ||
633 | { | 661 | { |
634 | if (!mem_cgroup_disabled()) | 662 | preempt_disable(); |
635 | this_cpu_add(memcg->stat->events[idx], count); | 663 | __count_memcg_events(memcg, idx, count); |
664 | preempt_enable(); | ||
636 | } | 665 | } |
637 | 666 | ||
638 | /* idx can be of type enum memcg_stat_item or node_stat_item */ | 667 | /* idx can be of type enum memcg_event_item or vm_event_item */ |
639 | static inline void count_memcg_page_event(struct page *page, | 668 | static inline void count_memcg_page_event(struct page *page, |
640 | int idx) | 669 | int idx) |
641 | { | 670 | { |
@@ -654,12 +683,20 @@ static inline void count_memcg_event_mm(struct mm_struct *mm, | |||
654 | rcu_read_lock(); | 683 | rcu_read_lock(); |
655 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); | 684 | memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); |
656 | if (likely(memcg)) { | 685 | if (likely(memcg)) { |
657 | this_cpu_inc(memcg->stat->events[idx]); | 686 | count_memcg_events(memcg, idx, 1); |
658 | if (idx == OOM_KILL) | 687 | if (idx == OOM_KILL) |
659 | cgroup_file_notify(&memcg->events_file); | 688 | cgroup_file_notify(&memcg->events_file); |
660 | } | 689 | } |
661 | rcu_read_unlock(); | 690 | rcu_read_unlock(); |
662 | } | 691 | } |
692 | |||
693 | static inline void mem_cgroup_event(struct mem_cgroup *memcg, | ||
694 | enum memcg_event_item event) | ||
695 | { | ||
696 | count_memcg_events(memcg, event, 1); | ||
697 | cgroup_file_notify(&memcg->events_file); | ||
698 | } | ||
699 | |||
663 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 700 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
664 | void mem_cgroup_split_huge_fixup(struct page *head); | 701 | void mem_cgroup_split_huge_fixup(struct page *head); |
665 | #endif | 702 | #endif |
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 58e110aee7ab..aba5f86eb038 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
@@ -13,6 +13,7 @@ struct pglist_data; | |||
13 | struct mem_section; | 13 | struct mem_section; |
14 | struct memory_block; | 14 | struct memory_block; |
15 | struct resource; | 15 | struct resource; |
16 | struct vmem_altmap; | ||
16 | 17 | ||
17 | #ifdef CONFIG_MEMORY_HOTPLUG | 18 | #ifdef CONFIG_MEMORY_HOTPLUG |
18 | /* | 19 | /* |
@@ -125,24 +126,26 @@ static inline bool movable_node_is_enabled(void) | |||
125 | 126 | ||
126 | #ifdef CONFIG_MEMORY_HOTREMOVE | 127 | #ifdef CONFIG_MEMORY_HOTREMOVE |
127 | extern bool is_pageblock_removable_nolock(struct page *page); | 128 | extern bool is_pageblock_removable_nolock(struct page *page); |
128 | extern int arch_remove_memory(u64 start, u64 size); | 129 | extern int arch_remove_memory(u64 start, u64 size, |
130 | struct vmem_altmap *altmap); | ||
129 | extern int __remove_pages(struct zone *zone, unsigned long start_pfn, | 131 | extern int __remove_pages(struct zone *zone, unsigned long start_pfn, |
130 | unsigned long nr_pages); | 132 | unsigned long nr_pages, struct vmem_altmap *altmap); |
131 | #endif /* CONFIG_MEMORY_HOTREMOVE */ | 133 | #endif /* CONFIG_MEMORY_HOTREMOVE */ |
132 | 134 | ||
133 | /* reasonably generic interface to expand the physical pages */ | 135 | /* reasonably generic interface to expand the physical pages */ |
134 | extern int __add_pages(int nid, unsigned long start_pfn, | 136 | extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, |
135 | unsigned long nr_pages, bool want_memblock); | 137 | struct vmem_altmap *altmap, bool want_memblock); |
136 | 138 | ||
137 | #ifndef CONFIG_ARCH_HAS_ADD_PAGES | 139 | #ifndef CONFIG_ARCH_HAS_ADD_PAGES |
138 | static inline int add_pages(int nid, unsigned long start_pfn, | 140 | static inline int add_pages(int nid, unsigned long start_pfn, |
139 | unsigned long nr_pages, bool want_memblock) | 141 | unsigned long nr_pages, struct vmem_altmap *altmap, |
142 | bool want_memblock) | ||
140 | { | 143 | { |
141 | return __add_pages(nid, start_pfn, nr_pages, want_memblock); | 144 | return __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); |
142 | } | 145 | } |
143 | #else /* ARCH_HAS_ADD_PAGES */ | 146 | #else /* ARCH_HAS_ADD_PAGES */ |
144 | int add_pages(int nid, unsigned long start_pfn, | 147 | int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages, |
145 | unsigned long nr_pages, bool want_memblock); | 148 | struct vmem_altmap *altmap, bool want_memblock); |
146 | #endif /* ARCH_HAS_ADD_PAGES */ | 149 | #endif /* ARCH_HAS_ADD_PAGES */ |
147 | 150 | ||
148 | #ifdef CONFIG_NUMA | 151 | #ifdef CONFIG_NUMA |
@@ -318,15 +321,17 @@ extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn, | |||
318 | void *arg, int (*func)(struct memory_block *, void *)); | 321 | void *arg, int (*func)(struct memory_block *, void *)); |
319 | extern int add_memory(int nid, u64 start, u64 size); | 322 | extern int add_memory(int nid, u64 start, u64 size); |
320 | extern int add_memory_resource(int nid, struct resource *resource, bool online); | 323 | extern int add_memory_resource(int nid, struct resource *resource, bool online); |
321 | extern int arch_add_memory(int nid, u64 start, u64 size, bool want_memblock); | 324 | extern int arch_add_memory(int nid, u64 start, u64 size, |
325 | struct vmem_altmap *altmap, bool want_memblock); | ||
322 | extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, | 326 | extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, |
323 | unsigned long nr_pages); | 327 | unsigned long nr_pages, struct vmem_altmap *altmap); |
324 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); | 328 | extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages); |
325 | extern bool is_memblock_offlined(struct memory_block *mem); | 329 | extern bool is_memblock_offlined(struct memory_block *mem); |
326 | extern void remove_memory(int nid, u64 start, u64 size); | 330 | extern void remove_memory(int nid, u64 start, u64 size); |
327 | extern int sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn); | 331 | extern int sparse_add_one_section(struct pglist_data *pgdat, |
332 | unsigned long start_pfn, struct vmem_altmap *altmap); | ||
328 | extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, | 333 | extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms, |
329 | unsigned long map_offset); | 334 | unsigned long map_offset, struct vmem_altmap *altmap); |
330 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, | 335 | extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map, |
331 | unsigned long pnum); | 336 | unsigned long pnum); |
332 | extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, | 337 | extern bool allow_online_pfn_range(int nid, unsigned long pfn, unsigned long nr_pages, |
diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 10d23c367048..7b4899c06f49 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h | |||
@@ -26,18 +26,6 @@ struct vmem_altmap { | |||
26 | unsigned long alloc; | 26 | unsigned long alloc; |
27 | }; | 27 | }; |
28 | 28 | ||
29 | unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); | ||
30 | void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); | ||
31 | |||
32 | #ifdef CONFIG_ZONE_DEVICE | ||
33 | struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start); | ||
34 | #else | ||
35 | static inline struct vmem_altmap *to_vmem_altmap(unsigned long memmap_start) | ||
36 | { | ||
37 | return NULL; | ||
38 | } | ||
39 | #endif | ||
40 | |||
41 | /* | 29 | /* |
42 | * Specialize ZONE_DEVICE memory into multiple types each having differents | 30 | * Specialize ZONE_DEVICE memory into multiple types each having differents |
43 | * usage. | 31 | * usage. |
@@ -125,8 +113,9 @@ typedef void (*dev_page_free_t)(struct page *page, void *data); | |||
125 | struct dev_pagemap { | 113 | struct dev_pagemap { |
126 | dev_page_fault_t page_fault; | 114 | dev_page_fault_t page_fault; |
127 | dev_page_free_t page_free; | 115 | dev_page_free_t page_free; |
128 | struct vmem_altmap *altmap; | 116 | struct vmem_altmap altmap; |
129 | const struct resource *res; | 117 | bool altmap_valid; |
118 | struct resource res; | ||
130 | struct percpu_ref *ref; | 119 | struct percpu_ref *ref; |
131 | struct device *dev; | 120 | struct device *dev; |
132 | void *data; | 121 | void *data; |
@@ -134,15 +123,17 @@ struct dev_pagemap { | |||
134 | }; | 123 | }; |
135 | 124 | ||
136 | #ifdef CONFIG_ZONE_DEVICE | 125 | #ifdef CONFIG_ZONE_DEVICE |
137 | void *devm_memremap_pages(struct device *dev, struct resource *res, | 126 | void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); |
138 | struct percpu_ref *ref, struct vmem_altmap *altmap); | 127 | struct dev_pagemap *get_dev_pagemap(unsigned long pfn, |
139 | struct dev_pagemap *find_dev_pagemap(resource_size_t phys); | 128 | struct dev_pagemap *pgmap); |
129 | |||
130 | unsigned long vmem_altmap_offset(struct vmem_altmap *altmap); | ||
131 | void vmem_altmap_free(struct vmem_altmap *altmap, unsigned long nr_pfns); | ||
140 | 132 | ||
141 | static inline bool is_zone_device_page(const struct page *page); | 133 | static inline bool is_zone_device_page(const struct page *page); |
142 | #else | 134 | #else |
143 | static inline void *devm_memremap_pages(struct device *dev, | 135 | static inline void *devm_memremap_pages(struct device *dev, |
144 | struct resource *res, struct percpu_ref *ref, | 136 | struct dev_pagemap *pgmap) |
145 | struct vmem_altmap *altmap) | ||
146 | { | 137 | { |
147 | /* | 138 | /* |
148 | * Fail attempts to call devm_memremap_pages() without | 139 | * Fail attempts to call devm_memremap_pages() without |
@@ -153,11 +144,22 @@ static inline void *devm_memremap_pages(struct device *dev, | |||
153 | return ERR_PTR(-ENXIO); | 144 | return ERR_PTR(-ENXIO); |
154 | } | 145 | } |
155 | 146 | ||
156 | static inline struct dev_pagemap *find_dev_pagemap(resource_size_t phys) | 147 | static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, |
148 | struct dev_pagemap *pgmap) | ||
157 | { | 149 | { |
158 | return NULL; | 150 | return NULL; |
159 | } | 151 | } |
160 | #endif | 152 | |
153 | static inline unsigned long vmem_altmap_offset(struct vmem_altmap *altmap) | ||
154 | { | ||
155 | return 0; | ||
156 | } | ||
157 | |||
158 | static inline void vmem_altmap_free(struct vmem_altmap *altmap, | ||
159 | unsigned long nr_pfns) | ||
160 | { | ||
161 | } | ||
162 | #endif /* CONFIG_ZONE_DEVICE */ | ||
161 | 163 | ||
162 | #if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC) | 164 | #if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC) |
163 | static inline bool is_device_private_page(const struct page *page) | 165 | static inline bool is_device_private_page(const struct page *page) |
@@ -173,39 +175,6 @@ static inline bool is_device_public_page(const struct page *page) | |||
173 | } | 175 | } |
174 | #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ | 176 | #endif /* CONFIG_DEVICE_PRIVATE || CONFIG_DEVICE_PUBLIC */ |
175 | 177 | ||
176 | /** | ||
177 | * get_dev_pagemap() - take a new live reference on the dev_pagemap for @pfn | ||
178 | * @pfn: page frame number to lookup page_map | ||
179 | * @pgmap: optional known pgmap that already has a reference | ||
180 | * | ||
181 | * @pgmap allows the overhead of a lookup to be bypassed when @pfn lands in the | ||
182 | * same mapping. | ||
183 | */ | ||
184 | static inline struct dev_pagemap *get_dev_pagemap(unsigned long pfn, | ||
185 | struct dev_pagemap *pgmap) | ||
186 | { | ||
187 | const struct resource *res = pgmap ? pgmap->res : NULL; | ||
188 | resource_size_t phys = PFN_PHYS(pfn); | ||
189 | |||
190 | /* | ||
191 | * In the cached case we're already holding a live reference so | ||
192 | * we can simply do a blind increment | ||
193 | */ | ||
194 | if (res && phys >= res->start && phys <= res->end) { | ||
195 | percpu_ref_get(pgmap->ref); | ||
196 | return pgmap; | ||
197 | } | ||
198 | |||
199 | /* fall back to slow path lookup */ | ||
200 | rcu_read_lock(); | ||
201 | pgmap = find_dev_pagemap(phys); | ||
202 | if (pgmap && !percpu_ref_tryget_live(pgmap->ref)) | ||
203 | pgmap = NULL; | ||
204 | rcu_read_unlock(); | ||
205 | |||
206 | return pgmap; | ||
207 | } | ||
208 | |||
209 | static inline void put_dev_pagemap(struct dev_pagemap *pgmap) | 178 | static inline void put_dev_pagemap(struct dev_pagemap *pgmap) |
210 | { | 179 | { |
211 | if (pgmap) | 180 | if (pgmap) |
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h index 78dc85365c4f..080798f17ece 100644 --- a/include/linux/mfd/axp20x.h +++ b/include/linux/mfd/axp20x.h | |||
@@ -645,11 +645,6 @@ struct axp20x_dev { | |||
645 | const struct regmap_irq_chip *regmap_irq_chip; | 645 | const struct regmap_irq_chip *regmap_irq_chip; |
646 | }; | 646 | }; |
647 | 647 | ||
648 | struct axp288_extcon_pdata { | ||
649 | /* GPIO pin control to switch D+/D- lines b/w PMIC and SOC */ | ||
650 | struct gpio_desc *gpio_mux_cntl; | ||
651 | }; | ||
652 | |||
653 | /* generic helper function for reading 9-16 bit wide regs */ | 648 | /* generic helper function for reading 9-16 bit wide regs */ |
654 | static inline int axp20x_read_variable_width(struct regmap *regmap, | 649 | static inline int axp20x_read_variable_width(struct regmap *regmap, |
655 | unsigned int reg, unsigned int width) | 650 | unsigned int reg, unsigned int width) |
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index 4e887ba22635..c61535979b8f 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h | |||
@@ -322,6 +322,10 @@ extern struct attribute_group cros_ec_attr_group; | |||
322 | extern struct attribute_group cros_ec_lightbar_attr_group; | 322 | extern struct attribute_group cros_ec_lightbar_attr_group; |
323 | extern struct attribute_group cros_ec_vbc_attr_group; | 323 | extern struct attribute_group cros_ec_vbc_attr_group; |
324 | 324 | ||
325 | /* debugfs stuff */ | ||
326 | int cros_ec_debugfs_init(struct cros_ec_dev *ec); | ||
327 | void cros_ec_debugfs_remove(struct cros_ec_dev *ec); | ||
328 | |||
325 | /* ACPI GPE handler */ | 329 | /* ACPI GPE handler */ |
326 | #ifdef CONFIG_ACPI | 330 | #ifdef CONFIG_ACPI |
327 | 331 | ||
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 2b16e95b9bb8..2b96e630e3b6 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h | |||
@@ -291,6 +291,9 @@ enum host_event_code { | |||
291 | /* EC desires to change state of host-controlled USB mux */ | 291 | /* EC desires to change state of host-controlled USB mux */ |
292 | EC_HOST_EVENT_USB_MUX = 28, | 292 | EC_HOST_EVENT_USB_MUX = 28, |
293 | 293 | ||
294 | /* EC RTC event occurred */ | ||
295 | EC_HOST_EVENT_RTC = 26, | ||
296 | |||
294 | /* | 297 | /* |
295 | * The high bit of the event mask is not used as a host event code. If | 298 | * The high bit of the event mask is not used as a host event code. If |
296 | * it reads back as set, then the entire event mask should be | 299 | * it reads back as set, then the entire event mask should be |
@@ -799,6 +802,8 @@ enum ec_feature_code { | |||
799 | EC_FEATURE_USB_MUX = 23, | 802 | EC_FEATURE_USB_MUX = 23, |
800 | /* Motion Sensor code has an internal software FIFO */ | 803 | /* Motion Sensor code has an internal software FIFO */ |
801 | EC_FEATURE_MOTION_SENSE_FIFO = 24, | 804 | EC_FEATURE_MOTION_SENSE_FIFO = 24, |
805 | /* EC has RTC feature that can be controlled by host commands */ | ||
806 | EC_FEATURE_RTC = 27, | ||
802 | }; | 807 | }; |
803 | 808 | ||
804 | #define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) | 809 | #define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32)) |
@@ -1709,6 +1714,9 @@ struct ec_response_rtc { | |||
1709 | #define EC_CMD_RTC_SET_VALUE 0x46 | 1714 | #define EC_CMD_RTC_SET_VALUE 0x46 |
1710 | #define EC_CMD_RTC_SET_ALARM 0x47 | 1715 | #define EC_CMD_RTC_SET_ALARM 0x47 |
1711 | 1716 | ||
1717 | /* Pass as param to SET_ALARM to clear the current alarm */ | ||
1718 | #define EC_RTC_ALARM_CLEAR 0 | ||
1719 | |||
1712 | /*****************************************************************************/ | 1720 | /*****************************************************************************/ |
1713 | /* Port80 log access */ | 1721 | /* Port80 log access */ |
1714 | 1722 | ||
@@ -2904,16 +2912,33 @@ enum usb_pd_control_mux { | |||
2904 | USB_PD_CTRL_MUX_AUTO = 5, | 2912 | USB_PD_CTRL_MUX_AUTO = 5, |
2905 | }; | 2913 | }; |
2906 | 2914 | ||
2915 | enum usb_pd_control_swap { | ||
2916 | USB_PD_CTRL_SWAP_NONE = 0, | ||
2917 | USB_PD_CTRL_SWAP_DATA = 1, | ||
2918 | USB_PD_CTRL_SWAP_POWER = 2, | ||
2919 | USB_PD_CTRL_SWAP_VCONN = 3, | ||
2920 | USB_PD_CTRL_SWAP_COUNT | ||
2921 | }; | ||
2922 | |||
2907 | struct ec_params_usb_pd_control { | 2923 | struct ec_params_usb_pd_control { |
2908 | uint8_t port; | 2924 | uint8_t port; |
2909 | uint8_t role; | 2925 | uint8_t role; |
2910 | uint8_t mux; | 2926 | uint8_t mux; |
2927 | uint8_t swap; | ||
2911 | } __packed; | 2928 | } __packed; |
2912 | 2929 | ||
2913 | #define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */ | 2930 | #define PD_CTRL_RESP_ENABLED_COMMS (1 << 0) /* Communication enabled */ |
2914 | #define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */ | 2931 | #define PD_CTRL_RESP_ENABLED_CONNECTED (1 << 1) /* Device connected */ |
2915 | #define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */ | 2932 | #define PD_CTRL_RESP_ENABLED_PD_CAPABLE (1 << 2) /* Partner is PD capable */ |
2916 | 2933 | ||
2934 | #define PD_CTRL_RESP_ROLE_POWER BIT(0) /* 0=SNK/1=SRC */ | ||
2935 | #define PD_CTRL_RESP_ROLE_DATA BIT(1) /* 0=UFP/1=DFP */ | ||
2936 | #define PD_CTRL_RESP_ROLE_VCONN BIT(2) /* Vconn status */ | ||
2937 | #define PD_CTRL_RESP_ROLE_DR_POWER BIT(3) /* Partner is dualrole power */ | ||
2938 | #define PD_CTRL_RESP_ROLE_DR_DATA BIT(4) /* Partner is dualrole data */ | ||
2939 | #define PD_CTRL_RESP_ROLE_USB_COMM BIT(5) /* Partner USB comm capable */ | ||
2940 | #define PD_CTRL_RESP_ROLE_EXT_POWERED BIT(6) /* Partner externally powerd */ | ||
2941 | |||
2917 | struct ec_response_usb_pd_control_v1 { | 2942 | struct ec_response_usb_pd_control_v1 { |
2918 | uint8_t enabled; | 2943 | uint8_t enabled; |
2919 | uint8_t role; | 2944 | uint8_t role; |
diff --git a/include/linux/mfd/palmas.h b/include/linux/mfd/palmas.h index 3c8568aa82a5..75e5c8ff85fc 100644 --- a/include/linux/mfd/palmas.h +++ b/include/linux/mfd/palmas.h | |||
@@ -3733,6 +3733,9 @@ enum usb_irq_events { | |||
3733 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 | 3733 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE 0x01 |
3734 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 | 3734 | #define TPS65917_REGEN3_CTRL_MODE_ACTIVE_SHIFT 0x00 |
3735 | 3735 | ||
3736 | /* POWERHOLD Mask field for PRIMARY_SECONDARY_PAD2 register */ | ||
3737 | #define TPS65917_PRIMARY_SECONDARY_PAD2_GPIO_5_MASK 0xC | ||
3738 | |||
3736 | /* Registers for function RESOURCE */ | 3739 | /* Registers for function RESOURCE */ |
3737 | #define TPS65917_REGEN1_CTRL 0x2 | 3740 | #define TPS65917_REGEN1_CTRL 0x2 |
3738 | #define TPS65917_PLLEN_CTRL 0x3 | 3741 | #define TPS65917_PLLEN_CTRL 0x3 |
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h new file mode 100644 index 000000000000..796fb9794c9e --- /dev/null +++ b/include/linux/mfd/rave-sp.h | |||
@@ -0,0 +1,60 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
2 | |||
3 | /* | ||
4 | * Core definitions for RAVE SP MFD driver. | ||
5 | * | ||
6 | * Copyright (C) 2017 Zodiac Inflight Innovations | ||
7 | */ | ||
8 | |||
9 | #ifndef _LINUX_RAVE_SP_H_ | ||
10 | #define _LINUX_RAVE_SP_H_ | ||
11 | |||
12 | #include <linux/notifier.h> | ||
13 | |||
14 | enum rave_sp_command { | ||
15 | RAVE_SP_CMD_GET_FIRMWARE_VERSION = 0x20, | ||
16 | RAVE_SP_CMD_GET_BOOTLOADER_VERSION = 0x21, | ||
17 | RAVE_SP_CMD_BOOT_SOURCE = 0x26, | ||
18 | RAVE_SP_CMD_GET_BOARD_COPPER_REV = 0x2B, | ||
19 | RAVE_SP_CMD_GET_GPIO_STATE = 0x2F, | ||
20 | |||
21 | RAVE_SP_CMD_STATUS = 0xA0, | ||
22 | RAVE_SP_CMD_SW_WDT = 0xA1, | ||
23 | RAVE_SP_CMD_PET_WDT = 0xA2, | ||
24 | RAVE_SP_CMD_RESET = 0xA7, | ||
25 | RAVE_SP_CMD_RESET_REASON = 0xA8, | ||
26 | |||
27 | RAVE_SP_CMD_REQ_COPPER_REV = 0xB6, | ||
28 | RAVE_SP_CMD_GET_I2C_DEVICE_STATUS = 0xBA, | ||
29 | RAVE_SP_CMD_GET_SP_SILICON_REV = 0xB9, | ||
30 | RAVE_SP_CMD_CONTROL_EVENTS = 0xBB, | ||
31 | |||
32 | RAVE_SP_EVNT_BASE = 0xE0, | ||
33 | }; | ||
34 | |||
35 | struct rave_sp; | ||
36 | |||
37 | static inline unsigned long rave_sp_action_pack(u8 event, u8 value) | ||
38 | { | ||
39 | return ((unsigned long)value << 8) | event; | ||
40 | } | ||
41 | |||
42 | static inline u8 rave_sp_action_unpack_event(unsigned long action) | ||
43 | { | ||
44 | return action; | ||
45 | } | ||
46 | |||
47 | static inline u8 rave_sp_action_unpack_value(unsigned long action) | ||
48 | { | ||
49 | return action >> 8; | ||
50 | } | ||
51 | |||
52 | int rave_sp_exec(struct rave_sp *sp, | ||
53 | void *__data, size_t data_size, | ||
54 | void *reply_data, size_t reply_data_size); | ||
55 | |||
56 | struct device; | ||
57 | int devm_rave_sp_register_event_notifier(struct device *dev, | ||
58 | struct notifier_block *nb); | ||
59 | |||
60 | #endif /* _LINUX_RAVE_SP_H_ */ | ||
diff --git a/include/linux/mfd/stm32-lptimer.h b/include/linux/mfd/stm32-lptimer.h index 77c7cf40d9b4..605f62264825 100644 --- a/include/linux/mfd/stm32-lptimer.h +++ b/include/linux/mfd/stm32-lptimer.h | |||
@@ -1,13 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * STM32 Low-Power Timer parent driver. | 3 | * STM32 Low-Power Timer parent driver. |
3 | * | ||
4 | * Copyright (C) STMicroelectronics 2017 | 4 | * Copyright (C) STMicroelectronics 2017 |
5 | * | ||
6 | * Author: Fabrice Gasnier <fabrice.gasnier@st.com> | 5 | * Author: Fabrice Gasnier <fabrice.gasnier@st.com> |
7 | * | ||
8 | * Inspired by Benjamin Gaignard's stm32-timers driver | 6 | * Inspired by Benjamin Gaignard's stm32-timers driver |
9 | * | ||
10 | * License terms: GNU General Public License (GPL), version 2 | ||
11 | */ | 7 | */ |
12 | 8 | ||
13 | #ifndef _LINUX_STM32_LPTIMER_H_ | 9 | #ifndef _LINUX_STM32_LPTIMER_H_ |
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h index ce7346e7f77a..2aadab6f34a1 100644 --- a/include/linux/mfd/stm32-timers.h +++ b/include/linux/mfd/stm32-timers.h | |||
@@ -1,9 +1,7 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Copyright (C) STMicroelectronics 2016 | 3 | * Copyright (C) STMicroelectronics 2016 |
3 | * | ||
4 | * Author: Benjamin Gaignard <benjamin.gaignard@st.com> | 4 | * Author: Benjamin Gaignard <benjamin.gaignard@st.com> |
5 | * | ||
6 | * License terms: GNU General Public License (GPL), version 2 | ||
7 | */ | 5 | */ |
8 | 6 | ||
9 | #ifndef _LINUX_STM32_GPTIMER_H_ | 7 | #ifndef _LINUX_STM32_GPTIMER_H_ |
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index e1cfe9194129..396a103c8bc6 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h | |||
@@ -25,26 +25,6 @@ | |||
25 | writew((val) >> 16, (addr) + 2); \ | 25 | writew((val) >> 16, (addr) + 2); \ |
26 | } while (0) | 26 | } while (0) |
27 | 27 | ||
28 | #define CNF_CMD 0x04 | ||
29 | #define CNF_CTL_BASE 0x10 | ||
30 | #define CNF_INT_PIN 0x3d | ||
31 | #define CNF_STOP_CLK_CTL 0x40 | ||
32 | #define CNF_GCLK_CTL 0x41 | ||
33 | #define CNF_SD_CLK_MODE 0x42 | ||
34 | #define CNF_PIN_STATUS 0x44 | ||
35 | #define CNF_PWR_CTL_1 0x48 | ||
36 | #define CNF_PWR_CTL_2 0x49 | ||
37 | #define CNF_PWR_CTL_3 0x4a | ||
38 | #define CNF_CARD_DETECT_MODE 0x4c | ||
39 | #define CNF_SD_SLOT 0x50 | ||
40 | #define CNF_EXT_GCLK_CTL_1 0xf0 | ||
41 | #define CNF_EXT_GCLK_CTL_2 0xf1 | ||
42 | #define CNF_EXT_GCLK_CTL_3 0xf9 | ||
43 | #define CNF_SD_LED_EN_1 0xfa | ||
44 | #define CNF_SD_LED_EN_2 0xfe | ||
45 | |||
46 | #define SDCREN 0x2 /* Enable access to MMC CTL regs. (flag in COMMAND_REG)*/ | ||
47 | |||
48 | #define sd_config_write8(base, shift, reg, val) \ | 28 | #define sd_config_write8(base, shift, reg, val) \ |
49 | tmio_iowrite8((val), (base) + ((reg) << (shift))) | 29 | tmio_iowrite8((val), (base) + ((reg) << (shift))) |
50 | #define sd_config_write16(base, shift, reg, val) \ | 30 | #define sd_config_write16(base, shift, reg, val) \ |
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 409ffb14298a..e5258ee4e38b 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
@@ -79,6 +79,11 @@ | |||
79 | << __mlx5_dw_bit_off(typ, fld))); \ | 79 | << __mlx5_dw_bit_off(typ, fld))); \ |
80 | } while (0) | 80 | } while (0) |
81 | 81 | ||
82 | #define MLX5_ARRAY_SET(typ, p, fld, idx, v) do { \ | ||
83 | BUILD_BUG_ON(__mlx5_bit_off(typ, fld) % 32); \ | ||
84 | MLX5_SET(typ, p, fld[idx], v); \ | ||
85 | } while (0) | ||
86 | |||
82 | #define MLX5_SET_TO_ONES(typ, p, fld) do { \ | 87 | #define MLX5_SET_TO_ONES(typ, p, fld) do { \ |
83 | BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ | 88 | BUILD_BUG_ON(__mlx5_st_sz_bits(typ) % 32); \ |
84 | *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ | 89 | *((__be32 *)(p) + __mlx5_dw_off(typ, fld)) = \ |
@@ -244,6 +249,8 @@ enum { | |||
244 | MLX5_NON_FP_BFREGS_PER_UAR, | 249 | MLX5_NON_FP_BFREGS_PER_UAR, |
245 | MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, | 250 | MLX5_UARS_IN_PAGE = PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE, |
246 | MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, | 251 | MLX5_NON_FP_BFREGS_IN_PAGE = MLX5_NON_FP_BFREGS_PER_UAR * MLX5_UARS_IN_PAGE, |
252 | MLX5_MIN_DYN_BFREGS = 512, | ||
253 | MLX5_MAX_DYN_BFREGS = 1024, | ||
247 | }; | 254 | }; |
248 | 255 | ||
249 | enum { | 256 | enum { |
@@ -284,6 +291,7 @@ enum { | |||
284 | MLX5_EVENT_QUEUE_TYPE_QP = 0, | 291 | MLX5_EVENT_QUEUE_TYPE_QP = 0, |
285 | MLX5_EVENT_QUEUE_TYPE_RQ = 1, | 292 | MLX5_EVENT_QUEUE_TYPE_RQ = 1, |
286 | MLX5_EVENT_QUEUE_TYPE_SQ = 2, | 293 | MLX5_EVENT_QUEUE_TYPE_SQ = 2, |
294 | MLX5_EVENT_QUEUE_TYPE_DCT = 6, | ||
287 | }; | 295 | }; |
288 | 296 | ||
289 | enum mlx5_event { | 297 | enum mlx5_event { |
@@ -319,6 +327,8 @@ enum mlx5_event { | |||
319 | MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, | 327 | MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, |
320 | MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, | 328 | MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, |
321 | 329 | ||
330 | MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, | ||
331 | |||
322 | MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, | 332 | MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, |
323 | }; | 333 | }; |
324 | 334 | ||
@@ -611,6 +621,11 @@ struct mlx5_eqe_pps { | |||
611 | u8 rsvd2[12]; | 621 | u8 rsvd2[12]; |
612 | } __packed; | 622 | } __packed; |
613 | 623 | ||
624 | struct mlx5_eqe_dct { | ||
625 | __be32 reserved[6]; | ||
626 | __be32 dctn; | ||
627 | }; | ||
628 | |||
614 | union ev_data { | 629 | union ev_data { |
615 | __be32 raw[7]; | 630 | __be32 raw[7]; |
616 | struct mlx5_eqe_cmd cmd; | 631 | struct mlx5_eqe_cmd cmd; |
@@ -626,6 +641,7 @@ union ev_data { | |||
626 | struct mlx5_eqe_vport_change vport_change; | 641 | struct mlx5_eqe_vport_change vport_change; |
627 | struct mlx5_eqe_port_module port_module; | 642 | struct mlx5_eqe_port_module port_module; |
628 | struct mlx5_eqe_pps pps; | 643 | struct mlx5_eqe_pps pps; |
644 | struct mlx5_eqe_dct dct; | ||
629 | } __packed; | 645 | } __packed; |
630 | 646 | ||
631 | struct mlx5_eqe { | 647 | struct mlx5_eqe { |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index a0610427e168..6ed79a8a8318 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
@@ -155,6 +155,13 @@ enum mlx5_dcbx_oper_mode { | |||
155 | MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, | 155 | MLX5E_DCBX_PARAM_VER_OPER_AUTO = 0x3, |
156 | }; | 156 | }; |
157 | 157 | ||
158 | enum mlx5_dct_atomic_mode { | ||
159 | MLX5_ATOMIC_MODE_DCT_OFF = 20, | ||
160 | MLX5_ATOMIC_MODE_DCT_NONE = 0 << MLX5_ATOMIC_MODE_DCT_OFF, | ||
161 | MLX5_ATOMIC_MODE_DCT_IB_COMP = 1 << MLX5_ATOMIC_MODE_DCT_OFF, | ||
162 | MLX5_ATOMIC_MODE_DCT_CX = 2 << MLX5_ATOMIC_MODE_DCT_OFF, | ||
163 | }; | ||
164 | |||
158 | enum { | 165 | enum { |
159 | MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, | 166 | MLX5_ATOMIC_OPS_CMP_SWAP = 1 << 0, |
160 | MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, | 167 | MLX5_ATOMIC_OPS_FETCH_ADD = 1 << 1, |
@@ -231,6 +238,9 @@ struct mlx5_bfreg_info { | |||
231 | u32 ver; | 238 | u32 ver; |
232 | bool lib_uar_4k; | 239 | bool lib_uar_4k; |
233 | u32 num_sys_pages; | 240 | u32 num_sys_pages; |
241 | u32 num_static_sys_pages; | ||
242 | u32 total_num_bfregs; | ||
243 | u32 num_dyn_bfregs; | ||
234 | }; | 244 | }; |
235 | 245 | ||
236 | struct mlx5_cmd_first { | 246 | struct mlx5_cmd_first { |
@@ -430,6 +440,7 @@ enum mlx5_res_type { | |||
430 | MLX5_RES_SRQ = 3, | 440 | MLX5_RES_SRQ = 3, |
431 | MLX5_RES_XSRQ = 4, | 441 | MLX5_RES_XSRQ = 4, |
432 | MLX5_RES_XRQ = 5, | 442 | MLX5_RES_XRQ = 5, |
443 | MLX5_RES_DCT = MLX5_EVENT_QUEUE_TYPE_DCT, | ||
433 | }; | 444 | }; |
434 | 445 | ||
435 | struct mlx5_core_rsc_common { | 446 | struct mlx5_core_rsc_common { |
@@ -788,6 +799,7 @@ struct mlx5_clock { | |||
788 | u32 nominal_c_mult; | 799 | u32 nominal_c_mult; |
789 | unsigned long overflow_period; | 800 | unsigned long overflow_period; |
790 | struct delayed_work overflow_work; | 801 | struct delayed_work overflow_work; |
802 | struct mlx5_core_dev *mdev; | ||
791 | struct ptp_clock *ptp; | 803 | struct ptp_clock *ptp; |
792 | struct ptp_clock_info ptp_info; | 804 | struct ptp_clock_info ptp_info; |
793 | struct mlx5_pps pps_info; | 805 | struct mlx5_pps pps_info; |
@@ -826,7 +838,7 @@ struct mlx5_core_dev { | |||
826 | struct mlx5e_resources mlx5e_res; | 838 | struct mlx5e_resources mlx5e_res; |
827 | struct { | 839 | struct { |
828 | struct mlx5_rsvd_gids reserved_gids; | 840 | struct mlx5_rsvd_gids reserved_gids; |
829 | atomic_t roce_en; | 841 | u32 roce_en; |
830 | } roce; | 842 | } roce; |
831 | #ifdef CONFIG_MLX5_FPGA | 843 | #ifdef CONFIG_MLX5_FPGA |
832 | struct mlx5_fpga_device *fpga; | 844 | struct mlx5_fpga_device *fpga; |
@@ -835,6 +847,8 @@ struct mlx5_core_dev { | |||
835 | struct cpu_rmap *rmap; | 847 | struct cpu_rmap *rmap; |
836 | #endif | 848 | #endif |
837 | struct mlx5_clock clock; | 849 | struct mlx5_clock clock; |
850 | struct mlx5_ib_clock_info *clock_info; | ||
851 | struct page *clock_info_page; | ||
838 | }; | 852 | }; |
839 | 853 | ||
840 | struct mlx5_db { | 854 | struct mlx5_db { |
@@ -1103,7 +1117,7 @@ void mlx5_free_bfreg(struct mlx5_core_dev *mdev, struct mlx5_sq_bfreg *bfreg); | |||
1103 | unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); | 1117 | unsigned int mlx5_core_reserved_gids_count(struct mlx5_core_dev *dev); |
1104 | int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, | 1118 | int mlx5_core_roce_gid_set(struct mlx5_core_dev *dev, unsigned int index, |
1105 | u8 roce_version, u8 roce_l3_type, const u8 *gid, | 1119 | u8 roce_version, u8 roce_l3_type, const u8 *gid, |
1106 | const u8 *mac, bool vlan, u16 vlan_id); | 1120 | const u8 *mac, bool vlan, u16 vlan_id, u8 port_num); |
1107 | 1121 | ||
1108 | static inline int fw_initializing(struct mlx5_core_dev *dev) | 1122 | static inline int fw_initializing(struct mlx5_core_dev *dev) |
1109 | { | 1123 | { |
@@ -1225,6 +1239,31 @@ static inline bool mlx5_rl_is_supported(struct mlx5_core_dev *dev) | |||
1225 | return !!(dev->priv.rl_table.max_size); | 1239 | return !!(dev->priv.rl_table.max_size); |
1226 | } | 1240 | } |
1227 | 1241 | ||
1242 | static inline int mlx5_core_is_mp_slave(struct mlx5_core_dev *dev) | ||
1243 | { | ||
1244 | return MLX5_CAP_GEN(dev, affiliate_nic_vport_criteria) && | ||
1245 | MLX5_CAP_GEN(dev, num_vhca_ports) <= 1; | ||
1246 | } | ||
1247 | |||
1248 | static inline int mlx5_core_is_mp_master(struct mlx5_core_dev *dev) | ||
1249 | { | ||
1250 | return MLX5_CAP_GEN(dev, num_vhca_ports) > 1; | ||
1251 | } | ||
1252 | |||
1253 | static inline int mlx5_core_mp_enabled(struct mlx5_core_dev *dev) | ||
1254 | { | ||
1255 | return mlx5_core_is_mp_slave(dev) || | ||
1256 | mlx5_core_is_mp_master(dev); | ||
1257 | } | ||
1258 | |||
1259 | static inline int mlx5_core_native_port_num(struct mlx5_core_dev *dev) | ||
1260 | { | ||
1261 | if (!mlx5_core_mp_enabled(dev)) | ||
1262 | return 1; | ||
1263 | |||
1264 | return MLX5_CAP_GEN(dev, native_port_num); | ||
1265 | } | ||
1266 | |||
1228 | enum { | 1267 | enum { |
1229 | MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, | 1268 | MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32, |
1230 | }; | 1269 | }; |
@@ -1238,7 +1277,7 @@ mlx5_get_vector_affinity(struct mlx5_core_dev *dev, int vector) | |||
1238 | int eqn; | 1277 | int eqn; |
1239 | int err; | 1278 | int err; |
1240 | 1279 | ||
1241 | err = mlx5_vector2eqn(dev, vector, &eqn, &irq); | 1280 | err = mlx5_vector2eqn(dev, MLX5_EQ_VEC_COMP_BASE + vector, &eqn, &irq); |
1242 | if (err) | 1281 | if (err) |
1243 | return NULL; | 1282 | return NULL; |
1244 | 1283 | ||
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h index b25e7baa273e..a0b48afcb422 100644 --- a/include/linux/mlx5/fs.h +++ b/include/linux/mlx5/fs.h | |||
@@ -95,6 +95,10 @@ struct mlx5_flow_destination { | |||
95 | struct mlx5_flow_namespace * | 95 | struct mlx5_flow_namespace * |
96 | mlx5_get_flow_namespace(struct mlx5_core_dev *dev, | 96 | mlx5_get_flow_namespace(struct mlx5_core_dev *dev, |
97 | enum mlx5_flow_namespace_type type); | 97 | enum mlx5_flow_namespace_type type); |
98 | struct mlx5_flow_namespace * | ||
99 | mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev, | ||
100 | enum mlx5_flow_namespace_type type, | ||
101 | int vport); | ||
98 | 102 | ||
99 | struct mlx5_flow_table * | 103 | struct mlx5_flow_table * |
100 | mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, | 104 | mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns, |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 1391a82da98e..f4e417686f62 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
@@ -502,7 +502,7 @@ struct mlx5_ifc_ads_bits { | |||
502 | u8 dei_cfi[0x1]; | 502 | u8 dei_cfi[0x1]; |
503 | u8 eth_prio[0x3]; | 503 | u8 eth_prio[0x3]; |
504 | u8 sl[0x4]; | 504 | u8 sl[0x4]; |
505 | u8 port[0x8]; | 505 | u8 vhca_port_num[0x8]; |
506 | u8 rmac_47_32[0x10]; | 506 | u8 rmac_47_32[0x10]; |
507 | 507 | ||
508 | u8 rmac_31_0[0x20]; | 508 | u8 rmac_31_0[0x20]; |
@@ -794,7 +794,10 @@ enum { | |||
794 | }; | 794 | }; |
795 | 795 | ||
796 | struct mlx5_ifc_cmd_hca_cap_bits { | 796 | struct mlx5_ifc_cmd_hca_cap_bits { |
797 | u8 reserved_at_0[0x80]; | 797 | u8 reserved_at_0[0x30]; |
798 | u8 vhca_id[0x10]; | ||
799 | |||
800 | u8 reserved_at_40[0x40]; | ||
798 | 801 | ||
799 | u8 log_max_srq_sz[0x8]; | 802 | u8 log_max_srq_sz[0x8]; |
800 | u8 log_max_qp_sz[0x8]; | 803 | u8 log_max_qp_sz[0x8]; |
@@ -1023,13 +1026,21 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
1023 | u8 reserved_at_3b8[0x3]; | 1026 | u8 reserved_at_3b8[0x3]; |
1024 | u8 log_min_stride_sz_sq[0x5]; | 1027 | u8 log_min_stride_sz_sq[0x5]; |
1025 | 1028 | ||
1026 | u8 reserved_at_3c0[0x1b]; | 1029 | u8 hairpin[0x1]; |
1030 | u8 reserved_at_3c1[0x2]; | ||
1031 | u8 log_max_hairpin_queues[0x5]; | ||
1032 | u8 reserved_at_3c8[0x3]; | ||
1033 | u8 log_max_hairpin_wq_data_sz[0x5]; | ||
1034 | u8 reserved_at_3d0[0x3]; | ||
1035 | u8 log_max_hairpin_num_packets[0x5]; | ||
1036 | u8 reserved_at_3d8[0x3]; | ||
1027 | u8 log_max_wq_sz[0x5]; | 1037 | u8 log_max_wq_sz[0x5]; |
1028 | 1038 | ||
1029 | u8 nic_vport_change_event[0x1]; | 1039 | u8 nic_vport_change_event[0x1]; |
1030 | u8 disable_local_lb_uc[0x1]; | 1040 | u8 disable_local_lb_uc[0x1]; |
1031 | u8 disable_local_lb_mc[0x1]; | 1041 | u8 disable_local_lb_mc[0x1]; |
1032 | u8 reserved_at_3e3[0x8]; | 1042 | u8 log_min_hairpin_wq_data_sz[0x5]; |
1043 | u8 reserved_at_3e8[0x3]; | ||
1033 | u8 log_max_vlan_list[0x5]; | 1044 | u8 log_max_vlan_list[0x5]; |
1034 | u8 reserved_at_3f0[0x3]; | 1045 | u8 reserved_at_3f0[0x3]; |
1035 | u8 log_max_current_mc_list[0x5]; | 1046 | u8 log_max_current_mc_list[0x5]; |
@@ -1067,7 +1078,12 @@ struct mlx5_ifc_cmd_hca_cap_bits { | |||
1067 | u8 reserved_at_5f8[0x3]; | 1078 | u8 reserved_at_5f8[0x3]; |
1068 | u8 log_max_xrq[0x5]; | 1079 | u8 log_max_xrq[0x5]; |
1069 | 1080 | ||
1070 | u8 reserved_at_600[0x200]; | 1081 | u8 affiliate_nic_vport_criteria[0x8]; |
1082 | u8 native_port_num[0x8]; | ||
1083 | u8 num_vhca_ports[0x8]; | ||
1084 | u8 reserved_at_618[0x6]; | ||
1085 | u8 sw_owner_id[0x1]; | ||
1086 | u8 reserved_at_61f[0x1e1]; | ||
1071 | }; | 1087 | }; |
1072 | 1088 | ||
1073 | enum mlx5_flow_destination_type { | 1089 | enum mlx5_flow_destination_type { |
@@ -1163,7 +1179,12 @@ struct mlx5_ifc_wq_bits { | |||
1163 | u8 reserved_at_118[0x3]; | 1179 | u8 reserved_at_118[0x3]; |
1164 | u8 log_wq_sz[0x5]; | 1180 | u8 log_wq_sz[0x5]; |
1165 | 1181 | ||
1166 | u8 reserved_at_120[0x15]; | 1182 | u8 reserved_at_120[0x3]; |
1183 | u8 log_hairpin_num_packets[0x5]; | ||
1184 | u8 reserved_at_128[0x3]; | ||
1185 | u8 log_hairpin_data_sz[0x5]; | ||
1186 | u8 reserved_at_130[0x5]; | ||
1187 | |||
1167 | u8 log_wqe_num_of_strides[0x3]; | 1188 | u8 log_wqe_num_of_strides[0x3]; |
1168 | u8 two_byte_shift_en[0x1]; | 1189 | u8 two_byte_shift_en[0x1]; |
1169 | u8 reserved_at_139[0x4]; | 1190 | u8 reserved_at_139[0x4]; |
@@ -2483,7 +2504,8 @@ struct mlx5_ifc_sqc_bits { | |||
2483 | u8 state[0x4]; | 2504 | u8 state[0x4]; |
2484 | u8 reg_umr[0x1]; | 2505 | u8 reg_umr[0x1]; |
2485 | u8 allow_swp[0x1]; | 2506 | u8 allow_swp[0x1]; |
2486 | u8 reserved_at_e[0x12]; | 2507 | u8 hairpin[0x1]; |
2508 | u8 reserved_at_f[0x11]; | ||
2487 | 2509 | ||
2488 | u8 reserved_at_20[0x8]; | 2510 | u8 reserved_at_20[0x8]; |
2489 | u8 user_index[0x18]; | 2511 | u8 user_index[0x18]; |
@@ -2491,7 +2513,13 @@ struct mlx5_ifc_sqc_bits { | |||
2491 | u8 reserved_at_40[0x8]; | 2513 | u8 reserved_at_40[0x8]; |
2492 | u8 cqn[0x18]; | 2514 | u8 cqn[0x18]; |
2493 | 2515 | ||
2494 | u8 reserved_at_60[0x90]; | 2516 | u8 reserved_at_60[0x8]; |
2517 | u8 hairpin_peer_rq[0x18]; | ||
2518 | |||
2519 | u8 reserved_at_80[0x10]; | ||
2520 | u8 hairpin_peer_vhca[0x10]; | ||
2521 | |||
2522 | u8 reserved_at_a0[0x50]; | ||
2495 | 2523 | ||
2496 | u8 packet_pacing_rate_limit_index[0x10]; | 2524 | u8 packet_pacing_rate_limit_index[0x10]; |
2497 | u8 tis_lst_sz[0x10]; | 2525 | u8 tis_lst_sz[0x10]; |
@@ -2563,7 +2591,8 @@ struct mlx5_ifc_rqc_bits { | |||
2563 | u8 state[0x4]; | 2591 | u8 state[0x4]; |
2564 | u8 reserved_at_c[0x1]; | 2592 | u8 reserved_at_c[0x1]; |
2565 | u8 flush_in_error_en[0x1]; | 2593 | u8 flush_in_error_en[0x1]; |
2566 | u8 reserved_at_e[0x12]; | 2594 | u8 hairpin[0x1]; |
2595 | u8 reserved_at_f[0x11]; | ||
2567 | 2596 | ||
2568 | u8 reserved_at_20[0x8]; | 2597 | u8 reserved_at_20[0x8]; |
2569 | u8 user_index[0x18]; | 2598 | u8 user_index[0x18]; |
@@ -2577,7 +2606,13 @@ struct mlx5_ifc_rqc_bits { | |||
2577 | u8 reserved_at_80[0x8]; | 2606 | u8 reserved_at_80[0x8]; |
2578 | u8 rmpn[0x18]; | 2607 | u8 rmpn[0x18]; |
2579 | 2608 | ||
2580 | u8 reserved_at_a0[0xe0]; | 2609 | u8 reserved_at_a0[0x8]; |
2610 | u8 hairpin_peer_sq[0x18]; | ||
2611 | |||
2612 | u8 reserved_at_c0[0x10]; | ||
2613 | u8 hairpin_peer_vhca[0x10]; | ||
2614 | |||
2615 | u8 reserved_at_e0[0xa0]; | ||
2581 | 2616 | ||
2582 | struct mlx5_ifc_wq_bits wq; | 2617 | struct mlx5_ifc_wq_bits wq; |
2583 | }; | 2618 | }; |
@@ -2616,7 +2651,12 @@ struct mlx5_ifc_nic_vport_context_bits { | |||
2616 | u8 event_on_mc_address_change[0x1]; | 2651 | u8 event_on_mc_address_change[0x1]; |
2617 | u8 event_on_uc_address_change[0x1]; | 2652 | u8 event_on_uc_address_change[0x1]; |
2618 | 2653 | ||
2619 | u8 reserved_at_40[0xf0]; | 2654 | u8 reserved_at_40[0xc]; |
2655 | |||
2656 | u8 affiliation_criteria[0x4]; | ||
2657 | u8 affiliated_vhca_id[0x10]; | ||
2658 | |||
2659 | u8 reserved_at_60[0xd0]; | ||
2620 | 2660 | ||
2621 | u8 mtu[0x10]; | 2661 | u8 mtu[0x10]; |
2622 | 2662 | ||
@@ -3259,7 +3299,8 @@ struct mlx5_ifc_set_roce_address_in_bits { | |||
3259 | u8 op_mod[0x10]; | 3299 | u8 op_mod[0x10]; |
3260 | 3300 | ||
3261 | u8 roce_address_index[0x10]; | 3301 | u8 roce_address_index[0x10]; |
3262 | u8 reserved_at_50[0x10]; | 3302 | u8 reserved_at_50[0xc]; |
3303 | u8 vhca_port_num[0x4]; | ||
3263 | 3304 | ||
3264 | u8 reserved_at_60[0x20]; | 3305 | u8 reserved_at_60[0x20]; |
3265 | 3306 | ||
@@ -3879,7 +3920,8 @@ struct mlx5_ifc_query_roce_address_in_bits { | |||
3879 | u8 op_mod[0x10]; | 3920 | u8 op_mod[0x10]; |
3880 | 3921 | ||
3881 | u8 roce_address_index[0x10]; | 3922 | u8 roce_address_index[0x10]; |
3882 | u8 reserved_at_50[0x10]; | 3923 | u8 reserved_at_50[0xc]; |
3924 | u8 vhca_port_num[0x4]; | ||
3883 | 3925 | ||
3884 | u8 reserved_at_60[0x20]; | 3926 | u8 reserved_at_60[0x20]; |
3885 | }; | 3927 | }; |
@@ -5311,7 +5353,9 @@ struct mlx5_ifc_modify_nic_vport_context_out_bits { | |||
5311 | }; | 5353 | }; |
5312 | 5354 | ||
5313 | struct mlx5_ifc_modify_nic_vport_field_select_bits { | 5355 | struct mlx5_ifc_modify_nic_vport_field_select_bits { |
5314 | u8 reserved_at_0[0x14]; | 5356 | u8 reserved_at_0[0x12]; |
5357 | u8 affiliation[0x1]; | ||
5358 | u8 reserved_at_e[0x1]; | ||
5315 | u8 disable_uc_local_lb[0x1]; | 5359 | u8 disable_uc_local_lb[0x1]; |
5316 | u8 disable_mc_local_lb[0x1]; | 5360 | u8 disable_mc_local_lb[0x1]; |
5317 | u8 node_guid[0x1]; | 5361 | u8 node_guid[0x1]; |
@@ -5532,6 +5576,7 @@ struct mlx5_ifc_init_hca_in_bits { | |||
5532 | u8 op_mod[0x10]; | 5576 | u8 op_mod[0x10]; |
5533 | 5577 | ||
5534 | u8 reserved_at_40[0x40]; | 5578 | u8 reserved_at_40[0x40]; |
5579 | u8 sw_owner_id[4][0x20]; | ||
5535 | }; | 5580 | }; |
5536 | 5581 | ||
5537 | struct mlx5_ifc_init2rtr_qp_out_bits { | 5582 | struct mlx5_ifc_init2rtr_qp_out_bits { |
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index 62af7512dabb..4778d41085d4 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
@@ -473,6 +473,11 @@ struct mlx5_core_qp { | |||
473 | int pid; | 473 | int pid; |
474 | }; | 474 | }; |
475 | 475 | ||
476 | struct mlx5_core_dct { | ||
477 | struct mlx5_core_qp mqp; | ||
478 | struct completion drained; | ||
479 | }; | ||
480 | |||
476 | struct mlx5_qp_path { | 481 | struct mlx5_qp_path { |
477 | u8 fl_free_ar; | 482 | u8 fl_free_ar; |
478 | u8 rsvd3; | 483 | u8 rsvd3; |
@@ -549,6 +554,9 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, | |||
549 | return radix_tree_lookup(&dev->priv.mkey_table.tree, key); | 554 | return radix_tree_lookup(&dev->priv.mkey_table.tree, key); |
550 | } | 555 | } |
551 | 556 | ||
557 | int mlx5_core_create_dct(struct mlx5_core_dev *dev, | ||
558 | struct mlx5_core_dct *qp, | ||
559 | u32 *in, int inlen); | ||
552 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 560 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
553 | struct mlx5_core_qp *qp, | 561 | struct mlx5_core_qp *qp, |
554 | u32 *in, | 562 | u32 *in, |
@@ -558,8 +566,12 @@ int mlx5_core_qp_modify(struct mlx5_core_dev *dev, u16 opcode, | |||
558 | struct mlx5_core_qp *qp); | 566 | struct mlx5_core_qp *qp); |
559 | int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, | 567 | int mlx5_core_destroy_qp(struct mlx5_core_dev *dev, |
560 | struct mlx5_core_qp *qp); | 568 | struct mlx5_core_qp *qp); |
569 | int mlx5_core_destroy_dct(struct mlx5_core_dev *dev, | ||
570 | struct mlx5_core_dct *dct); | ||
561 | int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, | 571 | int mlx5_core_qp_query(struct mlx5_core_dev *dev, struct mlx5_core_qp *qp, |
562 | u32 *out, int outlen); | 572 | u32 *out, int outlen); |
573 | int mlx5_core_dct_query(struct mlx5_core_dev *dev, struct mlx5_core_dct *dct, | ||
574 | u32 *out, int outlen); | ||
563 | 575 | ||
564 | int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, | 576 | int mlx5_core_set_delay_drop(struct mlx5_core_dev *dev, |
565 | u32 timeout_usec); | 577 | u32 timeout_usec); |
diff --git a/include/linux/mlx5/transobj.h b/include/linux/mlx5/transobj.h index 88441f5ece25..7e8f281f8c00 100644 --- a/include/linux/mlx5/transobj.h +++ b/include/linux/mlx5/transobj.h | |||
@@ -75,4 +75,27 @@ int mlx5_core_modify_rqt(struct mlx5_core_dev *dev, u32 rqtn, u32 *in, | |||
75 | int inlen); | 75 | int inlen); |
76 | void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); | 76 | void mlx5_core_destroy_rqt(struct mlx5_core_dev *dev, u32 rqtn); |
77 | 77 | ||
78 | struct mlx5_hairpin_params { | ||
79 | u8 log_data_size; | ||
80 | u8 log_num_packets; | ||
81 | u16 q_counter; | ||
82 | int num_channels; | ||
83 | }; | ||
84 | |||
85 | struct mlx5_hairpin { | ||
86 | struct mlx5_core_dev *func_mdev; | ||
87 | struct mlx5_core_dev *peer_mdev; | ||
88 | |||
89 | int num_channels; | ||
90 | |||
91 | u32 *rqn; | ||
92 | u32 *sqn; | ||
93 | }; | ||
94 | |||
95 | struct mlx5_hairpin * | ||
96 | mlx5_core_hairpin_create(struct mlx5_core_dev *func_mdev, | ||
97 | struct mlx5_core_dev *peer_mdev, | ||
98 | struct mlx5_hairpin_params *params); | ||
99 | |||
100 | void mlx5_core_hairpin_destroy(struct mlx5_hairpin *pair); | ||
78 | #endif /* __TRANSOBJ_H__ */ | 101 | #endif /* __TRANSOBJ_H__ */ |
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index aaa0bb9e7655..64e193e87394 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h | |||
@@ -116,4 +116,8 @@ int mlx5_core_modify_hca_vport_context(struct mlx5_core_dev *dev, | |||
116 | struct mlx5_hca_vport_context *req); | 116 | struct mlx5_hca_vport_context *req); |
117 | int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); | 117 | int mlx5_nic_vport_update_local_lb(struct mlx5_core_dev *mdev, bool enable); |
118 | int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); | 118 | int mlx5_nic_vport_query_local_lb(struct mlx5_core_dev *mdev, bool *status); |
119 | |||
120 | int mlx5_nic_vport_affiliate_multiport(struct mlx5_core_dev *master_mdev, | ||
121 | struct mlx5_core_dev *port_mdev); | ||
122 | int mlx5_nic_vport_unaffiliate_multiport(struct mlx5_core_dev *port_mdev); | ||
119 | #endif /* __MLX5_VPORT_H__ */ | 123 | #endif /* __MLX5_VPORT_H__ */ |
diff --git a/include/linux/mm.h b/include/linux/mm.h index ea818ff739cd..ad06d42adb1a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -1312,8 +1312,6 @@ void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, | |||
1312 | unsigned long end, unsigned long floor, unsigned long ceiling); | 1312 | unsigned long end, unsigned long floor, unsigned long ceiling); |
1313 | int copy_page_range(struct mm_struct *dst, struct mm_struct *src, | 1313 | int copy_page_range(struct mm_struct *dst, struct mm_struct *src, |
1314 | struct vm_area_struct *vma); | 1314 | struct vm_area_struct *vma); |
1315 | void unmap_mapping_range(struct address_space *mapping, | ||
1316 | loff_t const holebegin, loff_t const holelen, int even_cows); | ||
1317 | int follow_pte_pmd(struct mm_struct *mm, unsigned long address, | 1315 | int follow_pte_pmd(struct mm_struct *mm, unsigned long address, |
1318 | unsigned long *start, unsigned long *end, | 1316 | unsigned long *start, unsigned long *end, |
1319 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); | 1317 | pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); |
@@ -1324,12 +1322,6 @@ int follow_phys(struct vm_area_struct *vma, unsigned long address, | |||
1324 | int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, | 1322 | int generic_access_phys(struct vm_area_struct *vma, unsigned long addr, |
1325 | void *buf, int len, int write); | 1323 | void *buf, int len, int write); |
1326 | 1324 | ||
1327 | static inline void unmap_shared_mapping_range(struct address_space *mapping, | ||
1328 | loff_t const holebegin, loff_t const holelen) | ||
1329 | { | ||
1330 | unmap_mapping_range(mapping, holebegin, holelen, 0); | ||
1331 | } | ||
1332 | |||
1333 | extern void truncate_pagecache(struct inode *inode, loff_t new); | 1325 | extern void truncate_pagecache(struct inode *inode, loff_t new); |
1334 | extern void truncate_setsize(struct inode *inode, loff_t newsize); | 1326 | extern void truncate_setsize(struct inode *inode, loff_t newsize); |
1335 | void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); | 1327 | void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to); |
@@ -1344,6 +1336,10 @@ extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
1344 | extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, | 1336 | extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm, |
1345 | unsigned long address, unsigned int fault_flags, | 1337 | unsigned long address, unsigned int fault_flags, |
1346 | bool *unlocked); | 1338 | bool *unlocked); |
1339 | void unmap_mapping_pages(struct address_space *mapping, | ||
1340 | pgoff_t start, pgoff_t nr, bool even_cows); | ||
1341 | void unmap_mapping_range(struct address_space *mapping, | ||
1342 | loff_t const holebegin, loff_t const holelen, int even_cows); | ||
1347 | #else | 1343 | #else |
1348 | static inline int handle_mm_fault(struct vm_area_struct *vma, | 1344 | static inline int handle_mm_fault(struct vm_area_struct *vma, |
1349 | unsigned long address, unsigned int flags) | 1345 | unsigned long address, unsigned int flags) |
@@ -1360,10 +1356,20 @@ static inline int fixup_user_fault(struct task_struct *tsk, | |||
1360 | BUG(); | 1356 | BUG(); |
1361 | return -EFAULT; | 1357 | return -EFAULT; |
1362 | } | 1358 | } |
1359 | static inline void unmap_mapping_pages(struct address_space *mapping, | ||
1360 | pgoff_t start, pgoff_t nr, bool even_cows) { } | ||
1361 | static inline void unmap_mapping_range(struct address_space *mapping, | ||
1362 | loff_t const holebegin, loff_t const holelen, int even_cows) { } | ||
1363 | #endif | 1363 | #endif |
1364 | 1364 | ||
1365 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, | 1365 | static inline void unmap_shared_mapping_range(struct address_space *mapping, |
1366 | unsigned int gup_flags); | 1366 | loff_t const holebegin, loff_t const holelen) |
1367 | { | ||
1368 | unmap_mapping_range(mapping, holebegin, holelen, 0); | ||
1369 | } | ||
1370 | |||
1371 | extern int access_process_vm(struct task_struct *tsk, unsigned long addr, | ||
1372 | void *buf, int len, unsigned int gup_flags); | ||
1367 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, | 1373 | extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, |
1368 | void *buf, int len, unsigned int gup_flags); | 1374 | void *buf, int len, unsigned int gup_flags); |
1369 | extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, | 1375 | extern int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm, |
@@ -2069,8 +2075,8 @@ static inline void zero_resv_unavail(void) {} | |||
2069 | #endif | 2075 | #endif |
2070 | 2076 | ||
2071 | extern void set_dma_reserve(unsigned long new_dma_reserve); | 2077 | extern void set_dma_reserve(unsigned long new_dma_reserve); |
2072 | extern void memmap_init_zone(unsigned long, int, unsigned long, | 2078 | extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, |
2073 | unsigned long, enum memmap_context); | 2079 | enum memmap_context, struct vmem_altmap *); |
2074 | extern void setup_per_zone_wmarks(void); | 2080 | extern void setup_per_zone_wmarks(void); |
2075 | extern int __meminit init_per_zone_wmark_min(void); | 2081 | extern int __meminit init_per_zone_wmark_min(void); |
2076 | extern void mem_init(void); | 2082 | extern void mem_init(void); |
@@ -2538,7 +2544,8 @@ void sparse_mem_maps_populate_node(struct page **map_map, | |||
2538 | unsigned long map_count, | 2544 | unsigned long map_count, |
2539 | int nodeid); | 2545 | int nodeid); |
2540 | 2546 | ||
2541 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid); | 2547 | struct page *sparse_mem_map_populate(unsigned long pnum, int nid, |
2548 | struct vmem_altmap *altmap); | ||
2542 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); | 2549 | pgd_t *vmemmap_pgd_populate(unsigned long addr, int node); |
2543 | p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); | 2550 | p4d_t *vmemmap_p4d_populate(pgd_t *pgd, unsigned long addr, int node); |
2544 | pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); | 2551 | pud_t *vmemmap_pud_populate(p4d_t *p4d, unsigned long addr, int node); |
@@ -2546,20 +2553,17 @@ pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node); | |||
2546 | pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); | 2553 | pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node); |
2547 | void *vmemmap_alloc_block(unsigned long size, int node); | 2554 | void *vmemmap_alloc_block(unsigned long size, int node); |
2548 | struct vmem_altmap; | 2555 | struct vmem_altmap; |
2549 | void *__vmemmap_alloc_block_buf(unsigned long size, int node, | 2556 | void *vmemmap_alloc_block_buf(unsigned long size, int node); |
2550 | struct vmem_altmap *altmap); | 2557 | void *altmap_alloc_block_buf(unsigned long size, struct vmem_altmap *altmap); |
2551 | static inline void *vmemmap_alloc_block_buf(unsigned long size, int node) | ||
2552 | { | ||
2553 | return __vmemmap_alloc_block_buf(size, node, NULL); | ||
2554 | } | ||
2555 | |||
2556 | void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); | 2558 | void vmemmap_verify(pte_t *, int, unsigned long, unsigned long); |
2557 | int vmemmap_populate_basepages(unsigned long start, unsigned long end, | 2559 | int vmemmap_populate_basepages(unsigned long start, unsigned long end, |
2558 | int node); | 2560 | int node); |
2559 | int vmemmap_populate(unsigned long start, unsigned long end, int node); | 2561 | int vmemmap_populate(unsigned long start, unsigned long end, int node, |
2562 | struct vmem_altmap *altmap); | ||
2560 | void vmemmap_populate_print_last(void); | 2563 | void vmemmap_populate_print_last(void); |
2561 | #ifdef CONFIG_MEMORY_HOTPLUG | 2564 | #ifdef CONFIG_MEMORY_HOTPLUG |
2562 | void vmemmap_free(unsigned long start, unsigned long end); | 2565 | void vmemmap_free(unsigned long start, unsigned long end, |
2566 | struct vmem_altmap *altmap); | ||
2563 | #endif | 2567 | #endif |
2564 | void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, | 2568 | void register_page_bootmem_memmap(unsigned long section_nr, struct page *map, |
2565 | unsigned long nr_pages); | 2569 | unsigned long nr_pages); |
@@ -2570,8 +2574,8 @@ enum mf_flags { | |||
2570 | MF_MUST_KILL = 1 << 2, | 2574 | MF_MUST_KILL = 1 << 2, |
2571 | MF_SOFT_OFFLINE = 1 << 3, | 2575 | MF_SOFT_OFFLINE = 1 << 3, |
2572 | }; | 2576 | }; |
2573 | extern int memory_failure(unsigned long pfn, int trapno, int flags); | 2577 | extern int memory_failure(unsigned long pfn, int flags); |
2574 | extern void memory_failure_queue(unsigned long pfn, int trapno, int flags); | 2578 | extern void memory_failure_queue(unsigned long pfn, int flags); |
2575 | extern int unpoison_memory(unsigned long pfn); | 2579 | extern int unpoison_memory(unsigned long pfn); |
2576 | extern int get_hwpoison_page(struct page *page); | 2580 | extern int get_hwpoison_page(struct page *page); |
2577 | #define put_hwpoison_page(page) put_page(page) | 2581 | #define put_hwpoison_page(page) put_page(page) |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index cfd0ac4e5e0e..fd1af6b9591d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -31,28 +31,56 @@ struct hmm; | |||
31 | * it to keep track of whatever it is we are using the page for at the | 31 | * it to keep track of whatever it is we are using the page for at the |
32 | * moment. Note that we have no way to track which tasks are using | 32 | * moment. Note that we have no way to track which tasks are using |
33 | * a page, though if it is a pagecache page, rmap structures can tell us | 33 | * a page, though if it is a pagecache page, rmap structures can tell us |
34 | * who is mapping it. | 34 | * who is mapping it. If you allocate the page using alloc_pages(), you |
35 | * can use some of the space in struct page for your own purposes. | ||
35 | * | 36 | * |
36 | * The objects in struct page are organized in double word blocks in | 37 | * Pages that were once in the page cache may be found under the RCU lock |
37 | * order to allows us to use atomic double word operations on portions | 38 | * even after they have been recycled to a different purpose. The page |
38 | * of struct page. That is currently only used by slub but the arrangement | 39 | * cache reads and writes some of the fields in struct page to pin the |
39 | * allows the use of atomic double word operations on the flags/mapping | 40 | * page before checking that it's still in the page cache. It is vital |
40 | * and lru list pointers also. | 41 | * that all users of struct page: |
42 | * 1. Use the first word as PageFlags. | ||
43 | * 2. Clear or preserve bit 0 of page->compound_head. It is used as | ||
44 | * PageTail for compound pages, and the page cache must not see false | ||
45 | * positives. Some users put a pointer here (guaranteed to be at least | ||
46 | * 4-byte aligned), other users avoid using the field altogether. | ||
47 | * 3. page->_refcount must either not be used, or must be used in such a | ||
48 | * way that other CPUs temporarily incrementing and then decrementing the | ||
49 | * refcount does not cause problems. On receiving the page from | ||
50 | * alloc_pages(), the refcount will be positive. | ||
51 | * 4. Either preserve page->_mapcount or restore it to -1 before freeing it. | ||
52 | * | ||
53 | * If you allocate pages of order > 0, you can use the fields in the struct | ||
54 | * page associated with each page, but bear in mind that the pages may have | ||
55 | * been inserted individually into the page cache, so you must use the above | ||
56 | * four fields in a compatible way for each struct page. | ||
57 | * | ||
58 | * SLUB uses cmpxchg_double() to atomically update its freelist and | ||
59 | * counters. That requires that freelist & counters be adjacent and | ||
60 | * double-word aligned. We align all struct pages to double-word | ||
61 | * boundaries, and ensure that 'freelist' is aligned within the | ||
62 | * struct. | ||
41 | */ | 63 | */ |
64 | #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE | ||
65 | #define _struct_page_alignment __aligned(2 * sizeof(unsigned long)) | ||
66 | #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) | ||
67 | #define _slub_counter_t unsigned long | ||
68 | #else | ||
69 | #define _slub_counter_t unsigned int | ||
70 | #endif | ||
71 | #else /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */ | ||
72 | #define _struct_page_alignment | ||
73 | #define _slub_counter_t unsigned int | ||
74 | #endif /* !CONFIG_HAVE_ALIGNED_STRUCT_PAGE */ | ||
75 | |||
42 | struct page { | 76 | struct page { |
43 | /* First double word block */ | 77 | /* First double word block */ |
44 | unsigned long flags; /* Atomic flags, some possibly | 78 | unsigned long flags; /* Atomic flags, some possibly |
45 | * updated asynchronously */ | 79 | * updated asynchronously */ |
46 | union { | 80 | union { |
47 | struct address_space *mapping; /* If low bit clear, points to | 81 | /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */ |
48 | * inode address_space, or NULL. | 82 | struct address_space *mapping; |
49 | * If page mapped as anonymous | 83 | |
50 | * memory, low bit is set, and | ||
51 | * it points to anon_vma object | ||
52 | * or KSM private structure. See | ||
53 | * PAGE_MAPPING_ANON and | ||
54 | * PAGE_MAPPING_KSM. | ||
55 | */ | ||
56 | void *s_mem; /* slab first object */ | 84 | void *s_mem; /* slab first object */ |
57 | atomic_t compound_mapcount; /* first tail page */ | 85 | atomic_t compound_mapcount; /* first tail page */ |
58 | /* page_deferred_list().next -- second tail page */ | 86 | /* page_deferred_list().next -- second tail page */ |
@@ -66,40 +94,27 @@ struct page { | |||
66 | }; | 94 | }; |
67 | 95 | ||
68 | union { | 96 | union { |
69 | #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \ | 97 | _slub_counter_t counters; |
70 | defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE) | 98 | unsigned int active; /* SLAB */ |
71 | /* Used for cmpxchg_double in slub */ | 99 | struct { /* SLUB */ |
72 | unsigned long counters; | 100 | unsigned inuse:16; |
73 | #else | 101 | unsigned objects:15; |
74 | /* | 102 | unsigned frozen:1; |
75 | * Keep _refcount separate from slub cmpxchg_double data. | 103 | }; |
76 | * As the rest of the double word is protected by slab_lock | 104 | int units; /* SLOB */ |
77 | * but _refcount is not. | 105 | |
78 | */ | 106 | struct { /* Page cache */ |
79 | unsigned counters; | 107 | /* |
80 | #endif | 108 | * Count of ptes mapped in mms, to show when |
81 | struct { | 109 | * page is mapped & limit reverse map searches. |
110 | * | ||
111 | * Extra information about page type may be | ||
112 | * stored here for pages that are never mapped, | ||
113 | * in which case the value MUST BE <= -2. | ||
114 | * See page-flags.h for more details. | ||
115 | */ | ||
116 | atomic_t _mapcount; | ||
82 | 117 | ||
83 | union { | ||
84 | /* | ||
85 | * Count of ptes mapped in mms, to show when | ||
86 | * page is mapped & limit reverse map searches. | ||
87 | * | ||
88 | * Extra information about page type may be | ||
89 | * stored here for pages that are never mapped, | ||
90 | * in which case the value MUST BE <= -2. | ||
91 | * See page-flags.h for more details. | ||
92 | */ | ||
93 | atomic_t _mapcount; | ||
94 | |||
95 | unsigned int active; /* SLAB */ | ||
96 | struct { /* SLUB */ | ||
97 | unsigned inuse:16; | ||
98 | unsigned objects:15; | ||
99 | unsigned frozen:1; | ||
100 | }; | ||
101 | int units; /* SLOB */ | ||
102 | }; | ||
103 | /* | 118 | /* |
104 | * Usage count, *USE WRAPPER FUNCTION* when manual | 119 | * Usage count, *USE WRAPPER FUNCTION* when manual |
105 | * accounting. See page_ref.h | 120 | * accounting. See page_ref.h |
@@ -109,8 +124,6 @@ struct page { | |||
109 | }; | 124 | }; |
110 | 125 | ||
111 | /* | 126 | /* |
112 | * Third double word block | ||
113 | * | ||
114 | * WARNING: bit 0 of the first word encode PageTail(). That means | 127 | * WARNING: bit 0 of the first word encode PageTail(). That means |
115 | * the rest users of the storage space MUST NOT use the bit to | 128 | * the rest users of the storage space MUST NOT use the bit to |
116 | * avoid collision and false-positive PageTail(). | 129 | * avoid collision and false-positive PageTail(). |
@@ -145,19 +158,9 @@ struct page { | |||
145 | unsigned long compound_head; /* If bit zero is set */ | 158 | unsigned long compound_head; /* If bit zero is set */ |
146 | 159 | ||
147 | /* First tail page only */ | 160 | /* First tail page only */ |
148 | #ifdef CONFIG_64BIT | 161 | unsigned char compound_dtor; |
149 | /* | 162 | unsigned char compound_order; |
150 | * On 64 bit system we have enough space in struct page | 163 | /* two/six bytes available here */ |
151 | * to encode compound_dtor and compound_order with | ||
152 | * unsigned int. It can help compiler generate better or | ||
153 | * smaller code on some archtectures. | ||
154 | */ | ||
155 | unsigned int compound_dtor; | ||
156 | unsigned int compound_order; | ||
157 | #else | ||
158 | unsigned short int compound_dtor; | ||
159 | unsigned short int compound_order; | ||
160 | #endif | ||
161 | }; | 164 | }; |
162 | 165 | ||
163 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS | 166 | #if defined(CONFIG_TRANSPARENT_HUGEPAGE) && USE_SPLIT_PMD_PTLOCKS |
@@ -171,15 +174,14 @@ struct page { | |||
171 | #endif | 174 | #endif |
172 | }; | 175 | }; |
173 | 176 | ||
174 | /* Remainder is not double word aligned */ | ||
175 | union { | 177 | union { |
176 | unsigned long private; /* Mapping-private opaque data: | 178 | /* |
177 | * usually used for buffer_heads | 179 | * Mapping-private opaque data: |
178 | * if PagePrivate set; used for | 180 | * Usually used for buffer_heads if PagePrivate |
179 | * swp_entry_t if PageSwapCache; | 181 | * Used for swp_entry_t if PageSwapCache |
180 | * indicates order in the buddy | 182 | * Indicates order in the buddy system if PageBuddy |
181 | * system if PG_buddy is set. | 183 | */ |
182 | */ | 184 | unsigned long private; |
183 | #if USE_SPLIT_PTE_PTLOCKS | 185 | #if USE_SPLIT_PTE_PTLOCKS |
184 | #if ALLOC_SPLIT_PTLOCKS | 186 | #if ALLOC_SPLIT_PTLOCKS |
185 | spinlock_t *ptl; | 187 | spinlock_t *ptl; |
@@ -212,15 +214,7 @@ struct page { | |||
212 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS | 214 | #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS |
213 | int _last_cpupid; | 215 | int _last_cpupid; |
214 | #endif | 216 | #endif |
215 | } | 217 | } _struct_page_alignment; |
216 | /* | ||
217 | * The struct page can be forced to be double word aligned so that atomic ops | ||
218 | * on double words work. The SLUB allocator can make use of such a feature. | ||
219 | */ | ||
220 | #ifdef CONFIG_HAVE_ALIGNED_STRUCT_PAGE | ||
221 | __aligned(2 * sizeof(unsigned long)) | ||
222 | #endif | ||
223 | ; | ||
224 | 218 | ||
225 | #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) | 219 | #define PAGE_FRAG_CACHE_MAX_SIZE __ALIGN_MASK(32768, ~PAGE_MASK) |
226 | #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) | 220 | #define PAGE_FRAG_CACHE_MAX_ORDER get_order(PAGE_FRAG_CACHE_MAX_SIZE) |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index e7743eca1021..85146235231e 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
@@ -324,6 +324,7 @@ struct mmc_host { | |||
324 | #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ | 324 | #define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */ |
325 | #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ | 325 | #define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */ |
326 | #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ | 326 | #define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */ |
327 | #define MMC_CAP_DONE_COMPLETE (1 << 27) /* RW reqs can be completed within mmc_request_done() */ | ||
327 | #define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ | 328 | #define MMC_CAP_CD_WAKE (1 << 28) /* Enable card detect wake */ |
328 | #define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ | 329 | #define MMC_CAP_CMD_DURING_TFR (1 << 29) /* Commands during data transfer */ |
329 | #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */ | 330 | #define MMC_CAP_CMD23 (1 << 30) /* CMD23 supported. */ |
@@ -380,6 +381,7 @@ struct mmc_host { | |||
380 | unsigned int doing_retune:1; /* re-tuning in progress */ | 381 | unsigned int doing_retune:1; /* re-tuning in progress */ |
381 | unsigned int retune_now:1; /* do re-tuning at next req */ | 382 | unsigned int retune_now:1; /* do re-tuning at next req */ |
382 | unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ | 383 | unsigned int retune_paused:1; /* re-tuning is temporarily disabled */ |
384 | unsigned int use_blk_mq:1; /* use blk-mq */ | ||
383 | 385 | ||
384 | int rescan_disable; /* disable card detection */ | 386 | int rescan_disable; /* disable card detection */ |
385 | int rescan_entered; /* used with nonremovable devices */ | 387 | int rescan_entered; /* used with nonremovable devices */ |
@@ -422,9 +424,6 @@ struct mmc_host { | |||
422 | 424 | ||
423 | struct dentry *debugfs_root; | 425 | struct dentry *debugfs_root; |
424 | 426 | ||
425 | struct mmc_async_req *areq; /* active async req */ | ||
426 | struct mmc_context_info context_info; /* async synchronization info */ | ||
427 | |||
428 | /* Ongoing data transfer that allows commands during transfer */ | 427 | /* Ongoing data transfer that allows commands during transfer */ |
429 | struct mmc_request *ongoing_mrq; | 428 | struct mmc_request *ongoing_mrq; |
430 | 429 | ||
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index 82f0d289f110..91f1ba0663c8 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h | |||
@@ -33,5 +33,6 @@ void mmc_gpio_set_cd_isr(struct mmc_host *host, | |||
33 | irqreturn_t (*isr)(int irq, void *dev_id)); | 33 | irqreturn_t (*isr)(int irq, void *dev_id)); |
34 | void mmc_gpiod_request_cd_irq(struct mmc_host *host); | 34 | void mmc_gpiod_request_cd_irq(struct mmc_host *host); |
35 | bool mmc_can_gpio_cd(struct mmc_host *host); | 35 | bool mmc_can_gpio_cd(struct mmc_host *host); |
36 | bool mmc_can_gpio_ro(struct mmc_host *host); | ||
36 | 37 | ||
37 | #endif | 38 | #endif |
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h index b25dc9db19fc..2d07a1ed5a31 100644 --- a/include/linux/mmu_notifier.h +++ b/include/linux/mmu_notifier.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #ifndef _LINUX_MMU_NOTIFIER_H | 2 | #ifndef _LINUX_MMU_NOTIFIER_H |
3 | #define _LINUX_MMU_NOTIFIER_H | 3 | #define _LINUX_MMU_NOTIFIER_H |
4 | 4 | ||
5 | #include <linux/types.h> | ||
5 | #include <linux/list.h> | 6 | #include <linux/list.h> |
6 | #include <linux/spinlock.h> | 7 | #include <linux/spinlock.h> |
7 | #include <linux/mm_types.h> | 8 | #include <linux/mm_types.h> |
@@ -10,6 +11,9 @@ | |||
10 | struct mmu_notifier; | 11 | struct mmu_notifier; |
11 | struct mmu_notifier_ops; | 12 | struct mmu_notifier_ops; |
12 | 13 | ||
14 | /* mmu_notifier_ops flags */ | ||
15 | #define MMU_INVALIDATE_DOES_NOT_BLOCK (0x01) | ||
16 | |||
13 | #ifdef CONFIG_MMU_NOTIFIER | 17 | #ifdef CONFIG_MMU_NOTIFIER |
14 | 18 | ||
15 | /* | 19 | /* |
@@ -27,6 +31,15 @@ struct mmu_notifier_mm { | |||
27 | 31 | ||
28 | struct mmu_notifier_ops { | 32 | struct mmu_notifier_ops { |
29 | /* | 33 | /* |
34 | * Flags to specify behavior of callbacks for this MMU notifier. | ||
35 | * Used to determine which context an operation may be called. | ||
36 | * | ||
37 | * MMU_INVALIDATE_DOES_NOT_BLOCK: invalidate_range_* callbacks do not | ||
38 | * block | ||
39 | */ | ||
40 | int flags; | ||
41 | |||
42 | /* | ||
30 | * Called either by mmu_notifier_unregister or when the mm is | 43 | * Called either by mmu_notifier_unregister or when the mm is |
31 | * being destroyed by exit_mmap, always before all pages are | 44 | * being destroyed by exit_mmap, always before all pages are |
32 | * freed. This can run concurrently with other mmu notifier | 45 | * freed. This can run concurrently with other mmu notifier |
@@ -137,6 +150,10 @@ struct mmu_notifier_ops { | |||
137 | * page. Pages will no longer be referenced by the linux | 150 | * page. Pages will no longer be referenced by the linux |
138 | * address space but may still be referenced by sptes until | 151 | * address space but may still be referenced by sptes until |
139 | * the last refcount is dropped. | 152 | * the last refcount is dropped. |
153 | * | ||
154 | * If both of these callbacks cannot block, and invalidate_range | ||
155 | * cannot block, mmu_notifier_ops.flags should have | ||
156 | * MMU_INVALIDATE_DOES_NOT_BLOCK set. | ||
140 | */ | 157 | */ |
141 | void (*invalidate_range_start)(struct mmu_notifier *mn, | 158 | void (*invalidate_range_start)(struct mmu_notifier *mn, |
142 | struct mm_struct *mm, | 159 | struct mm_struct *mm, |
@@ -159,12 +176,13 @@ struct mmu_notifier_ops { | |||
159 | * external TLB range needs to be flushed. For more in depth | 176 | * external TLB range needs to be flushed. For more in depth |
160 | * discussion on this see Documentation/vm/mmu_notifier.txt | 177 | * discussion on this see Documentation/vm/mmu_notifier.txt |
161 | * | 178 | * |
162 | * The invalidate_range() function is called under the ptl | ||
163 | * spin-lock and not allowed to sleep. | ||
164 | * | ||
165 | * Note that this function might be called with just a sub-range | 179 | * Note that this function might be called with just a sub-range |
166 | * of what was passed to invalidate_range_start()/end(), if | 180 | * of what was passed to invalidate_range_start()/end(), if |
167 | * called between those functions. | 181 | * called between those functions. |
182 | * | ||
183 | * If this callback cannot block, and invalidate_range_{start,end} | ||
184 | * cannot block, mmu_notifier_ops.flags should have | ||
185 | * MMU_INVALIDATE_DOES_NOT_BLOCK set. | ||
168 | */ | 186 | */ |
169 | void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, | 187 | void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm, |
170 | unsigned long start, unsigned long end); | 188 | unsigned long start, unsigned long end); |
@@ -218,6 +236,7 @@ extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm, | |||
218 | bool only_end); | 236 | bool only_end); |
219 | extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, | 237 | extern void __mmu_notifier_invalidate_range(struct mm_struct *mm, |
220 | unsigned long start, unsigned long end); | 238 | unsigned long start, unsigned long end); |
239 | extern bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm); | ||
221 | 240 | ||
222 | static inline void mmu_notifier_release(struct mm_struct *mm) | 241 | static inline void mmu_notifier_release(struct mm_struct *mm) |
223 | { | 242 | { |
@@ -457,6 +476,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm, | |||
457 | { | 476 | { |
458 | } | 477 | } |
459 | 478 | ||
479 | static inline bool mm_has_blockable_invalidate_notifiers(struct mm_struct *mm) | ||
480 | { | ||
481 | return false; | ||
482 | } | ||
483 | |||
460 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) | 484 | static inline void mmu_notifier_mm_init(struct mm_struct *mm) |
461 | { | 485 | { |
462 | } | 486 | } |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 67f2e3c38939..7522a6987595 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -1166,8 +1166,16 @@ extern unsigned long usemap_size(void); | |||
1166 | 1166 | ||
1167 | /* | 1167 | /* |
1168 | * We use the lower bits of the mem_map pointer to store | 1168 | * We use the lower bits of the mem_map pointer to store |
1169 | * a little bit of information. There should be at least | 1169 | * a little bit of information. The pointer is calculated |
1170 | * 3 bits here due to 32-bit alignment. | 1170 | * as mem_map - section_nr_to_pfn(pnum). The result is |
1171 | * aligned to the minimum alignment of the two values: | ||
1172 | * 1. All mem_map arrays are page-aligned. | ||
1173 | * 2. section_nr_to_pfn() always clears PFN_SECTION_SHIFT | ||
1174 | * lowest bits. PFN_SECTION_SHIFT is arch-specific | ||
1175 | * (equal SECTION_SIZE_BITS - PAGE_SHIFT), and the | ||
1176 | * worst combination is powerpc with 256k pages, | ||
1177 | * which results in PFN_SECTION_SHIFT equal 6. | ||
1178 | * To sum it up, at least 6 bits are available. | ||
1171 | */ | 1179 | */ |
1172 | #define SECTION_MARKED_PRESENT (1UL<<0) | 1180 | #define SECTION_MARKED_PRESENT (1UL<<0) |
1173 | #define SECTION_HAS_MEM_MAP (1UL<<1) | 1181 | #define SECTION_HAS_MEM_MAP (1UL<<1) |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index abb6dc2ebbf8..48fb2b43c35a 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
@@ -229,6 +229,12 @@ struct hda_device_id { | |||
229 | unsigned long driver_data; | 229 | unsigned long driver_data; |
230 | }; | 230 | }; |
231 | 231 | ||
232 | struct sdw_device_id { | ||
233 | __u16 mfg_id; | ||
234 | __u16 part_id; | ||
235 | kernel_ulong_t driver_data; | ||
236 | }; | ||
237 | |||
232 | /* | 238 | /* |
233 | * Struct used for matching a device | 239 | * Struct used for matching a device |
234 | */ | 240 | */ |
@@ -452,6 +458,19 @@ struct spi_device_id { | |||
452 | kernel_ulong_t driver_data; /* Data private to the driver */ | 458 | kernel_ulong_t driver_data; /* Data private to the driver */ |
453 | }; | 459 | }; |
454 | 460 | ||
461 | /* SLIMbus */ | ||
462 | |||
463 | #define SLIMBUS_NAME_SIZE 32 | ||
464 | #define SLIMBUS_MODULE_PREFIX "slim:" | ||
465 | |||
466 | struct slim_device_id { | ||
467 | __u16 manf_id, prod_code; | ||
468 | __u16 dev_index, instance; | ||
469 | |||
470 | /* Data private to the driver */ | ||
471 | kernel_ulong_t driver_data; | ||
472 | }; | ||
473 | |||
455 | #define SPMI_NAME_SIZE 32 | 474 | #define SPMI_NAME_SIZE 32 |
456 | #define SPMI_MODULE_PREFIX "spmi:" | 475 | #define SPMI_MODULE_PREFIX "spmi:" |
457 | 476 | ||
diff --git a/include/linux/module.h b/include/linux/module.h index c69b49abe877..d44df9b2c131 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/jump_label.h> | 19 | #include <linux/jump_label.h> |
20 | #include <linux/export.h> | 20 | #include <linux/export.h> |
21 | #include <linux/rbtree_latch.h> | 21 | #include <linux/rbtree_latch.h> |
22 | #include <linux/error-injection.h> | ||
22 | 23 | ||
23 | #include <linux/percpu.h> | 24 | #include <linux/percpu.h> |
24 | #include <asm/module.h> | 25 | #include <asm/module.h> |
@@ -475,6 +476,11 @@ struct module { | |||
475 | ctor_fn_t *ctors; | 476 | ctor_fn_t *ctors; |
476 | unsigned int num_ctors; | 477 | unsigned int num_ctors; |
477 | #endif | 478 | #endif |
479 | |||
480 | #ifdef CONFIG_FUNCTION_ERROR_INJECTION | ||
481 | struct error_injection_entry *ei_funcs; | ||
482 | unsigned int num_ei_funcs; | ||
483 | #endif | ||
478 | } ____cacheline_aligned __randomize_layout; | 484 | } ____cacheline_aligned __randomize_layout; |
479 | #ifndef MODULE_ARCH_INIT | 485 | #ifndef MODULE_ARCH_INIT |
480 | #define MODULE_ARCH_INIT {} | 486 | #define MODULE_ARCH_INIT {} |
@@ -485,7 +491,7 @@ extern struct mutex module_mutex; | |||
485 | /* FIXME: It'd be nice to isolate modules during init, too, so they | 491 | /* FIXME: It'd be nice to isolate modules during init, too, so they |
486 | aren't used before they (may) fail. But presently too much code | 492 | aren't used before they (may) fail. But presently too much code |
487 | (IDE & SCSI) require entry into the module during init.*/ | 493 | (IDE & SCSI) require entry into the module during init.*/ |
488 | static inline int module_is_live(struct module *mod) | 494 | static inline bool module_is_live(struct module *mod) |
489 | { | 495 | { |
490 | return mod->state != MODULE_STATE_GOING; | 496 | return mod->state != MODULE_STATE_GOING; |
491 | } | 497 | } |
@@ -606,6 +612,9 @@ int ref_module(struct module *a, struct module *b); | |||
606 | __mod ? __mod->name : "kernel"; \ | 612 | __mod ? __mod->name : "kernel"; \ |
607 | }) | 613 | }) |
608 | 614 | ||
615 | /* Dereference module function descriptor */ | ||
616 | void *dereference_module_function_descriptor(struct module *mod, void *ptr); | ||
617 | |||
609 | /* For kallsyms to ask for address resolution. namebuf should be at | 618 | /* For kallsyms to ask for address resolution. namebuf should be at |
610 | * least KSYM_NAME_LEN long: a pointer to namebuf is returned if | 619 | * least KSYM_NAME_LEN long: a pointer to namebuf is returned if |
611 | * found, otherwise NULL. */ | 620 | * found, otherwise NULL. */ |
@@ -760,6 +769,13 @@ static inline bool is_module_sig_enforced(void) | |||
760 | return false; | 769 | return false; |
761 | } | 770 | } |
762 | 771 | ||
772 | /* Dereference module function descriptor */ | ||
773 | static inline | ||
774 | void *dereference_module_function_descriptor(struct module *mod, void *ptr) | ||
775 | { | ||
776 | return ptr; | ||
777 | } | ||
778 | |||
763 | #endif /* CONFIG_MODULES */ | 779 | #endif /* CONFIG_MODULES */ |
764 | 780 | ||
765 | #ifdef CONFIG_SYSFS | 781 | #ifdef CONFIG_SYSFS |
@@ -801,6 +817,15 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, | |||
801 | static inline void module_bug_cleanup(struct module *mod) {} | 817 | static inline void module_bug_cleanup(struct module *mod) {} |
802 | #endif /* CONFIG_GENERIC_BUG */ | 818 | #endif /* CONFIG_GENERIC_BUG */ |
803 | 819 | ||
820 | #ifdef RETPOLINE | ||
821 | extern bool retpoline_module_ok(bool has_retpoline); | ||
822 | #else | ||
823 | static inline bool retpoline_module_ok(bool has_retpoline) | ||
824 | { | ||
825 | return true; | ||
826 | } | ||
827 | #endif | ||
828 | |||
804 | #ifdef CONFIG_MODULE_SIG | 829 | #ifdef CONFIG_MODULE_SIG |
805 | static inline bool module_sig_ok(struct module *module) | 830 | static inline bool module_sig_ok(struct module *module) |
806 | { | 831 | { |
diff --git a/include/linux/mtd/map.h b/include/linux/mtd/map.h index 3aa56e3104bb..b5b43f94f311 100644 --- a/include/linux/mtd/map.h +++ b/include/linux/mtd/map.h | |||
@@ -270,75 +270,67 @@ void map_destroy(struct mtd_info *mtd); | |||
270 | #define INVALIDATE_CACHED_RANGE(map, from, size) \ | 270 | #define INVALIDATE_CACHED_RANGE(map, from, size) \ |
271 | do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) | 271 | do { if (map->inval_cache) map->inval_cache(map, from, size); } while (0) |
272 | 272 | ||
273 | 273 | #define map_word_equal(map, val1, val2) \ | |
274 | static inline int map_word_equal(struct map_info *map, map_word val1, map_word val2) | 274 | ({ \ |
275 | { | 275 | int i, ret = 1; \ |
276 | int i; | 276 | for (i = 0; i < map_words(map); i++) \ |
277 | 277 | if ((val1).x[i] != (val2).x[i]) { \ | |
278 | for (i = 0; i < map_words(map); i++) { | 278 | ret = 0; \ |
279 | if (val1.x[i] != val2.x[i]) | 279 | break; \ |
280 | return 0; | 280 | } \ |
281 | } | 281 | ret; \ |
282 | 282 | }) | |
283 | return 1; | 283 | |
284 | } | 284 | #define map_word_and(map, val1, val2) \ |
285 | 285 | ({ \ | |
286 | static inline map_word map_word_and(struct map_info *map, map_word val1, map_word val2) | 286 | map_word r; \ |
287 | { | 287 | int i; \ |
288 | map_word r; | 288 | for (i = 0; i < map_words(map); i++) \ |
289 | int i; | 289 | r.x[i] = (val1).x[i] & (val2).x[i]; \ |
290 | 290 | r; \ | |
291 | for (i = 0; i < map_words(map); i++) | 291 | }) |
292 | r.x[i] = val1.x[i] & val2.x[i]; | 292 | |
293 | 293 | #define map_word_clr(map, val1, val2) \ | |
294 | return r; | 294 | ({ \ |
295 | } | 295 | map_word r; \ |
296 | 296 | int i; \ | |
297 | static inline map_word map_word_clr(struct map_info *map, map_word val1, map_word val2) | 297 | for (i = 0; i < map_words(map); i++) \ |
298 | { | 298 | r.x[i] = (val1).x[i] & ~(val2).x[i]; \ |
299 | map_word r; | 299 | r; \ |
300 | int i; | 300 | }) |
301 | 301 | ||
302 | for (i = 0; i < map_words(map); i++) | 302 | #define map_word_or(map, val1, val2) \ |
303 | r.x[i] = val1.x[i] & ~val2.x[i]; | 303 | ({ \ |
304 | 304 | map_word r; \ | |
305 | return r; | 305 | int i; \ |
306 | } | 306 | for (i = 0; i < map_words(map); i++) \ |
307 | 307 | r.x[i] = (val1).x[i] | (val2).x[i]; \ | |
308 | static inline map_word map_word_or(struct map_info *map, map_word val1, map_word val2) | 308 | r; \ |
309 | { | 309 | }) |
310 | map_word r; | 310 | |
311 | int i; | 311 | #define map_word_andequal(map, val1, val2, val3) \ |
312 | 312 | ({ \ | |
313 | for (i = 0; i < map_words(map); i++) | 313 | int i, ret = 1; \ |
314 | r.x[i] = val1.x[i] | val2.x[i]; | 314 | for (i = 0; i < map_words(map); i++) { \ |
315 | 315 | if (((val1).x[i] & (val2).x[i]) != (val2).x[i]) { \ | |
316 | return r; | 316 | ret = 0; \ |
317 | } | 317 | break; \ |
318 | 318 | } \ | |
319 | static inline int map_word_andequal(struct map_info *map, map_word val1, map_word val2, map_word val3) | 319 | } \ |
320 | { | 320 | ret; \ |
321 | int i; | 321 | }) |
322 | 322 | ||
323 | for (i = 0; i < map_words(map); i++) { | 323 | #define map_word_bitsset(map, val1, val2) \ |
324 | if ((val1.x[i] & val2.x[i]) != val3.x[i]) | 324 | ({ \ |
325 | return 0; | 325 | int i, ret = 0; \ |
326 | } | 326 | for (i = 0; i < map_words(map); i++) { \ |
327 | 327 | if ((val1).x[i] & (val2).x[i]) { \ | |
328 | return 1; | 328 | ret = 1; \ |
329 | } | 329 | break; \ |
330 | 330 | } \ | |
331 | static inline int map_word_bitsset(struct map_info *map, map_word val1, map_word val2) | 331 | } \ |
332 | { | 332 | ret; \ |
333 | int i; | 333 | }) |
334 | |||
335 | for (i = 0; i < map_words(map); i++) { | ||
336 | if (val1.x[i] & val2.x[i]) | ||
337 | return 1; | ||
338 | } | ||
339 | |||
340 | return 0; | ||
341 | } | ||
342 | 334 | ||
343 | static inline map_word map_word_load(struct map_info *map, const void *ptr) | 335 | static inline map_word map_word_load(struct map_info *map, const void *ptr) |
344 | { | 336 | { |
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h index cd55bf14ad51..205ededccc60 100644 --- a/include/linux/mtd/mtd.h +++ b/include/linux/mtd/mtd.h | |||
@@ -489,6 +489,34 @@ static inline uint32_t mtd_mod_by_eb(uint64_t sz, struct mtd_info *mtd) | |||
489 | return do_div(sz, mtd->erasesize); | 489 | return do_div(sz, mtd->erasesize); |
490 | } | 490 | } |
491 | 491 | ||
492 | /** | ||
493 | * mtd_align_erase_req - Adjust an erase request to align things on eraseblock | ||
494 | * boundaries. | ||
495 | * @mtd: the MTD device this erase request applies on | ||
496 | * @req: the erase request to adjust | ||
497 | * | ||
498 | * This function will adjust @req->addr and @req->len to align them on | ||
499 | * @mtd->erasesize. Of course we expect @mtd->erasesize to be != 0. | ||
500 | */ | ||
501 | static inline void mtd_align_erase_req(struct mtd_info *mtd, | ||
502 | struct erase_info *req) | ||
503 | { | ||
504 | u32 mod; | ||
505 | |||
506 | if (WARN_ON(!mtd->erasesize)) | ||
507 | return; | ||
508 | |||
509 | mod = mtd_mod_by_eb(req->addr, mtd); | ||
510 | if (mod) { | ||
511 | req->addr -= mod; | ||
512 | req->len += mod; | ||
513 | } | ||
514 | |||
515 | mod = mtd_mod_by_eb(req->addr + req->len, mtd); | ||
516 | if (mod) | ||
517 | req->len += mtd->erasesize - mod; | ||
518 | } | ||
519 | |||
492 | static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) | 520 | static inline uint32_t mtd_div_by_ws(uint64_t sz, struct mtd_info *mtd) |
493 | { | 521 | { |
494 | if (mtd->writesize_shift) | 522 | if (mtd->writesize_shift) |
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 749bb08c4772..56c5570aadbe 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h | |||
@@ -133,12 +133,6 @@ enum nand_ecc_algo { | |||
133 | */ | 133 | */ |
134 | #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) | 134 | #define NAND_ECC_GENERIC_ERASED_CHECK BIT(0) |
135 | #define NAND_ECC_MAXIMIZE BIT(1) | 135 | #define NAND_ECC_MAXIMIZE BIT(1) |
136 | /* | ||
137 | * If your controller already sends the required NAND commands when | ||
138 | * reading or writing a page, then the framework is not supposed to | ||
139 | * send READ0 and SEQIN/PAGEPROG respectively. | ||
140 | */ | ||
141 | #define NAND_ECC_CUSTOM_PAGE_ACCESS BIT(2) | ||
142 | 136 | ||
143 | /* Bit mask for flags passed to do_nand_read_ecc */ | 137 | /* Bit mask for flags passed to do_nand_read_ecc */ |
144 | #define NAND_GET_DEVICE 0x80 | 138 | #define NAND_GET_DEVICE 0x80 |
@@ -191,11 +185,6 @@ enum nand_ecc_algo { | |||
191 | /* Non chip related options */ | 185 | /* Non chip related options */ |
192 | /* This option skips the bbt scan during initialization. */ | 186 | /* This option skips the bbt scan during initialization. */ |
193 | #define NAND_SKIP_BBTSCAN 0x00010000 | 187 | #define NAND_SKIP_BBTSCAN 0x00010000 |
194 | /* | ||
195 | * This option is defined if the board driver allocates its own buffers | ||
196 | * (e.g. because it needs them DMA-coherent). | ||
197 | */ | ||
198 | #define NAND_OWN_BUFFERS 0x00020000 | ||
199 | /* Chip may not exist, so silence any errors in scan */ | 188 | /* Chip may not exist, so silence any errors in scan */ |
200 | #define NAND_SCAN_SILENT_NODEV 0x00040000 | 189 | #define NAND_SCAN_SILENT_NODEV 0x00040000 |
201 | /* | 190 | /* |
@@ -525,6 +514,8 @@ static const struct nand_ecc_caps __name = { \ | |||
525 | * @postpad: padding information for syndrome based ECC generators | 514 | * @postpad: padding information for syndrome based ECC generators |
526 | * @options: ECC specific options (see NAND_ECC_XXX flags defined above) | 515 | * @options: ECC specific options (see NAND_ECC_XXX flags defined above) |
527 | * @priv: pointer to private ECC control data | 516 | * @priv: pointer to private ECC control data |
517 | * @calc_buf: buffer for calculated ECC, size is oobsize. | ||
518 | * @code_buf: buffer for ECC read from flash, size is oobsize. | ||
528 | * @hwctl: function to control hardware ECC generator. Must only | 519 | * @hwctl: function to control hardware ECC generator. Must only |
529 | * be provided if an hardware ECC is available | 520 | * be provided if an hardware ECC is available |
530 | * @calculate: function for ECC calculation or readback from ECC hardware | 521 | * @calculate: function for ECC calculation or readback from ECC hardware |
@@ -575,6 +566,8 @@ struct nand_ecc_ctrl { | |||
575 | int postpad; | 566 | int postpad; |
576 | unsigned int options; | 567 | unsigned int options; |
577 | void *priv; | 568 | void *priv; |
569 | u8 *calc_buf; | ||
570 | u8 *code_buf; | ||
578 | void (*hwctl)(struct mtd_info *mtd, int mode); | 571 | void (*hwctl)(struct mtd_info *mtd, int mode); |
579 | int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, | 572 | int (*calculate)(struct mtd_info *mtd, const uint8_t *dat, |
580 | uint8_t *ecc_code); | 573 | uint8_t *ecc_code); |
@@ -602,26 +595,6 @@ struct nand_ecc_ctrl { | |||
602 | int page); | 595 | int page); |
603 | }; | 596 | }; |
604 | 597 | ||
605 | static inline int nand_standard_page_accessors(struct nand_ecc_ctrl *ecc) | ||
606 | { | ||
607 | return !(ecc->options & NAND_ECC_CUSTOM_PAGE_ACCESS); | ||
608 | } | ||
609 | |||
610 | /** | ||
611 | * struct nand_buffers - buffer structure for read/write | ||
612 | * @ecccalc: buffer pointer for calculated ECC, size is oobsize. | ||
613 | * @ecccode: buffer pointer for ECC read from flash, size is oobsize. | ||
614 | * @databuf: buffer pointer for data, size is (page size + oobsize). | ||
615 | * | ||
616 | * Do not change the order of buffers. databuf and oobrbuf must be in | ||
617 | * consecutive order. | ||
618 | */ | ||
619 | struct nand_buffers { | ||
620 | uint8_t *ecccalc; | ||
621 | uint8_t *ecccode; | ||
622 | uint8_t *databuf; | ||
623 | }; | ||
624 | |||
625 | /** | 598 | /** |
626 | * struct nand_sdr_timings - SDR NAND chip timings | 599 | * struct nand_sdr_timings - SDR NAND chip timings |
627 | * | 600 | * |
@@ -762,6 +735,350 @@ struct nand_manufacturer_ops { | |||
762 | }; | 735 | }; |
763 | 736 | ||
764 | /** | 737 | /** |
738 | * struct nand_op_cmd_instr - Definition of a command instruction | ||
739 | * @opcode: the command to issue in one cycle | ||
740 | */ | ||
741 | struct nand_op_cmd_instr { | ||
742 | u8 opcode; | ||
743 | }; | ||
744 | |||
745 | /** | ||
746 | * struct nand_op_addr_instr - Definition of an address instruction | ||
747 | * @naddrs: length of the @addrs array | ||
748 | * @addrs: array containing the address cycles to issue | ||
749 | */ | ||
750 | struct nand_op_addr_instr { | ||
751 | unsigned int naddrs; | ||
752 | const u8 *addrs; | ||
753 | }; | ||
754 | |||
755 | /** | ||
756 | * struct nand_op_data_instr - Definition of a data instruction | ||
757 | * @len: number of data bytes to move | ||
758 | * @in: buffer to fill when reading from the NAND chip | ||
759 | * @out: buffer to read from when writing to the NAND chip | ||
760 | * @force_8bit: force 8-bit access | ||
761 | * | ||
762 | * Please note that "in" and "out" are inverted from the ONFI specification | ||
763 | * and are from the controller perspective, so a "in" is a read from the NAND | ||
764 | * chip while a "out" is a write to the NAND chip. | ||
765 | */ | ||
766 | struct nand_op_data_instr { | ||
767 | unsigned int len; | ||
768 | union { | ||
769 | void *in; | ||
770 | const void *out; | ||
771 | } buf; | ||
772 | bool force_8bit; | ||
773 | }; | ||
774 | |||
775 | /** | ||
776 | * struct nand_op_waitrdy_instr - Definition of a wait ready instruction | ||
777 | * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms | ||
778 | */ | ||
779 | struct nand_op_waitrdy_instr { | ||
780 | unsigned int timeout_ms; | ||
781 | }; | ||
782 | |||
783 | /** | ||
784 | * enum nand_op_instr_type - Definition of all instruction types | ||
785 | * @NAND_OP_CMD_INSTR: command instruction | ||
786 | * @NAND_OP_ADDR_INSTR: address instruction | ||
787 | * @NAND_OP_DATA_IN_INSTR: data in instruction | ||
788 | * @NAND_OP_DATA_OUT_INSTR: data out instruction | ||
789 | * @NAND_OP_WAITRDY_INSTR: wait ready instruction | ||
790 | */ | ||
791 | enum nand_op_instr_type { | ||
792 | NAND_OP_CMD_INSTR, | ||
793 | NAND_OP_ADDR_INSTR, | ||
794 | NAND_OP_DATA_IN_INSTR, | ||
795 | NAND_OP_DATA_OUT_INSTR, | ||
796 | NAND_OP_WAITRDY_INSTR, | ||
797 | }; | ||
798 | |||
799 | /** | ||
800 | * struct nand_op_instr - Instruction object | ||
801 | * @type: the instruction type | ||
802 | * @cmd/@addr/@data/@waitrdy: extra data associated to the instruction. | ||
803 | * You'll have to use the appropriate element | ||
804 | * depending on @type | ||
805 | * @delay_ns: delay the controller should apply after the instruction has been | ||
806 | * issued on the bus. Most modern controllers have internal timings | ||
807 | * control logic, and in this case, the controller driver can ignore | ||
808 | * this field. | ||
809 | */ | ||
810 | struct nand_op_instr { | ||
811 | enum nand_op_instr_type type; | ||
812 | union { | ||
813 | struct nand_op_cmd_instr cmd; | ||
814 | struct nand_op_addr_instr addr; | ||
815 | struct nand_op_data_instr data; | ||
816 | struct nand_op_waitrdy_instr waitrdy; | ||
817 | } ctx; | ||
818 | unsigned int delay_ns; | ||
819 | }; | ||
820 | |||
821 | /* | ||
822 | * Special handling must be done for the WAITRDY timeout parameter as it usually | ||
823 | * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or | ||
824 | * tBERS (during an erase) which all of them are u64 values that cannot be | ||
825 | * divided by usual kernel macros and must be handled with the special | ||
826 | * DIV_ROUND_UP_ULL() macro. | ||
827 | */ | ||
828 | #define __DIVIDE(dividend, divisor) ({ \ | ||
829 | sizeof(dividend) == sizeof(u32) ? \ | ||
830 | DIV_ROUND_UP(dividend, divisor) : \ | ||
831 | DIV_ROUND_UP_ULL(dividend, divisor); \ | ||
832 | }) | ||
833 | #define PSEC_TO_NSEC(x) __DIVIDE(x, 1000) | ||
834 | #define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000) | ||
835 | |||
836 | #define NAND_OP_CMD(id, ns) \ | ||
837 | { \ | ||
838 | .type = NAND_OP_CMD_INSTR, \ | ||
839 | .ctx.cmd.opcode = id, \ | ||
840 | .delay_ns = ns, \ | ||
841 | } | ||
842 | |||
843 | #define NAND_OP_ADDR(ncycles, cycles, ns) \ | ||
844 | { \ | ||
845 | .type = NAND_OP_ADDR_INSTR, \ | ||
846 | .ctx.addr = { \ | ||
847 | .naddrs = ncycles, \ | ||
848 | .addrs = cycles, \ | ||
849 | }, \ | ||
850 | .delay_ns = ns, \ | ||
851 | } | ||
852 | |||
853 | #define NAND_OP_DATA_IN(l, b, ns) \ | ||
854 | { \ | ||
855 | .type = NAND_OP_DATA_IN_INSTR, \ | ||
856 | .ctx.data = { \ | ||
857 | .len = l, \ | ||
858 | .buf.in = b, \ | ||
859 | .force_8bit = false, \ | ||
860 | }, \ | ||
861 | .delay_ns = ns, \ | ||
862 | } | ||
863 | |||
864 | #define NAND_OP_DATA_OUT(l, b, ns) \ | ||
865 | { \ | ||
866 | .type = NAND_OP_DATA_OUT_INSTR, \ | ||
867 | .ctx.data = { \ | ||
868 | .len = l, \ | ||
869 | .buf.out = b, \ | ||
870 | .force_8bit = false, \ | ||
871 | }, \ | ||
872 | .delay_ns = ns, \ | ||
873 | } | ||
874 | |||
875 | #define NAND_OP_8BIT_DATA_IN(l, b, ns) \ | ||
876 | { \ | ||
877 | .type = NAND_OP_DATA_IN_INSTR, \ | ||
878 | .ctx.data = { \ | ||
879 | .len = l, \ | ||
880 | .buf.in = b, \ | ||
881 | .force_8bit = true, \ | ||
882 | }, \ | ||
883 | .delay_ns = ns, \ | ||
884 | } | ||
885 | |||
886 | #define NAND_OP_8BIT_DATA_OUT(l, b, ns) \ | ||
887 | { \ | ||
888 | .type = NAND_OP_DATA_OUT_INSTR, \ | ||
889 | .ctx.data = { \ | ||
890 | .len = l, \ | ||
891 | .buf.out = b, \ | ||
892 | .force_8bit = true, \ | ||
893 | }, \ | ||
894 | .delay_ns = ns, \ | ||
895 | } | ||
896 | |||
897 | #define NAND_OP_WAIT_RDY(tout_ms, ns) \ | ||
898 | { \ | ||
899 | .type = NAND_OP_WAITRDY_INSTR, \ | ||
900 | .ctx.waitrdy.timeout_ms = tout_ms, \ | ||
901 | .delay_ns = ns, \ | ||
902 | } | ||
903 | |||
904 | /** | ||
905 | * struct nand_subop - a sub operation | ||
906 | * @instrs: array of instructions | ||
907 | * @ninstrs: length of the @instrs array | ||
908 | * @first_instr_start_off: offset to start from for the first instruction | ||
909 | * of the sub-operation | ||
910 | * @last_instr_end_off: offset to end at (excluded) for the last instruction | ||
911 | * of the sub-operation | ||
912 | * | ||
913 | * Both @first_instr_start_off and @last_instr_end_off only apply to data or | ||
914 | * address instructions. | ||
915 | * | ||
916 | * When an operation cannot be handled as is by the NAND controller, it will | ||
917 | * be split by the parser into sub-operations which will be passed to the | ||
918 | * controller driver. | ||
919 | */ | ||
920 | struct nand_subop { | ||
921 | const struct nand_op_instr *instrs; | ||
922 | unsigned int ninstrs; | ||
923 | unsigned int first_instr_start_off; | ||
924 | unsigned int last_instr_end_off; | ||
925 | }; | ||
926 | |||
927 | int nand_subop_get_addr_start_off(const struct nand_subop *subop, | ||
928 | unsigned int op_id); | ||
929 | int nand_subop_get_num_addr_cyc(const struct nand_subop *subop, | ||
930 | unsigned int op_id); | ||
931 | int nand_subop_get_data_start_off(const struct nand_subop *subop, | ||
932 | unsigned int op_id); | ||
933 | int nand_subop_get_data_len(const struct nand_subop *subop, | ||
934 | unsigned int op_id); | ||
935 | |||
936 | /** | ||
937 | * struct nand_op_parser_addr_constraints - Constraints for address instructions | ||
938 | * @maxcycles: maximum number of address cycles the controller can issue in a | ||
939 | * single step | ||
940 | */ | ||
941 | struct nand_op_parser_addr_constraints { | ||
942 | unsigned int maxcycles; | ||
943 | }; | ||
944 | |||
945 | /** | ||
946 | * struct nand_op_parser_data_constraints - Constraints for data instructions | ||
947 | * @maxlen: maximum data length that the controller can handle in a single step | ||
948 | */ | ||
949 | struct nand_op_parser_data_constraints { | ||
950 | unsigned int maxlen; | ||
951 | }; | ||
952 | |||
953 | /** | ||
954 | * struct nand_op_parser_pattern_elem - One element of a pattern | ||
955 | * @type: the instructuction type | ||
956 | * @optional: whether this element of the pattern is optional or mandatory | ||
957 | * @addr/@data: address or data constraint (number of cycles or data length) | ||
958 | */ | ||
959 | struct nand_op_parser_pattern_elem { | ||
960 | enum nand_op_instr_type type; | ||
961 | bool optional; | ||
962 | union { | ||
963 | struct nand_op_parser_addr_constraints addr; | ||
964 | struct nand_op_parser_data_constraints data; | ||
965 | } ctx; | ||
966 | }; | ||
967 | |||
968 | #define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \ | ||
969 | { \ | ||
970 | .type = NAND_OP_CMD_INSTR, \ | ||
971 | .optional = _opt, \ | ||
972 | } | ||
973 | |||
974 | #define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \ | ||
975 | { \ | ||
976 | .type = NAND_OP_ADDR_INSTR, \ | ||
977 | .optional = _opt, \ | ||
978 | .ctx.addr.maxcycles = _maxcycles, \ | ||
979 | } | ||
980 | |||
981 | #define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \ | ||
982 | { \ | ||
983 | .type = NAND_OP_DATA_IN_INSTR, \ | ||
984 | .optional = _opt, \ | ||
985 | .ctx.data.maxlen = _maxlen, \ | ||
986 | } | ||
987 | |||
988 | #define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \ | ||
989 | { \ | ||
990 | .type = NAND_OP_DATA_OUT_INSTR, \ | ||
991 | .optional = _opt, \ | ||
992 | .ctx.data.maxlen = _maxlen, \ | ||
993 | } | ||
994 | |||
995 | #define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \ | ||
996 | { \ | ||
997 | .type = NAND_OP_WAITRDY_INSTR, \ | ||
998 | .optional = _opt, \ | ||
999 | } | ||
1000 | |||
1001 | /** | ||
1002 | * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor | ||
1003 | * @elems: array of pattern elements | ||
1004 | * @nelems: number of pattern elements in @elems array | ||
1005 | * @exec: the function that will issue a sub-operation | ||
1006 | * | ||
1007 | * A pattern is a list of elements, each element reprensenting one instruction | ||
1008 | * with its constraints. The pattern itself is used by the core to match NAND | ||
1009 | * chip operation with NAND controller operations. | ||
1010 | * Once a match between a NAND controller operation pattern and a NAND chip | ||
1011 | * operation (or a sub-set of a NAND operation) is found, the pattern ->exec() | ||
1012 | * hook is called so that the controller driver can issue the operation on the | ||
1013 | * bus. | ||
1014 | * | ||
1015 | * Controller drivers should declare as many patterns as they support and pass | ||
1016 | * this list of patterns (created with the help of the following macro) to | ||
1017 | * the nand_op_parser_exec_op() helper. | ||
1018 | */ | ||
1019 | struct nand_op_parser_pattern { | ||
1020 | const struct nand_op_parser_pattern_elem *elems; | ||
1021 | unsigned int nelems; | ||
1022 | int (*exec)(struct nand_chip *chip, const struct nand_subop *subop); | ||
1023 | }; | ||
1024 | |||
1025 | #define NAND_OP_PARSER_PATTERN(_exec, ...) \ | ||
1026 | { \ | ||
1027 | .exec = _exec, \ | ||
1028 | .elems = (struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \ | ||
1029 | .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \ | ||
1030 | sizeof(struct nand_op_parser_pattern_elem), \ | ||
1031 | } | ||
1032 | |||
1033 | /** | ||
1034 | * struct nand_op_parser - NAND controller operation parser descriptor | ||
1035 | * @patterns: array of supported patterns | ||
1036 | * @npatterns: length of the @patterns array | ||
1037 | * | ||
1038 | * The parser descriptor is just an array of supported patterns which will be | ||
1039 | * iterated by nand_op_parser_exec_op() everytime it tries to execute an | ||
1040 | * NAND operation (or tries to determine if a specific operation is supported). | ||
1041 | * | ||
1042 | * It is worth mentioning that patterns will be tested in their declaration | ||
1043 | * order, and the first match will be taken, so it's important to order patterns | ||
1044 | * appropriately so that simple/inefficient patterns are placed at the end of | ||
1045 | * the list. Usually, this is where you put single instruction patterns. | ||
1046 | */ | ||
1047 | struct nand_op_parser { | ||
1048 | const struct nand_op_parser_pattern *patterns; | ||
1049 | unsigned int npatterns; | ||
1050 | }; | ||
1051 | |||
1052 | #define NAND_OP_PARSER(...) \ | ||
1053 | { \ | ||
1054 | .patterns = (struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \ | ||
1055 | .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \ | ||
1056 | sizeof(struct nand_op_parser_pattern), \ | ||
1057 | } | ||
1058 | |||
1059 | /** | ||
1060 | * struct nand_operation - NAND operation descriptor | ||
1061 | * @instrs: array of instructions to execute | ||
1062 | * @ninstrs: length of the @instrs array | ||
1063 | * | ||
1064 | * The actual operation structure that will be passed to chip->exec_op(). | ||
1065 | */ | ||
1066 | struct nand_operation { | ||
1067 | const struct nand_op_instr *instrs; | ||
1068 | unsigned int ninstrs; | ||
1069 | }; | ||
1070 | |||
1071 | #define NAND_OPERATION(_instrs) \ | ||
1072 | { \ | ||
1073 | .instrs = _instrs, \ | ||
1074 | .ninstrs = ARRAY_SIZE(_instrs), \ | ||
1075 | } | ||
1076 | |||
1077 | int nand_op_parser_exec_op(struct nand_chip *chip, | ||
1078 | const struct nand_op_parser *parser, | ||
1079 | const struct nand_operation *op, bool check_only); | ||
1080 | |||
1081 | /** | ||
765 | * struct nand_chip - NAND Private Flash Chip Data | 1082 | * struct nand_chip - NAND Private Flash Chip Data |
766 | * @mtd: MTD device registered to the MTD framework | 1083 | * @mtd: MTD device registered to the MTD framework |
767 | * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the | 1084 | * @IO_ADDR_R: [BOARDSPECIFIC] address to read the 8 I/O lines of the |
@@ -787,10 +1104,13 @@ struct nand_manufacturer_ops { | |||
787 | * commands to the chip. | 1104 | * commands to the chip. |
788 | * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on | 1105 | * @waitfunc: [REPLACEABLE] hardwarespecific function for wait on |
789 | * ready. | 1106 | * ready. |
1107 | * @exec_op: controller specific method to execute NAND operations. | ||
1108 | * This method replaces ->cmdfunc(), | ||
1109 | * ->{read,write}_{buf,byte,word}(), ->dev_ready() and | ||
1110 | * ->waifunc(). | ||
790 | * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for | 1111 | * @setup_read_retry: [FLASHSPECIFIC] flash (vendor) specific function for |
791 | * setting the read-retry mode. Mostly needed for MLC NAND. | 1112 | * setting the read-retry mode. Mostly needed for MLC NAND. |
792 | * @ecc: [BOARDSPECIFIC] ECC control structure | 1113 | * @ecc: [BOARDSPECIFIC] ECC control structure |
793 | * @buffers: buffer structure for read/write | ||
794 | * @buf_align: minimum buffer alignment required by a platform | 1114 | * @buf_align: minimum buffer alignment required by a platform |
795 | * @hwcontrol: platform-specific hardware control structure | 1115 | * @hwcontrol: platform-specific hardware control structure |
796 | * @erase: [REPLACEABLE] erase function | 1116 | * @erase: [REPLACEABLE] erase function |
@@ -830,6 +1150,7 @@ struct nand_manufacturer_ops { | |||
830 | * @numchips: [INTERN] number of physical chips | 1150 | * @numchips: [INTERN] number of physical chips |
831 | * @chipsize: [INTERN] the size of one chip for multichip arrays | 1151 | * @chipsize: [INTERN] the size of one chip for multichip arrays |
832 | * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 | 1152 | * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 |
1153 | * @data_buf: [INTERN] buffer for data, size is (page size + oobsize). | ||
833 | * @pagebuf: [INTERN] holds the pagenumber which is currently in | 1154 | * @pagebuf: [INTERN] holds the pagenumber which is currently in |
834 | * data_buf. | 1155 | * data_buf. |
835 | * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is | 1156 | * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is |
@@ -886,6 +1207,9 @@ struct nand_chip { | |||
886 | void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, | 1207 | void (*cmdfunc)(struct mtd_info *mtd, unsigned command, int column, |
887 | int page_addr); | 1208 | int page_addr); |
888 | int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); | 1209 | int(*waitfunc)(struct mtd_info *mtd, struct nand_chip *this); |
1210 | int (*exec_op)(struct nand_chip *chip, | ||
1211 | const struct nand_operation *op, | ||
1212 | bool check_only); | ||
889 | int (*erase)(struct mtd_info *mtd, int page); | 1213 | int (*erase)(struct mtd_info *mtd, int page); |
890 | int (*scan_bbt)(struct mtd_info *mtd); | 1214 | int (*scan_bbt)(struct mtd_info *mtd); |
891 | int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, | 1215 | int (*onfi_set_features)(struct mtd_info *mtd, struct nand_chip *chip, |
@@ -896,7 +1220,6 @@ struct nand_chip { | |||
896 | int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, | 1220 | int (*setup_data_interface)(struct mtd_info *mtd, int chipnr, |
897 | const struct nand_data_interface *conf); | 1221 | const struct nand_data_interface *conf); |
898 | 1222 | ||
899 | |||
900 | int chip_delay; | 1223 | int chip_delay; |
901 | unsigned int options; | 1224 | unsigned int options; |
902 | unsigned int bbt_options; | 1225 | unsigned int bbt_options; |
@@ -908,6 +1231,7 @@ struct nand_chip { | |||
908 | int numchips; | 1231 | int numchips; |
909 | uint64_t chipsize; | 1232 | uint64_t chipsize; |
910 | int pagemask; | 1233 | int pagemask; |
1234 | u8 *data_buf; | ||
911 | int pagebuf; | 1235 | int pagebuf; |
912 | unsigned int pagebuf_bitflips; | 1236 | unsigned int pagebuf_bitflips; |
913 | int subpagesize; | 1237 | int subpagesize; |
@@ -928,7 +1252,7 @@ struct nand_chip { | |||
928 | u16 max_bb_per_die; | 1252 | u16 max_bb_per_die; |
929 | u32 blocks_per_die; | 1253 | u32 blocks_per_die; |
930 | 1254 | ||
931 | struct nand_data_interface *data_interface; | 1255 | struct nand_data_interface data_interface; |
932 | 1256 | ||
933 | int read_retries; | 1257 | int read_retries; |
934 | 1258 | ||
@@ -938,7 +1262,6 @@ struct nand_chip { | |||
938 | struct nand_hw_control *controller; | 1262 | struct nand_hw_control *controller; |
939 | 1263 | ||
940 | struct nand_ecc_ctrl ecc; | 1264 | struct nand_ecc_ctrl ecc; |
941 | struct nand_buffers *buffers; | ||
942 | unsigned long buf_align; | 1265 | unsigned long buf_align; |
943 | struct nand_hw_control hwcontrol; | 1266 | struct nand_hw_control hwcontrol; |
944 | 1267 | ||
@@ -956,6 +1279,15 @@ struct nand_chip { | |||
956 | } manufacturer; | 1279 | } manufacturer; |
957 | }; | 1280 | }; |
958 | 1281 | ||
1282 | static inline int nand_exec_op(struct nand_chip *chip, | ||
1283 | const struct nand_operation *op) | ||
1284 | { | ||
1285 | if (!chip->exec_op) | ||
1286 | return -ENOTSUPP; | ||
1287 | |||
1288 | return chip->exec_op(chip, op, false); | ||
1289 | } | ||
1290 | |||
959 | extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; | 1291 | extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops; |
960 | extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; | 1292 | extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops; |
961 | 1293 | ||
@@ -1225,8 +1557,7 @@ static inline int onfi_get_sync_timing_mode(struct nand_chip *chip) | |||
1225 | return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); | 1557 | return le16_to_cpu(chip->onfi_params.src_sync_timing_mode); |
1226 | } | 1558 | } |
1227 | 1559 | ||
1228 | int onfi_init_data_interface(struct nand_chip *chip, | 1560 | int onfi_fill_data_interface(struct nand_chip *chip, |
1229 | struct nand_data_interface *iface, | ||
1230 | enum nand_data_interface_type type, | 1561 | enum nand_data_interface_type type, |
1231 | int timing_mode); | 1562 | int timing_mode); |
1232 | 1563 | ||
@@ -1269,8 +1600,6 @@ static inline int jedec_feature(struct nand_chip *chip) | |||
1269 | 1600 | ||
1270 | /* get timing characteristics from ONFI timing mode. */ | 1601 | /* get timing characteristics from ONFI timing mode. */ |
1271 | const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); | 1602 | const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode); |
1272 | /* get data interface from ONFI timing mode 0, used after reset. */ | ||
1273 | const struct nand_data_interface *nand_get_default_data_interface(void); | ||
1274 | 1603 | ||
1275 | int nand_check_erased_ecc_chunk(void *data, int datalen, | 1604 | int nand_check_erased_ecc_chunk(void *data, int datalen, |
1276 | void *ecc, int ecclen, | 1605 | void *ecc, int ecclen, |
@@ -1316,9 +1645,45 @@ int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, | |||
1316 | /* Reset and initialize a NAND device */ | 1645 | /* Reset and initialize a NAND device */ |
1317 | int nand_reset(struct nand_chip *chip, int chipnr); | 1646 | int nand_reset(struct nand_chip *chip, int chipnr); |
1318 | 1647 | ||
1648 | /* NAND operation helpers */ | ||
1649 | int nand_reset_op(struct nand_chip *chip); | ||
1650 | int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf, | ||
1651 | unsigned int len); | ||
1652 | int nand_status_op(struct nand_chip *chip, u8 *status); | ||
1653 | int nand_exit_status_op(struct nand_chip *chip); | ||
1654 | int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock); | ||
1655 | int nand_read_page_op(struct nand_chip *chip, unsigned int page, | ||
1656 | unsigned int offset_in_page, void *buf, unsigned int len); | ||
1657 | int nand_change_read_column_op(struct nand_chip *chip, | ||
1658 | unsigned int offset_in_page, void *buf, | ||
1659 | unsigned int len, bool force_8bit); | ||
1660 | int nand_read_oob_op(struct nand_chip *chip, unsigned int page, | ||
1661 | unsigned int offset_in_page, void *buf, unsigned int len); | ||
1662 | int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page, | ||
1663 | unsigned int offset_in_page, const void *buf, | ||
1664 | unsigned int len); | ||
1665 | int nand_prog_page_end_op(struct nand_chip *chip); | ||
1666 | int nand_prog_page_op(struct nand_chip *chip, unsigned int page, | ||
1667 | unsigned int offset_in_page, const void *buf, | ||
1668 | unsigned int len); | ||
1669 | int nand_change_write_column_op(struct nand_chip *chip, | ||
1670 | unsigned int offset_in_page, const void *buf, | ||
1671 | unsigned int len, bool force_8bit); | ||
1672 | int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len, | ||
1673 | bool force_8bit); | ||
1674 | int nand_write_data_op(struct nand_chip *chip, const void *buf, | ||
1675 | unsigned int len, bool force_8bit); | ||
1676 | |||
1319 | /* Free resources held by the NAND device */ | 1677 | /* Free resources held by the NAND device */ |
1320 | void nand_cleanup(struct nand_chip *chip); | 1678 | void nand_cleanup(struct nand_chip *chip); |
1321 | 1679 | ||
1322 | /* Default extended ID decoding function */ | 1680 | /* Default extended ID decoding function */ |
1323 | void nand_decode_ext_id(struct nand_chip *chip); | 1681 | void nand_decode_ext_id(struct nand_chip *chip); |
1682 | |||
1683 | /* | ||
1684 | * External helper for controller drivers that have to implement the WAITRDY | ||
1685 | * instruction and have no physical pin to check it. | ||
1686 | */ | ||
1687 | int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms); | ||
1688 | |||
1324 | #endif /* __LINUX_MTD_RAWNAND_H */ | 1689 | #endif /* __LINUX_MTD_RAWNAND_H */ |
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index d0c66a0975cf..de36969eb359 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h | |||
@@ -61,6 +61,7 @@ | |||
61 | #define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ | 61 | #define SPINOR_OP_RDSFDP 0x5a /* Read SFDP */ |
62 | #define SPINOR_OP_RDCR 0x35 /* Read configuration register */ | 62 | #define SPINOR_OP_RDCR 0x35 /* Read configuration register */ |
63 | #define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ | 63 | #define SPINOR_OP_RDFSR 0x70 /* Read flag status register */ |
64 | #define SPINOR_OP_CLFSR 0x50 /* Clear flag status register */ | ||
64 | 65 | ||
65 | /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ | 66 | /* 4-byte address opcodes - used on Spansion and some Macronix flashes. */ |
66 | #define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ | 67 | #define SPINOR_OP_READ_4B 0x13 /* Read data bytes (low frequency) */ |
@@ -130,7 +131,10 @@ | |||
130 | #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ | 131 | #define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */ |
131 | 132 | ||
132 | /* Flag Status Register bits */ | 133 | /* Flag Status Register bits */ |
133 | #define FSR_READY BIT(7) | 134 | #define FSR_READY BIT(7) /* Device status, 0 = Busy, 1 = Ready */ |
135 | #define FSR_E_ERR BIT(5) /* Erase operation status */ | ||
136 | #define FSR_P_ERR BIT(4) /* Program operation status */ | ||
137 | #define FSR_PT_ERR BIT(1) /* Protection error bit */ | ||
134 | 138 | ||
135 | /* Configuration Register bits. */ | 139 | /* Configuration Register bits. */ |
136 | #define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ | 140 | #define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */ |
@@ -399,4 +403,10 @@ struct spi_nor_hwcaps { | |||
399 | int spi_nor_scan(struct spi_nor *nor, const char *name, | 403 | int spi_nor_scan(struct spi_nor *nor, const char *name, |
400 | const struct spi_nor_hwcaps *hwcaps); | 404 | const struct spi_nor_hwcaps *hwcaps); |
401 | 405 | ||
406 | /** | ||
407 | * spi_nor_restore_addr_mode() - restore the status of SPI NOR | ||
408 | * @nor: the spi_nor structure | ||
409 | */ | ||
410 | void spi_nor_restore(struct spi_nor *nor); | ||
411 | |||
402 | #endif | 412 | #endif |
diff --git a/include/linux/mutex.h b/include/linux/mutex.h index 153274f78402..f25c13423bd4 100644 --- a/include/linux/mutex.h +++ b/include/linux/mutex.h | |||
@@ -138,9 +138,9 @@ extern void __mutex_init(struct mutex *lock, const char *name, | |||
138 | * mutex_is_locked - is the mutex locked | 138 | * mutex_is_locked - is the mutex locked |
139 | * @lock: the mutex to be queried | 139 | * @lock: the mutex to be queried |
140 | * | 140 | * |
141 | * Returns 1 if the mutex is locked, 0 if unlocked. | 141 | * Returns true if the mutex is locked, false if unlocked. |
142 | */ | 142 | */ |
143 | static inline int mutex_is_locked(struct mutex *lock) | 143 | static inline bool mutex_is_locked(struct mutex *lock) |
144 | { | 144 | { |
145 | /* | 145 | /* |
146 | * XXX think about spin_is_locked | 146 | * XXX think about spin_is_locked |
diff --git a/include/linux/mux/consumer.h b/include/linux/mux/consumer.h index ea96d4c82be7..5fc6bb2fefad 100644 --- a/include/linux/mux/consumer.h +++ b/include/linux/mux/consumer.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * mux/consumer.h - definitions for the multiplexer consumer interface | 3 | * mux/consumer.h - definitions for the multiplexer consumer interface |
3 | * | 4 | * |
4 | * Copyright (C) 2017 Axentia Technologies AB | 5 | * Copyright (C) 2017 Axentia Technologies AB |
5 | * | 6 | * |
6 | * Author: Peter Rosin <peda@axentia.se> | 7 | * Author: Peter Rosin <peda@axentia.se> |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #ifndef _LINUX_MUX_CONSUMER_H | 10 | #ifndef _LINUX_MUX_CONSUMER_H |
diff --git a/include/linux/mux/driver.h b/include/linux/mux/driver.h index 35c3579c3304..627a2c6bc02d 100644 --- a/include/linux/mux/driver.h +++ b/include/linux/mux/driver.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * mux/driver.h - definitions for the multiplexer driver interface | 3 | * mux/driver.h - definitions for the multiplexer driver interface |
3 | * | 4 | * |
4 | * Copyright (C) 2017 Axentia Technologies AB | 5 | * Copyright (C) 2017 Axentia Technologies AB |
5 | * | 6 | * |
6 | * Author: Peter Rosin <peda@axentia.se> | 7 | * Author: Peter Rosin <peda@axentia.se> |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #ifndef _LINUX_MUX_DRIVER_H | 10 | #ifndef _LINUX_MUX_DRIVER_H |
diff --git a/include/linux/net.h b/include/linux/net.h index caeb159abda5..91216b16feb7 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -147,7 +147,7 @@ struct proto_ops { | |||
147 | int (*getname) (struct socket *sock, | 147 | int (*getname) (struct socket *sock, |
148 | struct sockaddr *addr, | 148 | struct sockaddr *addr, |
149 | int *sockaddr_len, int peer); | 149 | int *sockaddr_len, int peer); |
150 | unsigned int (*poll) (struct file *file, struct socket *sock, | 150 | __poll_t (*poll) (struct file *file, struct socket *sock, |
151 | struct poll_table_struct *wait); | 151 | struct poll_table_struct *wait); |
152 | int (*ioctl) (struct socket *sock, unsigned int cmd, | 152 | int (*ioctl) (struct socket *sock, unsigned int cmd, |
153 | unsigned long arg); | 153 | unsigned long arg); |
@@ -306,7 +306,6 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset, | |||
306 | size_t size, int flags); | 306 | size_t size, int flags); |
307 | int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, | 307 | int kernel_sendpage_locked(struct sock *sk, struct page *page, int offset, |
308 | size_t size, int flags); | 308 | size_t size, int flags); |
309 | int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg); | ||
310 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); | 309 | int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how); |
311 | 310 | ||
312 | /* Routine returns the IP overhead imposed by a (caller-protected) socket. */ | 311 | /* Routine returns the IP overhead imposed by a (caller-protected) socket. */ |
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h new file mode 100644 index 000000000000..bebeaad897cc --- /dev/null +++ b/include/linux/net_dim.h | |||
@@ -0,0 +1,380 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2016, Mellanox Technologies. All rights reserved. | ||
3 | * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #ifndef NET_DIM_H | ||
35 | #define NET_DIM_H | ||
36 | |||
37 | #include <linux/module.h> | ||
38 | |||
39 | struct net_dim_cq_moder { | ||
40 | u16 usec; | ||
41 | u16 pkts; | ||
42 | u8 cq_period_mode; | ||
43 | }; | ||
44 | |||
45 | struct net_dim_sample { | ||
46 | ktime_t time; | ||
47 | u32 pkt_ctr; | ||
48 | u32 byte_ctr; | ||
49 | u16 event_ctr; | ||
50 | }; | ||
51 | |||
52 | struct net_dim_stats { | ||
53 | int ppms; /* packets per msec */ | ||
54 | int bpms; /* bytes per msec */ | ||
55 | int epms; /* events per msec */ | ||
56 | }; | ||
57 | |||
58 | struct net_dim { /* Adaptive Moderation */ | ||
59 | u8 state; | ||
60 | struct net_dim_stats prev_stats; | ||
61 | struct net_dim_sample start_sample; | ||
62 | struct work_struct work; | ||
63 | u8 profile_ix; | ||
64 | u8 mode; | ||
65 | u8 tune_state; | ||
66 | u8 steps_right; | ||
67 | u8 steps_left; | ||
68 | u8 tired; | ||
69 | }; | ||
70 | |||
71 | enum { | ||
72 | NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0, | ||
73 | NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1, | ||
74 | NET_DIM_CQ_PERIOD_NUM_MODES | ||
75 | }; | ||
76 | |||
77 | /* Adaptive moderation logic */ | ||
78 | enum { | ||
79 | NET_DIM_START_MEASURE, | ||
80 | NET_DIM_MEASURE_IN_PROGRESS, | ||
81 | NET_DIM_APPLY_NEW_PROFILE, | ||
82 | }; | ||
83 | |||
84 | enum { | ||
85 | NET_DIM_PARKING_ON_TOP, | ||
86 | NET_DIM_PARKING_TIRED, | ||
87 | NET_DIM_GOING_RIGHT, | ||
88 | NET_DIM_GOING_LEFT, | ||
89 | }; | ||
90 | |||
91 | enum { | ||
92 | NET_DIM_STATS_WORSE, | ||
93 | NET_DIM_STATS_SAME, | ||
94 | NET_DIM_STATS_BETTER, | ||
95 | }; | ||
96 | |||
97 | enum { | ||
98 | NET_DIM_STEPPED, | ||
99 | NET_DIM_TOO_TIRED, | ||
100 | NET_DIM_ON_EDGE, | ||
101 | }; | ||
102 | |||
103 | #define NET_DIM_PARAMS_NUM_PROFILES 5 | ||
104 | /* Adaptive moderation profiles */ | ||
105 | #define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256 | ||
106 | #define NET_DIM_DEF_PROFILE_CQE 1 | ||
107 | #define NET_DIM_DEF_PROFILE_EQE 1 | ||
108 | |||
109 | /* All profiles sizes must be NET_PARAMS_DIM_NUM_PROFILES */ | ||
110 | #define NET_DIM_EQE_PROFILES { \ | ||
111 | {1, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ | ||
112 | {8, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ | ||
113 | {64, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ | ||
114 | {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ | ||
115 | {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \ | ||
116 | } | ||
117 | |||
118 | #define NET_DIM_CQE_PROFILES { \ | ||
119 | {2, 256}, \ | ||
120 | {8, 128}, \ | ||
121 | {16, 64}, \ | ||
122 | {32, 64}, \ | ||
123 | {64, 64} \ | ||
124 | } | ||
125 | |||
126 | static const struct net_dim_cq_moder | ||
127 | profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = { | ||
128 | NET_DIM_EQE_PROFILES, | ||
129 | NET_DIM_CQE_PROFILES, | ||
130 | }; | ||
131 | |||
132 | static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode, | ||
133 | int ix) | ||
134 | { | ||
135 | struct net_dim_cq_moder cq_moder; | ||
136 | |||
137 | cq_moder = profile[cq_period_mode][ix]; | ||
138 | cq_moder.cq_period_mode = cq_period_mode; | ||
139 | return cq_moder; | ||
140 | } | ||
141 | |||
142 | static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode) | ||
143 | { | ||
144 | int default_profile_ix; | ||
145 | |||
146 | if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE) | ||
147 | default_profile_ix = NET_DIM_DEF_PROFILE_CQE; | ||
148 | else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */ | ||
149 | default_profile_ix = NET_DIM_DEF_PROFILE_EQE; | ||
150 | |||
151 | return net_dim_get_profile(rx_cq_period_mode, default_profile_ix); | ||
152 | } | ||
153 | |||
154 | static inline bool net_dim_on_top(struct net_dim *dim) | ||
155 | { | ||
156 | switch (dim->tune_state) { | ||
157 | case NET_DIM_PARKING_ON_TOP: | ||
158 | case NET_DIM_PARKING_TIRED: | ||
159 | return true; | ||
160 | case NET_DIM_GOING_RIGHT: | ||
161 | return (dim->steps_left > 1) && (dim->steps_right == 1); | ||
162 | default: /* NET_DIM_GOING_LEFT */ | ||
163 | return (dim->steps_right > 1) && (dim->steps_left == 1); | ||
164 | } | ||
165 | } | ||
166 | |||
167 | static inline void net_dim_turn(struct net_dim *dim) | ||
168 | { | ||
169 | switch (dim->tune_state) { | ||
170 | case NET_DIM_PARKING_ON_TOP: | ||
171 | case NET_DIM_PARKING_TIRED: | ||
172 | break; | ||
173 | case NET_DIM_GOING_RIGHT: | ||
174 | dim->tune_state = NET_DIM_GOING_LEFT; | ||
175 | dim->steps_left = 0; | ||
176 | break; | ||
177 | case NET_DIM_GOING_LEFT: | ||
178 | dim->tune_state = NET_DIM_GOING_RIGHT; | ||
179 | dim->steps_right = 0; | ||
180 | break; | ||
181 | } | ||
182 | } | ||
183 | |||
184 | static inline int net_dim_step(struct net_dim *dim) | ||
185 | { | ||
186 | if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2)) | ||
187 | return NET_DIM_TOO_TIRED; | ||
188 | |||
189 | switch (dim->tune_state) { | ||
190 | case NET_DIM_PARKING_ON_TOP: | ||
191 | case NET_DIM_PARKING_TIRED: | ||
192 | break; | ||
193 | case NET_DIM_GOING_RIGHT: | ||
194 | if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1)) | ||
195 | return NET_DIM_ON_EDGE; | ||
196 | dim->profile_ix++; | ||
197 | dim->steps_right++; | ||
198 | break; | ||
199 | case NET_DIM_GOING_LEFT: | ||
200 | if (dim->profile_ix == 0) | ||
201 | return NET_DIM_ON_EDGE; | ||
202 | dim->profile_ix--; | ||
203 | dim->steps_left++; | ||
204 | break; | ||
205 | } | ||
206 | |||
207 | dim->tired++; | ||
208 | return NET_DIM_STEPPED; | ||
209 | } | ||
210 | |||
211 | static inline void net_dim_park_on_top(struct net_dim *dim) | ||
212 | { | ||
213 | dim->steps_right = 0; | ||
214 | dim->steps_left = 0; | ||
215 | dim->tired = 0; | ||
216 | dim->tune_state = NET_DIM_PARKING_ON_TOP; | ||
217 | } | ||
218 | |||
219 | static inline void net_dim_park_tired(struct net_dim *dim) | ||
220 | { | ||
221 | dim->steps_right = 0; | ||
222 | dim->steps_left = 0; | ||
223 | dim->tune_state = NET_DIM_PARKING_TIRED; | ||
224 | } | ||
225 | |||
226 | static inline void net_dim_exit_parking(struct net_dim *dim) | ||
227 | { | ||
228 | dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT : | ||
229 | NET_DIM_GOING_RIGHT; | ||
230 | net_dim_step(dim); | ||
231 | } | ||
232 | |||
233 | #define IS_SIGNIFICANT_DIFF(val, ref) \ | ||
234 | (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */ | ||
235 | |||
236 | static inline int net_dim_stats_compare(struct net_dim_stats *curr, | ||
237 | struct net_dim_stats *prev) | ||
238 | { | ||
239 | if (!prev->bpms) | ||
240 | return curr->bpms ? NET_DIM_STATS_BETTER : | ||
241 | NET_DIM_STATS_SAME; | ||
242 | |||
243 | if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms)) | ||
244 | return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER : | ||
245 | NET_DIM_STATS_WORSE; | ||
246 | |||
247 | if (!prev->ppms) | ||
248 | return curr->ppms ? NET_DIM_STATS_BETTER : | ||
249 | NET_DIM_STATS_SAME; | ||
250 | |||
251 | if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms)) | ||
252 | return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER : | ||
253 | NET_DIM_STATS_WORSE; | ||
254 | |||
255 | if (!prev->epms) | ||
256 | return NET_DIM_STATS_SAME; | ||
257 | |||
258 | if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms)) | ||
259 | return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER : | ||
260 | NET_DIM_STATS_WORSE; | ||
261 | |||
262 | return NET_DIM_STATS_SAME; | ||
263 | } | ||
264 | |||
265 | static inline bool net_dim_decision(struct net_dim_stats *curr_stats, | ||
266 | struct net_dim *dim) | ||
267 | { | ||
268 | int prev_state = dim->tune_state; | ||
269 | int prev_ix = dim->profile_ix; | ||
270 | int stats_res; | ||
271 | int step_res; | ||
272 | |||
273 | switch (dim->tune_state) { | ||
274 | case NET_DIM_PARKING_ON_TOP: | ||
275 | stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); | ||
276 | if (stats_res != NET_DIM_STATS_SAME) | ||
277 | net_dim_exit_parking(dim); | ||
278 | break; | ||
279 | |||
280 | case NET_DIM_PARKING_TIRED: | ||
281 | dim->tired--; | ||
282 | if (!dim->tired) | ||
283 | net_dim_exit_parking(dim); | ||
284 | break; | ||
285 | |||
286 | case NET_DIM_GOING_RIGHT: | ||
287 | case NET_DIM_GOING_LEFT: | ||
288 | stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats); | ||
289 | if (stats_res != NET_DIM_STATS_BETTER) | ||
290 | net_dim_turn(dim); | ||
291 | |||
292 | if (net_dim_on_top(dim)) { | ||
293 | net_dim_park_on_top(dim); | ||
294 | break; | ||
295 | } | ||
296 | |||
297 | step_res = net_dim_step(dim); | ||
298 | switch (step_res) { | ||
299 | case NET_DIM_ON_EDGE: | ||
300 | net_dim_park_on_top(dim); | ||
301 | break; | ||
302 | case NET_DIM_TOO_TIRED: | ||
303 | net_dim_park_tired(dim); | ||
304 | break; | ||
305 | } | ||
306 | |||
307 | break; | ||
308 | } | ||
309 | |||
310 | if ((prev_state != NET_DIM_PARKING_ON_TOP) || | ||
311 | (dim->tune_state != NET_DIM_PARKING_ON_TOP)) | ||
312 | dim->prev_stats = *curr_stats; | ||
313 | |||
314 | return dim->profile_ix != prev_ix; | ||
315 | } | ||
316 | |||
317 | static inline void net_dim_sample(u16 event_ctr, | ||
318 | u64 packets, | ||
319 | u64 bytes, | ||
320 | struct net_dim_sample *s) | ||
321 | { | ||
322 | s->time = ktime_get(); | ||
323 | s->pkt_ctr = packets; | ||
324 | s->byte_ctr = bytes; | ||
325 | s->event_ctr = event_ctr; | ||
326 | } | ||
327 | |||
328 | #define NET_DIM_NEVENTS 64 | ||
329 | #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE) | ||
330 | #define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1)) | ||
331 | |||
332 | static inline void net_dim_calc_stats(struct net_dim_sample *start, | ||
333 | struct net_dim_sample *end, | ||
334 | struct net_dim_stats *curr_stats) | ||
335 | { | ||
336 | /* u32 holds up to 71 minutes, should be enough */ | ||
337 | u32 delta_us = ktime_us_delta(end->time, start->time); | ||
338 | u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr); | ||
339 | u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr, | ||
340 | start->byte_ctr); | ||
341 | |||
342 | if (!delta_us) | ||
343 | return; | ||
344 | |||
345 | curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us); | ||
346 | curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us); | ||
347 | curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC, | ||
348 | delta_us); | ||
349 | } | ||
350 | |||
351 | static inline void net_dim(struct net_dim *dim, | ||
352 | struct net_dim_sample end_sample) | ||
353 | { | ||
354 | struct net_dim_stats curr_stats; | ||
355 | u16 nevents; | ||
356 | |||
357 | switch (dim->state) { | ||
358 | case NET_DIM_MEASURE_IN_PROGRESS: | ||
359 | nevents = BIT_GAP(BITS_PER_TYPE(u16), | ||
360 | end_sample.event_ctr, | ||
361 | dim->start_sample.event_ctr); | ||
362 | if (nevents < NET_DIM_NEVENTS) | ||
363 | break; | ||
364 | net_dim_calc_stats(&dim->start_sample, &end_sample, | ||
365 | &curr_stats); | ||
366 | if (net_dim_decision(&curr_stats, dim)) { | ||
367 | dim->state = NET_DIM_APPLY_NEW_PROFILE; | ||
368 | schedule_work(&dim->work); | ||
369 | break; | ||
370 | } | ||
371 | /* fall through */ | ||
372 | case NET_DIM_START_MEASURE: | ||
373 | dim->state = NET_DIM_MEASURE_IN_PROGRESS; | ||
374 | break; | ||
375 | case NET_DIM_APPLY_NEW_PROFILE: | ||
376 | break; | ||
377 | } | ||
378 | } | ||
379 | |||
380 | #endif /* NET_DIM_H */ | ||
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index b1b0ca7ccb2b..db84c516bcfb 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
@@ -78,6 +78,8 @@ enum { | |||
78 | NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ | 78 | NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */ |
79 | NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ | 79 | NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */ |
80 | 80 | ||
81 | NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */ | ||
82 | |||
81 | /* | 83 | /* |
82 | * Add your fresh new feature above and remember to update | 84 | * Add your fresh new feature above and remember to update |
83 | * netdev_features_strings[] in net/core/ethtool.c and maybe | 85 | * netdev_features_strings[] in net/core/ethtool.c and maybe |
@@ -97,6 +99,7 @@ enum { | |||
97 | #define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) | 99 | #define NETIF_F_FRAGLIST __NETIF_F(FRAGLIST) |
98 | #define NETIF_F_FSO __NETIF_F(FSO) | 100 | #define NETIF_F_FSO __NETIF_F(FSO) |
99 | #define NETIF_F_GRO __NETIF_F(GRO) | 101 | #define NETIF_F_GRO __NETIF_F(GRO) |
102 | #define NETIF_F_GRO_HW __NETIF_F(GRO_HW) | ||
100 | #define NETIF_F_GSO __NETIF_F(GSO) | 103 | #define NETIF_F_GSO __NETIF_F(GSO) |
101 | #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) | 104 | #define NETIF_F_GSO_ROBUST __NETIF_F(GSO_ROBUST) |
102 | #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) | 105 | #define NETIF_F_HIGHDMA __NETIF_F(HIGHDMA) |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index ef789e1d679e..5eef6c8e2741 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
@@ -44,6 +44,7 @@ | |||
44 | #include <net/dcbnl.h> | 44 | #include <net/dcbnl.h> |
45 | #endif | 45 | #endif |
46 | #include <net/netprio_cgroup.h> | 46 | #include <net/netprio_cgroup.h> |
47 | #include <net/xdp.h> | ||
47 | 48 | ||
48 | #include <linux/netdev_features.h> | 49 | #include <linux/netdev_features.h> |
49 | #include <linux/neighbour.h> | 50 | #include <linux/neighbour.h> |
@@ -686,6 +687,7 @@ struct netdev_rx_queue { | |||
686 | #endif | 687 | #endif |
687 | struct kobject kobj; | 688 | struct kobject kobj; |
688 | struct net_device *dev; | 689 | struct net_device *dev; |
690 | struct xdp_rxq_info xdp_rxq; | ||
689 | } ____cacheline_aligned_in_smp; | 691 | } ____cacheline_aligned_in_smp; |
690 | 692 | ||
691 | /* | 693 | /* |
@@ -778,6 +780,7 @@ enum tc_setup_type { | |||
778 | TC_SETUP_BLOCK, | 780 | TC_SETUP_BLOCK, |
779 | TC_SETUP_QDISC_CBS, | 781 | TC_SETUP_QDISC_CBS, |
780 | TC_SETUP_QDISC_RED, | 782 | TC_SETUP_QDISC_RED, |
783 | TC_SETUP_QDISC_PRIO, | ||
781 | }; | 784 | }; |
782 | 785 | ||
783 | /* These structures hold the attributes of bpf state that are being passed | 786 | /* These structures hold the attributes of bpf state that are being passed |
@@ -802,9 +805,11 @@ enum bpf_netdev_command { | |||
802 | BPF_OFFLOAD_VERIFIER_PREP, | 805 | BPF_OFFLOAD_VERIFIER_PREP, |
803 | BPF_OFFLOAD_TRANSLATE, | 806 | BPF_OFFLOAD_TRANSLATE, |
804 | BPF_OFFLOAD_DESTROY, | 807 | BPF_OFFLOAD_DESTROY, |
808 | BPF_OFFLOAD_MAP_ALLOC, | ||
809 | BPF_OFFLOAD_MAP_FREE, | ||
805 | }; | 810 | }; |
806 | 811 | ||
807 | struct bpf_ext_analyzer_ops; | 812 | struct bpf_prog_offload_ops; |
808 | struct netlink_ext_ack; | 813 | struct netlink_ext_ack; |
809 | 814 | ||
810 | struct netdev_bpf { | 815 | struct netdev_bpf { |
@@ -820,16 +825,22 @@ struct netdev_bpf { | |||
820 | struct { | 825 | struct { |
821 | u8 prog_attached; | 826 | u8 prog_attached; |
822 | u32 prog_id; | 827 | u32 prog_id; |
828 | /* flags with which program was installed */ | ||
829 | u32 prog_flags; | ||
823 | }; | 830 | }; |
824 | /* BPF_OFFLOAD_VERIFIER_PREP */ | 831 | /* BPF_OFFLOAD_VERIFIER_PREP */ |
825 | struct { | 832 | struct { |
826 | struct bpf_prog *prog; | 833 | struct bpf_prog *prog; |
827 | const struct bpf_ext_analyzer_ops *ops; /* callee set */ | 834 | const struct bpf_prog_offload_ops *ops; /* callee set */ |
828 | } verifier; | 835 | } verifier; |
829 | /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ | 836 | /* BPF_OFFLOAD_TRANSLATE, BPF_OFFLOAD_DESTROY */ |
830 | struct { | 837 | struct { |
831 | struct bpf_prog *prog; | 838 | struct bpf_prog *prog; |
832 | } offload; | 839 | } offload; |
840 | /* BPF_OFFLOAD_MAP_ALLOC, BPF_OFFLOAD_MAP_FREE */ | ||
841 | struct { | ||
842 | struct bpf_offloaded_map *offmap; | ||
843 | }; | ||
833 | }; | 844 | }; |
834 | }; | 845 | }; |
835 | 846 | ||
@@ -840,6 +851,7 @@ struct xfrmdev_ops { | |||
840 | void (*xdo_dev_state_free) (struct xfrm_state *x); | 851 | void (*xdo_dev_state_free) (struct xfrm_state *x); |
841 | bool (*xdo_dev_offload_ok) (struct sk_buff *skb, | 852 | bool (*xdo_dev_offload_ok) (struct sk_buff *skb, |
842 | struct xfrm_state *x); | 853 | struct xfrm_state *x); |
854 | void (*xdo_dev_state_advance_esn) (struct xfrm_state *x); | ||
843 | }; | 855 | }; |
844 | #endif | 856 | #endif |
845 | 857 | ||
@@ -1458,8 +1470,6 @@ enum netdev_priv_flags { | |||
1458 | * @base_addr: Device I/O address | 1470 | * @base_addr: Device I/O address |
1459 | * @irq: Device IRQ number | 1471 | * @irq: Device IRQ number |
1460 | * | 1472 | * |
1461 | * @carrier_changes: Stats to monitor carrier on<->off transitions | ||
1462 | * | ||
1463 | * @state: Generic network queuing layer state, see netdev_state_t | 1473 | * @state: Generic network queuing layer state, see netdev_state_t |
1464 | * @dev_list: The global list of network devices | 1474 | * @dev_list: The global list of network devices |
1465 | * @napi_list: List entry used for polling NAPI devices | 1475 | * @napi_list: List entry used for polling NAPI devices |
@@ -1495,6 +1505,8 @@ enum netdev_priv_flags { | |||
1495 | * do not use this in drivers | 1505 | * do not use this in drivers |
1496 | * @rx_nohandler: nohandler dropped packets by core network on | 1506 | * @rx_nohandler: nohandler dropped packets by core network on |
1497 | * inactive devices, do not use this in drivers | 1507 | * inactive devices, do not use this in drivers |
1508 | * @carrier_up_count: Number of times the carrier has been up | ||
1509 | * @carrier_down_count: Number of times the carrier has been down | ||
1498 | * | 1510 | * |
1499 | * @wireless_handlers: List of functions to handle Wireless Extensions, | 1511 | * @wireless_handlers: List of functions to handle Wireless Extensions, |
1500 | * instead of ioctl, | 1512 | * instead of ioctl, |
@@ -1669,8 +1681,6 @@ struct net_device { | |||
1669 | unsigned long base_addr; | 1681 | unsigned long base_addr; |
1670 | int irq; | 1682 | int irq; |
1671 | 1683 | ||
1672 | atomic_t carrier_changes; | ||
1673 | |||
1674 | /* | 1684 | /* |
1675 | * Some hardware also needs these fields (state,dev_list, | 1685 | * Some hardware also needs these fields (state,dev_list, |
1676 | * napi_list,unreg_list,close_list) but they are not | 1686 | * napi_list,unreg_list,close_list) but they are not |
@@ -1708,6 +1718,10 @@ struct net_device { | |||
1708 | atomic_long_t tx_dropped; | 1718 | atomic_long_t tx_dropped; |
1709 | atomic_long_t rx_nohandler; | 1719 | atomic_long_t rx_nohandler; |
1710 | 1720 | ||
1721 | /* Stats to monitor link on/off, flapping */ | ||
1722 | atomic_t carrier_up_count; | ||
1723 | atomic_t carrier_down_count; | ||
1724 | |||
1711 | #ifdef CONFIG_WIRELESS_EXT | 1725 | #ifdef CONFIG_WIRELESS_EXT |
1712 | const struct iw_handler_def *wireless_handlers; | 1726 | const struct iw_handler_def *wireless_handlers; |
1713 | struct iw_public_data *wireless_data; | 1727 | struct iw_public_data *wireless_data; |
@@ -1724,7 +1738,7 @@ struct net_device { | |||
1724 | const struct ndisc_ops *ndisc_ops; | 1738 | const struct ndisc_ops *ndisc_ops; |
1725 | #endif | 1739 | #endif |
1726 | 1740 | ||
1727 | #ifdef CONFIG_XFRM | 1741 | #ifdef CONFIG_XFRM_OFFLOAD |
1728 | const struct xfrmdev_ops *xfrmdev_ops; | 1742 | const struct xfrmdev_ops *xfrmdev_ops; |
1729 | #endif | 1743 | #endif |
1730 | 1744 | ||
@@ -1801,12 +1815,9 @@ struct net_device { | |||
1801 | /* Interface address info used in eth_type_trans() */ | 1815 | /* Interface address info used in eth_type_trans() */ |
1802 | unsigned char *dev_addr; | 1816 | unsigned char *dev_addr; |
1803 | 1817 | ||
1804 | #ifdef CONFIG_SYSFS | ||
1805 | struct netdev_rx_queue *_rx; | 1818 | struct netdev_rx_queue *_rx; |
1806 | |||
1807 | unsigned int num_rx_queues; | 1819 | unsigned int num_rx_queues; |
1808 | unsigned int real_num_rx_queues; | 1820 | unsigned int real_num_rx_queues; |
1809 | #endif | ||
1810 | 1821 | ||
1811 | struct bpf_prog __rcu *xdp_prog; | 1822 | struct bpf_prog __rcu *xdp_prog; |
1812 | unsigned long gro_flush_timeout; | 1823 | unsigned long gro_flush_timeout; |
@@ -2751,7 +2762,8 @@ static inline bool dev_validate_header(const struct net_device *dev, | |||
2751 | return false; | 2762 | return false; |
2752 | } | 2763 | } |
2753 | 2764 | ||
2754 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len); | 2765 | typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, |
2766 | int len, int size); | ||
2755 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); | 2767 | int register_gifconf(unsigned int family, gifconf_func_t *gifconf); |
2756 | static inline int unregister_gifconf(unsigned int family) | 2768 | static inline int unregister_gifconf(unsigned int family) |
2757 | { | 2769 | { |
@@ -2791,7 +2803,9 @@ struct softnet_data { | |||
2791 | struct Qdisc *output_queue; | 2803 | struct Qdisc *output_queue; |
2792 | struct Qdisc **output_queue_tailp; | 2804 | struct Qdisc **output_queue_tailp; |
2793 | struct sk_buff *completion_queue; | 2805 | struct sk_buff *completion_queue; |
2794 | 2806 | #ifdef CONFIG_XFRM_OFFLOAD | |
2807 | struct sk_buff_head xfrm_backlog; | ||
2808 | #endif | ||
2795 | #ifdef CONFIG_RPS | 2809 | #ifdef CONFIG_RPS |
2796 | /* input_queue_head should be written by cpu owning this struct, | 2810 | /* input_queue_head should be written by cpu owning this struct, |
2797 | * and only read by other cpus. Worth using a cache line. | 2811 | * and only read by other cpus. Worth using a cache line. |
@@ -3214,6 +3228,12 @@ static inline int netif_set_real_num_rx_queues(struct net_device *dev, | |||
3214 | } | 3228 | } |
3215 | #endif | 3229 | #endif |
3216 | 3230 | ||
3231 | static inline struct netdev_rx_queue * | ||
3232 | __netif_get_rx_queue(struct net_device *dev, unsigned int rxq) | ||
3233 | { | ||
3234 | return dev->_rx + rxq; | ||
3235 | } | ||
3236 | |||
3217 | #ifdef CONFIG_SYSFS | 3237 | #ifdef CONFIG_SYSFS |
3218 | static inline unsigned int get_netdev_rx_queue_index( | 3238 | static inline unsigned int get_netdev_rx_queue_index( |
3219 | struct netdev_rx_queue *queue) | 3239 | struct netdev_rx_queue *queue) |
@@ -3302,7 +3322,9 @@ int netdev_rx_handler_register(struct net_device *dev, | |||
3302 | void netdev_rx_handler_unregister(struct net_device *dev); | 3322 | void netdev_rx_handler_unregister(struct net_device *dev); |
3303 | 3323 | ||
3304 | bool dev_valid_name(const char *name); | 3324 | bool dev_valid_name(const char *name); |
3305 | int dev_ioctl(struct net *net, unsigned int cmd, void __user *); | 3325 | int dev_ioctl(struct net *net, unsigned int cmd, struct ifreq *ifr, |
3326 | bool *need_copyout); | ||
3327 | int dev_ifconf(struct net *net, struct ifconf *, int); | ||
3306 | int dev_ethtool(struct net *net, struct ifreq *); | 3328 | int dev_ethtool(struct net *net, struct ifreq *); |
3307 | unsigned int dev_get_flags(const struct net_device *); | 3329 | unsigned int dev_get_flags(const struct net_device *); |
3308 | int __dev_change_flags(struct net_device *, unsigned int flags); | 3330 | int __dev_change_flags(struct net_device *, unsigned int flags); |
@@ -3315,6 +3337,7 @@ int dev_get_alias(const struct net_device *, char *, size_t); | |||
3315 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); | 3337 | int dev_change_net_namespace(struct net_device *, struct net *, const char *); |
3316 | int __dev_set_mtu(struct net_device *, int); | 3338 | int __dev_set_mtu(struct net_device *, int); |
3317 | int dev_set_mtu(struct net_device *, int); | 3339 | int dev_set_mtu(struct net_device *, int); |
3340 | int dev_change_tx_queue_len(struct net_device *, unsigned long); | ||
3318 | void dev_set_group(struct net_device *, int); | 3341 | void dev_set_group(struct net_device *, int); |
3319 | int dev_set_mac_address(struct net_device *, struct sockaddr *); | 3342 | int dev_set_mac_address(struct net_device *, struct sockaddr *); |
3320 | int dev_change_carrier(struct net_device *, bool new_carrier); | 3343 | int dev_change_carrier(struct net_device *, bool new_carrier); |
@@ -3323,14 +3346,15 @@ int dev_get_phys_port_id(struct net_device *dev, | |||
3323 | int dev_get_phys_port_name(struct net_device *dev, | 3346 | int dev_get_phys_port_name(struct net_device *dev, |
3324 | char *name, size_t len); | 3347 | char *name, size_t len); |
3325 | int dev_change_proto_down(struct net_device *dev, bool proto_down); | 3348 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
3326 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev); | 3349 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
3327 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 3350 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
3328 | struct netdev_queue *txq, int *ret); | 3351 | struct netdev_queue *txq, int *ret); |
3329 | 3352 | ||
3330 | typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); | 3353 | typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf); |
3331 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, | 3354 | int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack, |
3332 | int fd, u32 flags); | 3355 | int fd, u32 flags); |
3333 | u8 __dev_xdp_attached(struct net_device *dev, bpf_op_t xdp_op, u32 *prog_id); | 3356 | void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op, |
3357 | struct netdev_bpf *xdp); | ||
3334 | 3358 | ||
3335 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | 3359 | int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
3336 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); | 3360 | int dev_forward_skb(struct net_device *dev, struct sk_buff *skb); |
@@ -4399,11 +4423,11 @@ do { \ | |||
4399 | * file/line information and a backtrace. | 4423 | * file/line information and a backtrace. |
4400 | */ | 4424 | */ |
4401 | #define netdev_WARN(dev, format, args...) \ | 4425 | #define netdev_WARN(dev, format, args...) \ |
4402 | WARN(1, "netdevice: %s%s\n" format, netdev_name(dev), \ | 4426 | WARN(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
4403 | netdev_reg_state(dev), ##args) | 4427 | netdev_reg_state(dev), ##args) |
4404 | 4428 | ||
4405 | #define netdev_WARN_ONCE(dev, condition, format, arg...) \ | 4429 | #define netdev_WARN_ONCE(dev, format, args...) \ |
4406 | WARN_ONCE(1, "netdevice: %s%s\n" format, netdev_name(dev) \ | 4430 | WARN_ONCE(1, "netdevice: %s%s: " format, netdev_name(dev), \ |
4407 | netdev_reg_state(dev), ##args) | 4431 | netdev_reg_state(dev), ##args) |
4408 | 4432 | ||
4409 | /* netif printk helpers, similar to netdev_printk */ | 4433 | /* netif printk helpers, similar to netdev_printk */ |
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index b24e9b101651..85a1a0b32c66 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
@@ -67,6 +67,7 @@ struct nf_hook_ops { | |||
67 | struct net_device *dev; | 67 | struct net_device *dev; |
68 | void *priv; | 68 | void *priv; |
69 | u_int8_t pf; | 69 | u_int8_t pf; |
70 | bool nat_hook; | ||
70 | unsigned int hooknum; | 71 | unsigned int hooknum; |
71 | /* Hooks are ordered in ascending priority. */ | 72 | /* Hooks are ordered in ascending priority. */ |
72 | int priority; | 73 | int priority; |
@@ -77,17 +78,28 @@ struct nf_hook_entry { | |||
77 | void *priv; | 78 | void *priv; |
78 | }; | 79 | }; |
79 | 80 | ||
81 | struct nf_hook_entries_rcu_head { | ||
82 | struct rcu_head head; | ||
83 | void *allocation; | ||
84 | }; | ||
85 | |||
80 | struct nf_hook_entries { | 86 | struct nf_hook_entries { |
81 | u16 num_hook_entries; | 87 | u16 num_hook_entries; |
82 | /* padding */ | 88 | /* padding */ |
83 | struct nf_hook_entry hooks[]; | 89 | struct nf_hook_entry hooks[]; |
84 | 90 | ||
85 | /* trailer: pointers to original orig_ops of each hook. | 91 | /* trailer: pointers to original orig_ops of each hook, |
86 | * | 92 | * followed by rcu_head and scratch space used for freeing |
87 | * This is not part of struct nf_hook_entry since its only | 93 | * the structure via call_rcu. |
88 | * needed in slow path (hook register/unregister). | ||
89 | * | 94 | * |
95 | * This is not part of struct nf_hook_entry since its only | ||
96 | * needed in slow path (hook register/unregister): | ||
90 | * const struct nf_hook_ops *orig_ops[] | 97 | * const struct nf_hook_ops *orig_ops[] |
98 | * | ||
99 | * For the same reason, we store this at end -- its | ||
100 | * only needed when a hook is deleted, not during | ||
101 | * packet path processing: | ||
102 | * struct nf_hook_entries_rcu_head head | ||
91 | */ | 103 | */ |
92 | }; | 104 | }; |
93 | 105 | ||
@@ -184,7 +196,7 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, | |||
184 | struct net_device *indev, struct net_device *outdev, | 196 | struct net_device *indev, struct net_device *outdev, |
185 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) | 197 | int (*okfn)(struct net *, struct sock *, struct sk_buff *)) |
186 | { | 198 | { |
187 | struct nf_hook_entries *hook_head; | 199 | struct nf_hook_entries *hook_head = NULL; |
188 | int ret = 1; | 200 | int ret = 1; |
189 | 201 | ||
190 | #ifdef HAVE_JUMP_LABEL | 202 | #ifdef HAVE_JUMP_LABEL |
@@ -195,7 +207,33 @@ static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net, | |||
195 | #endif | 207 | #endif |
196 | 208 | ||
197 | rcu_read_lock(); | 209 | rcu_read_lock(); |
198 | hook_head = rcu_dereference(net->nf.hooks[pf][hook]); | 210 | switch (pf) { |
211 | case NFPROTO_IPV4: | ||
212 | hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]); | ||
213 | break; | ||
214 | case NFPROTO_IPV6: | ||
215 | hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]); | ||
216 | break; | ||
217 | case NFPROTO_ARP: | ||
218 | #ifdef CONFIG_NETFILTER_FAMILY_ARP | ||
219 | hook_head = rcu_dereference(net->nf.hooks_arp[hook]); | ||
220 | #endif | ||
221 | break; | ||
222 | case NFPROTO_BRIDGE: | ||
223 | #ifdef CONFIG_NETFILTER_FAMILY_BRIDGE | ||
224 | hook_head = rcu_dereference(net->nf.hooks_bridge[hook]); | ||
225 | #endif | ||
226 | break; | ||
227 | #if IS_ENABLED(CONFIG_DECNET) | ||
228 | case NFPROTO_DECNET: | ||
229 | hook_head = rcu_dereference(net->nf.hooks_decnet[hook]); | ||
230 | break; | ||
231 | #endif | ||
232 | default: | ||
233 | WARN_ON_ONCE(1); | ||
234 | break; | ||
235 | } | ||
236 | |||
199 | if (hook_head) { | 237 | if (hook_head) { |
200 | struct nf_hook_state state; | 238 | struct nf_hook_state state; |
201 | 239 | ||
@@ -271,64 +309,16 @@ int skb_make_writable(struct sk_buff *skb, unsigned int writable_len); | |||
271 | struct flowi; | 309 | struct flowi; |
272 | struct nf_queue_entry; | 310 | struct nf_queue_entry; |
273 | 311 | ||
274 | struct nf_afinfo { | 312 | __sum16 nf_checksum(struct sk_buff *skb, unsigned int hook, |
275 | unsigned short family; | 313 | unsigned int dataoff, u_int8_t protocol, |
276 | __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, | 314 | unsigned short family); |
277 | unsigned int dataoff, u_int8_t protocol); | ||
278 | __sum16 (*checksum_partial)(struct sk_buff *skb, | ||
279 | unsigned int hook, | ||
280 | unsigned int dataoff, | ||
281 | unsigned int len, | ||
282 | u_int8_t protocol); | ||
283 | int (*route)(struct net *net, struct dst_entry **dst, | ||
284 | struct flowi *fl, bool strict); | ||
285 | void (*saveroute)(const struct sk_buff *skb, | ||
286 | struct nf_queue_entry *entry); | ||
287 | int (*reroute)(struct net *net, struct sk_buff *skb, | ||
288 | const struct nf_queue_entry *entry); | ||
289 | int route_key_size; | ||
290 | }; | ||
291 | |||
292 | extern const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO]; | ||
293 | static inline const struct nf_afinfo *nf_get_afinfo(unsigned short family) | ||
294 | { | ||
295 | return rcu_dereference(nf_afinfo[family]); | ||
296 | } | ||
297 | |||
298 | static inline __sum16 | ||
299 | nf_checksum(struct sk_buff *skb, unsigned int hook, unsigned int dataoff, | ||
300 | u_int8_t protocol, unsigned short family) | ||
301 | { | ||
302 | const struct nf_afinfo *afinfo; | ||
303 | __sum16 csum = 0; | ||
304 | |||
305 | rcu_read_lock(); | ||
306 | afinfo = nf_get_afinfo(family); | ||
307 | if (afinfo) | ||
308 | csum = afinfo->checksum(skb, hook, dataoff, protocol); | ||
309 | rcu_read_unlock(); | ||
310 | return csum; | ||
311 | } | ||
312 | |||
313 | static inline __sum16 | ||
314 | nf_checksum_partial(struct sk_buff *skb, unsigned int hook, | ||
315 | unsigned int dataoff, unsigned int len, | ||
316 | u_int8_t protocol, unsigned short family) | ||
317 | { | ||
318 | const struct nf_afinfo *afinfo; | ||
319 | __sum16 csum = 0; | ||
320 | |||
321 | rcu_read_lock(); | ||
322 | afinfo = nf_get_afinfo(family); | ||
323 | if (afinfo) | ||
324 | csum = afinfo->checksum_partial(skb, hook, dataoff, len, | ||
325 | protocol); | ||
326 | rcu_read_unlock(); | ||
327 | return csum; | ||
328 | } | ||
329 | 315 | ||
330 | int nf_register_afinfo(const struct nf_afinfo *afinfo); | 316 | __sum16 nf_checksum_partial(struct sk_buff *skb, unsigned int hook, |
331 | void nf_unregister_afinfo(const struct nf_afinfo *afinfo); | 317 | unsigned int dataoff, unsigned int len, |
318 | u_int8_t protocol, unsigned short family); | ||
319 | int nf_route(struct net *net, struct dst_entry **dst, struct flowi *fl, | ||
320 | bool strict, unsigned short family); | ||
321 | int nf_reroute(struct sk_buff *skb, struct nf_queue_entry *entry); | ||
332 | 322 | ||
333 | #include <net/flow.h> | 323 | #include <net/flow.h> |
334 | extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); | 324 | extern void (*nf_nat_decode_session_hook)(struct sk_buff *, struct flowi *); |
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h index 8e42253e5d4d..34fc80f3eb90 100644 --- a/include/linux/netfilter/ipset/ip_set.h +++ b/include/linux/netfilter/ipset/ip_set.h | |||
@@ -122,6 +122,8 @@ struct ip_set_ext { | |||
122 | u64 bytes; | 122 | u64 bytes; |
123 | char *comment; | 123 | char *comment; |
124 | u32 timeout; | 124 | u32 timeout; |
125 | u8 packets_op; | ||
126 | u8 bytes_op; | ||
125 | }; | 127 | }; |
126 | 128 | ||
127 | struct ip_set; | 129 | struct ip_set; |
@@ -339,6 +341,10 @@ extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[], | |||
339 | struct ip_set_ext *ext); | 341 | struct ip_set_ext *ext); |
340 | extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, | 342 | extern int ip_set_put_extensions(struct sk_buff *skb, const struct ip_set *set, |
341 | const void *e, bool active); | 343 | const void *e, bool active); |
344 | extern bool ip_set_match_extensions(struct ip_set *set, | ||
345 | const struct ip_set_ext *ext, | ||
346 | struct ip_set_ext *mext, | ||
347 | u32 flags, void *data); | ||
342 | 348 | ||
343 | static inline int | 349 | static inline int |
344 | ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) | 350 | ip_set_get_hostipaddr4(struct nlattr *nla, u32 *ipaddr) |
diff --git a/include/linux/netfilter/ipset/ip_set_counter.h b/include/linux/netfilter/ipset/ip_set_counter.h index bb6fba480118..3d33a2c3f39f 100644 --- a/include/linux/netfilter/ipset/ip_set_counter.h +++ b/include/linux/netfilter/ipset/ip_set_counter.h | |||
@@ -34,20 +34,33 @@ ip_set_get_packets(const struct ip_set_counter *counter) | |||
34 | return (u64)atomic64_read(&(counter)->packets); | 34 | return (u64)atomic64_read(&(counter)->packets); |
35 | } | 35 | } |
36 | 36 | ||
37 | static inline bool | ||
38 | ip_set_match_counter(u64 counter, u64 match, u8 op) | ||
39 | { | ||
40 | switch (op) { | ||
41 | case IPSET_COUNTER_NONE: | ||
42 | return true; | ||
43 | case IPSET_COUNTER_EQ: | ||
44 | return counter == match; | ||
45 | case IPSET_COUNTER_NE: | ||
46 | return counter != match; | ||
47 | case IPSET_COUNTER_LT: | ||
48 | return counter < match; | ||
49 | case IPSET_COUNTER_GT: | ||
50 | return counter > match; | ||
51 | } | ||
52 | return false; | ||
53 | } | ||
54 | |||
37 | static inline void | 55 | static inline void |
38 | ip_set_update_counter(struct ip_set_counter *counter, | 56 | ip_set_update_counter(struct ip_set_counter *counter, |
39 | const struct ip_set_ext *ext, | 57 | const struct ip_set_ext *ext, u32 flags) |
40 | struct ip_set_ext *mext, u32 flags) | ||
41 | { | 58 | { |
42 | if (ext->packets != ULLONG_MAX && | 59 | if (ext->packets != ULLONG_MAX && |
43 | !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { | 60 | !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) { |
44 | ip_set_add_bytes(ext->bytes, counter); | 61 | ip_set_add_bytes(ext->bytes, counter); |
45 | ip_set_add_packets(ext->packets, counter); | 62 | ip_set_add_packets(ext->packets, counter); |
46 | } | 63 | } |
47 | if (flags & IPSET_FLAG_MATCH_COUNTERS) { | ||
48 | mext->packets = ip_set_get_packets(counter); | ||
49 | mext->bytes = ip_set_get_bytes(counter); | ||
50 | } | ||
51 | } | 64 | } |
52 | 65 | ||
53 | static inline bool | 66 | static inline bool |
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 495ba4dd9da5..34551f8aaf9d 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h | |||
@@ -67,8 +67,7 @@ static inline bool lockdep_nfnl_is_held(__u8 subsys_id) | |||
67 | * @ss: The nfnetlink subsystem ID | 67 | * @ss: The nfnetlink subsystem ID |
68 | * | 68 | * |
69 | * Return the value of the specified RCU-protected pointer, but omit | 69 | * Return the value of the specified RCU-protected pointer, but omit |
70 | * both the smp_read_barrier_depends() and the READ_ONCE(), because | 70 | * the READ_ONCE(), because caller holds the NFNL subsystem mutex. |
71 | * caller holds the NFNL subsystem mutex. | ||
72 | */ | 71 | */ |
73 | #define nfnl_dereference(p, ss) \ | 72 | #define nfnl_dereference(p, ss) \ |
74 | rcu_dereference_protected(p, lockdep_nfnl_is_held(ss)) | 73 | rcu_dereference_protected(p, lockdep_nfnl_is_held(ss)) |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 33f7530f96b9..1313b35c3ab7 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
@@ -320,6 +320,8 @@ int xt_find_revision(u8 af, const char *name, u8 revision, int target, | |||
320 | 320 | ||
321 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, | 321 | struct xt_table *xt_find_table_lock(struct net *net, u_int8_t af, |
322 | const char *name); | 322 | const char *name); |
323 | struct xt_table *xt_request_find_table_lock(struct net *net, u_int8_t af, | ||
324 | const char *name); | ||
323 | void xt_table_unlock(struct xt_table *t); | 325 | void xt_table_unlock(struct xt_table *t); |
324 | 326 | ||
325 | int xt_proto_init(struct net *net, u_int8_t af); | 327 | int xt_proto_init(struct net *net, u_int8_t af); |
diff --git a/include/linux/netfilter_defs.h b/include/linux/netfilter_defs.h index dc6111adea06..8dddfb151f00 100644 --- a/include/linux/netfilter_defs.h +++ b/include/linux/netfilter_defs.h | |||
@@ -4,7 +4,17 @@ | |||
4 | 4 | ||
5 | #include <uapi/linux/netfilter.h> | 5 | #include <uapi/linux/netfilter.h> |
6 | 6 | ||
7 | /* in/out/forward only */ | ||
8 | #define NF_ARP_NUMHOOKS 3 | ||
9 | |||
10 | /* max hook is NF_DN_ROUTE (6), also see uapi/linux/netfilter_decnet.h */ | ||
11 | #define NF_DN_NUMHOOKS 7 | ||
12 | |||
13 | #if IS_ENABLED(CONFIG_DECNET) | ||
7 | /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ | 14 | /* Largest hook number + 1, see uapi/linux/netfilter_decnet.h */ |
8 | #define NF_MAX_HOOKS 8 | 15 | #define NF_MAX_HOOKS NF_DN_NUMHOOKS |
16 | #else | ||
17 | #define NF_MAX_HOOKS NF_INET_NUMHOOKS | ||
18 | #endif | ||
9 | 19 | ||
10 | #endif | 20 | #endif |
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 98c03b2462b5..b31dabfdb453 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h | |||
@@ -6,7 +6,53 @@ | |||
6 | 6 | ||
7 | #include <uapi/linux/netfilter_ipv4.h> | 7 | #include <uapi/linux/netfilter_ipv4.h> |
8 | 8 | ||
9 | /* Extra routing may needed on local out, as the QUEUE target never returns | ||
10 | * control to the table. | ||
11 | */ | ||
12 | struct ip_rt_info { | ||
13 | __be32 daddr; | ||
14 | __be32 saddr; | ||
15 | u_int8_t tos; | ||
16 | u_int32_t mark; | ||
17 | }; | ||
18 | |||
9 | int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); | 19 | int ip_route_me_harder(struct net *net, struct sk_buff *skb, unsigned addr_type); |
20 | |||
21 | struct nf_queue_entry; | ||
22 | |||
23 | #ifdef CONFIG_INET | ||
10 | __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, | 24 | __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, |
11 | unsigned int dataoff, u_int8_t protocol); | 25 | unsigned int dataoff, u_int8_t protocol); |
26 | __sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook, | ||
27 | unsigned int dataoff, unsigned int len, | ||
28 | u_int8_t protocol); | ||
29 | int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, | ||
30 | bool strict); | ||
31 | int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); | ||
32 | #else | ||
33 | static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, | ||
34 | unsigned int dataoff, u_int8_t protocol) | ||
35 | { | ||
36 | return 0; | ||
37 | } | ||
38 | static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb, | ||
39 | unsigned int hook, | ||
40 | unsigned int dataoff, | ||
41 | unsigned int len, | ||
42 | u_int8_t protocol) | ||
43 | { | ||
44 | return 0; | ||
45 | } | ||
46 | static inline int nf_ip_route(struct net *net, struct dst_entry **dst, | ||
47 | struct flowi *fl, bool strict) | ||
48 | { | ||
49 | return -EOPNOTSUPP; | ||
50 | } | ||
51 | static inline int nf_ip_reroute(struct sk_buff *skb, | ||
52 | const struct nf_queue_entry *entry) | ||
53 | { | ||
54 | return -EOPNOTSUPP; | ||
55 | } | ||
56 | #endif /* CONFIG_INET */ | ||
57 | |||
12 | #endif /*__LINUX_IP_NETFILTER_H*/ | 58 | #endif /*__LINUX_IP_NETFILTER_H*/ |
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index 47c6b04c28c0..288c597e75b3 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h | |||
@@ -9,6 +9,17 @@ | |||
9 | 9 | ||
10 | #include <uapi/linux/netfilter_ipv6.h> | 10 | #include <uapi/linux/netfilter_ipv6.h> |
11 | 11 | ||
12 | /* Extra routing may needed on local out, as the QUEUE target never returns | ||
13 | * control to the table. | ||
14 | */ | ||
15 | struct ip6_rt_info { | ||
16 | struct in6_addr daddr; | ||
17 | struct in6_addr saddr; | ||
18 | u_int32_t mark; | ||
19 | }; | ||
20 | |||
21 | struct nf_queue_entry; | ||
22 | |||
12 | /* | 23 | /* |
13 | * Hook functions for ipv6 to allow xt_* modules to be built-in even | 24 | * Hook functions for ipv6 to allow xt_* modules to be built-in even |
14 | * if IPv6 is a module. | 25 | * if IPv6 is a module. |
@@ -19,6 +30,14 @@ struct nf_ipv6_ops { | |||
19 | void (*route_input)(struct sk_buff *skb); | 30 | void (*route_input)(struct sk_buff *skb); |
20 | int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, | 31 | int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, |
21 | int (*output)(struct net *, struct sock *, struct sk_buff *)); | 32 | int (*output)(struct net *, struct sock *, struct sk_buff *)); |
33 | __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook, | ||
34 | unsigned int dataoff, u_int8_t protocol); | ||
35 | __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook, | ||
36 | unsigned int dataoff, unsigned int len, | ||
37 | u_int8_t protocol); | ||
38 | int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, | ||
39 | bool strict); | ||
40 | int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); | ||
22 | }; | 41 | }; |
23 | 42 | ||
24 | #ifdef CONFIG_NETFILTER | 43 | #ifdef CONFIG_NETFILTER |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 47adac640191..57ffaa20d564 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
@@ -457,7 +457,12 @@ enum lock_type4 { | |||
457 | 457 | ||
458 | #define NFS4_DEBUG 1 | 458 | #define NFS4_DEBUG 1 |
459 | 459 | ||
460 | /* Index of predefined Linux client operations */ | 460 | /* |
461 | * Index of predefined Linux client operations | ||
462 | * | ||
463 | * To ensure that /proc/net/rpc/nfs remains correctly ordered, please | ||
464 | * append only to this enum when adding new client operations. | ||
465 | */ | ||
461 | 466 | ||
462 | enum { | 467 | enum { |
463 | NFSPROC4_CLNT_NULL = 0, /* Unused */ | 468 | NFSPROC4_CLNT_NULL = 0, /* Unused */ |
@@ -480,7 +485,6 @@ enum { | |||
480 | NFSPROC4_CLNT_ACCESS, | 485 | NFSPROC4_CLNT_ACCESS, |
481 | NFSPROC4_CLNT_GETATTR, | 486 | NFSPROC4_CLNT_GETATTR, |
482 | NFSPROC4_CLNT_LOOKUP, | 487 | NFSPROC4_CLNT_LOOKUP, |
483 | NFSPROC4_CLNT_LOOKUPP, | ||
484 | NFSPROC4_CLNT_LOOKUP_ROOT, | 488 | NFSPROC4_CLNT_LOOKUP_ROOT, |
485 | NFSPROC4_CLNT_REMOVE, | 489 | NFSPROC4_CLNT_REMOVE, |
486 | NFSPROC4_CLNT_RENAME, | 490 | NFSPROC4_CLNT_RENAME, |
@@ -500,7 +504,6 @@ enum { | |||
500 | NFSPROC4_CLNT_SECINFO, | 504 | NFSPROC4_CLNT_SECINFO, |
501 | NFSPROC4_CLNT_FSID_PRESENT, | 505 | NFSPROC4_CLNT_FSID_PRESENT, |
502 | 506 | ||
503 | /* nfs41 */ | ||
504 | NFSPROC4_CLNT_EXCHANGE_ID, | 507 | NFSPROC4_CLNT_EXCHANGE_ID, |
505 | NFSPROC4_CLNT_CREATE_SESSION, | 508 | NFSPROC4_CLNT_CREATE_SESSION, |
506 | NFSPROC4_CLNT_DESTROY_SESSION, | 509 | NFSPROC4_CLNT_DESTROY_SESSION, |
@@ -518,13 +521,14 @@ enum { | |||
518 | NFSPROC4_CLNT_BIND_CONN_TO_SESSION, | 521 | NFSPROC4_CLNT_BIND_CONN_TO_SESSION, |
519 | NFSPROC4_CLNT_DESTROY_CLIENTID, | 522 | NFSPROC4_CLNT_DESTROY_CLIENTID, |
520 | 523 | ||
521 | /* nfs42 */ | ||
522 | NFSPROC4_CLNT_SEEK, | 524 | NFSPROC4_CLNT_SEEK, |
523 | NFSPROC4_CLNT_ALLOCATE, | 525 | NFSPROC4_CLNT_ALLOCATE, |
524 | NFSPROC4_CLNT_DEALLOCATE, | 526 | NFSPROC4_CLNT_DEALLOCATE, |
525 | NFSPROC4_CLNT_LAYOUTSTATS, | 527 | NFSPROC4_CLNT_LAYOUTSTATS, |
526 | NFSPROC4_CLNT_CLONE, | 528 | NFSPROC4_CLNT_CLONE, |
527 | NFSPROC4_CLNT_COPY, | 529 | NFSPROC4_CLNT_COPY, |
530 | |||
531 | NFSPROC4_CLNT_LOOKUPP, | ||
528 | }; | 532 | }; |
529 | 533 | ||
530 | /* nfs41 types */ | 534 | /* nfs41 types */ |
diff --git a/include/linux/nospec.h b/include/linux/nospec.h new file mode 100644 index 000000000000..b99bced39ac2 --- /dev/null +++ b/include/linux/nospec.h | |||
@@ -0,0 +1,72 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright(c) 2018 Linus Torvalds. All rights reserved. | ||
3 | // Copyright(c) 2018 Alexei Starovoitov. All rights reserved. | ||
4 | // Copyright(c) 2018 Intel Corporation. All rights reserved. | ||
5 | |||
6 | #ifndef _LINUX_NOSPEC_H | ||
7 | #define _LINUX_NOSPEC_H | ||
8 | |||
9 | /** | ||
10 | * array_index_mask_nospec() - generate a ~0 mask when index < size, 0 otherwise | ||
11 | * @index: array element index | ||
12 | * @size: number of elements in array | ||
13 | * | ||
14 | * When @index is out of bounds (@index >= @size), the sign bit will be | ||
15 | * set. Extend the sign bit to all bits and invert, giving a result of | ||
16 | * zero for an out of bounds index, or ~0 if within bounds [0, @size). | ||
17 | */ | ||
18 | #ifndef array_index_mask_nospec | ||
19 | static inline unsigned long array_index_mask_nospec(unsigned long index, | ||
20 | unsigned long size) | ||
21 | { | ||
22 | /* | ||
23 | * Warn developers about inappropriate array_index_nospec() usage. | ||
24 | * | ||
25 | * Even if the CPU speculates past the WARN_ONCE branch, the | ||
26 | * sign bit of @index is taken into account when generating the | ||
27 | * mask. | ||
28 | * | ||
29 | * This warning is compiled out when the compiler can infer that | ||
30 | * @index and @size are less than LONG_MAX. | ||
31 | */ | ||
32 | if (WARN_ONCE(index > LONG_MAX || size > LONG_MAX, | ||
33 | "array_index_nospec() limited to range of [0, LONG_MAX]\n")) | ||
34 | return 0; | ||
35 | |||
36 | /* | ||
37 | * Always calculate and emit the mask even if the compiler | ||
38 | * thinks the mask is not needed. The compiler does not take | ||
39 | * into account the value of @index under speculation. | ||
40 | */ | ||
41 | OPTIMIZER_HIDE_VAR(index); | ||
42 | return ~(long)(index | (size - 1UL - index)) >> (BITS_PER_LONG - 1); | ||
43 | } | ||
44 | #endif | ||
45 | |||
46 | /* | ||
47 | * array_index_nospec - sanitize an array index after a bounds check | ||
48 | * | ||
49 | * For a code sequence like: | ||
50 | * | ||
51 | * if (index < size) { | ||
52 | * index = array_index_nospec(index, size); | ||
53 | * val = array[index]; | ||
54 | * } | ||
55 | * | ||
56 | * ...if the CPU speculates past the bounds check then | ||
57 | * array_index_nospec() will clamp the index within the range of [0, | ||
58 | * size). | ||
59 | */ | ||
60 | #define array_index_nospec(index, size) \ | ||
61 | ({ \ | ||
62 | typeof(index) _i = (index); \ | ||
63 | typeof(size) _s = (size); \ | ||
64 | unsigned long _mask = array_index_mask_nospec(_i, _s); \ | ||
65 | \ | ||
66 | BUILD_BUG_ON(sizeof(_i) > sizeof(long)); \ | ||
67 | BUILD_BUG_ON(sizeof(_s) > sizeof(long)); \ | ||
68 | \ | ||
69 | _i &= _mask; \ | ||
70 | _i; \ | ||
71 | }) | ||
72 | #endif /* _LINUX_NOSPEC_H */ | ||
diff --git a/include/linux/ntb.h b/include/linux/ntb.h index c308964777eb..181d16601dd9 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h | |||
@@ -71,6 +71,7 @@ struct pci_dev; | |||
71 | * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. | 71 | * @NTB_TOPO_B2B_USD: On primary side of local ntb upstream of remote ntb. |
72 | * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. | 72 | * @NTB_TOPO_B2B_DSD: On primary side of local ntb downstream of remote ntb. |
73 | * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. | 73 | * @NTB_TOPO_SWITCH: Connected via a switch which supports ntb. |
74 | * @NTB_TOPO_CROSSLINK: Connected via two symmetric switchecs | ||
74 | */ | 75 | */ |
75 | enum ntb_topo { | 76 | enum ntb_topo { |
76 | NTB_TOPO_NONE = -1, | 77 | NTB_TOPO_NONE = -1, |
@@ -79,6 +80,7 @@ enum ntb_topo { | |||
79 | NTB_TOPO_B2B_USD, | 80 | NTB_TOPO_B2B_USD, |
80 | NTB_TOPO_B2B_DSD, | 81 | NTB_TOPO_B2B_DSD, |
81 | NTB_TOPO_SWITCH, | 82 | NTB_TOPO_SWITCH, |
83 | NTB_TOPO_CROSSLINK, | ||
82 | }; | 84 | }; |
83 | 85 | ||
84 | static inline int ntb_topo_is_b2b(enum ntb_topo topo) | 86 | static inline int ntb_topo_is_b2b(enum ntb_topo topo) |
@@ -94,12 +96,13 @@ static inline int ntb_topo_is_b2b(enum ntb_topo topo) | |||
94 | static inline char *ntb_topo_string(enum ntb_topo topo) | 96 | static inline char *ntb_topo_string(enum ntb_topo topo) |
95 | { | 97 | { |
96 | switch (topo) { | 98 | switch (topo) { |
97 | case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; | 99 | case NTB_TOPO_NONE: return "NTB_TOPO_NONE"; |
98 | case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; | 100 | case NTB_TOPO_PRI: return "NTB_TOPO_PRI"; |
99 | case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; | 101 | case NTB_TOPO_SEC: return "NTB_TOPO_SEC"; |
100 | case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; | 102 | case NTB_TOPO_B2B_USD: return "NTB_TOPO_B2B_USD"; |
101 | case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; | 103 | case NTB_TOPO_B2B_DSD: return "NTB_TOPO_B2B_DSD"; |
102 | case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; | 104 | case NTB_TOPO_SWITCH: return "NTB_TOPO_SWITCH"; |
105 | case NTB_TOPO_CROSSLINK: return "NTB_TOPO_CROSSLINK"; | ||
103 | } | 106 | } |
104 | return "NTB_TOPO_INVALID"; | 107 | return "NTB_TOPO_INVALID"; |
105 | } | 108 | } |
@@ -250,7 +253,7 @@ static inline int ntb_ctx_ops_is_valid(const struct ntb_ctx_ops *ops) | |||
250 | * @msg_set_mask: See ntb_msg_set_mask(). | 253 | * @msg_set_mask: See ntb_msg_set_mask(). |
251 | * @msg_clear_mask: See ntb_msg_clear_mask(). | 254 | * @msg_clear_mask: See ntb_msg_clear_mask(). |
252 | * @msg_read: See ntb_msg_read(). | 255 | * @msg_read: See ntb_msg_read(). |
253 | * @msg_write: See ntb_msg_write(). | 256 | * @peer_msg_write: See ntb_peer_msg_write(). |
254 | */ | 257 | */ |
255 | struct ntb_dev_ops { | 258 | struct ntb_dev_ops { |
256 | int (*port_number)(struct ntb_dev *ntb); | 259 | int (*port_number)(struct ntb_dev *ntb); |
@@ -321,8 +324,8 @@ struct ntb_dev_ops { | |||
321 | int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits); | 324 | int (*msg_clear_sts)(struct ntb_dev *ntb, u64 sts_bits); |
322 | int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits); | 325 | int (*msg_set_mask)(struct ntb_dev *ntb, u64 mask_bits); |
323 | int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits); | 326 | int (*msg_clear_mask)(struct ntb_dev *ntb, u64 mask_bits); |
324 | int (*msg_read)(struct ntb_dev *ntb, int midx, int *pidx, u32 *msg); | 327 | u32 (*msg_read)(struct ntb_dev *ntb, int *pidx, int midx); |
325 | int (*msg_write)(struct ntb_dev *ntb, int midx, int pidx, u32 msg); | 328 | int (*peer_msg_write)(struct ntb_dev *ntb, int pidx, int midx, u32 msg); |
326 | }; | 329 | }; |
327 | 330 | ||
328 | static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) | 331 | static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) |
@@ -384,7 +387,7 @@ static inline int ntb_dev_ops_is_valid(const struct ntb_dev_ops *ops) | |||
384 | /* !ops->msg_set_mask == !ops->msg_count && */ | 387 | /* !ops->msg_set_mask == !ops->msg_count && */ |
385 | /* !ops->msg_clear_mask == !ops->msg_count && */ | 388 | /* !ops->msg_clear_mask == !ops->msg_count && */ |
386 | !ops->msg_read == !ops->msg_count && | 389 | !ops->msg_read == !ops->msg_count && |
387 | !ops->msg_write == !ops->msg_count && | 390 | !ops->peer_msg_write == !ops->msg_count && |
388 | 1; | 391 | 1; |
389 | } | 392 | } |
390 | 393 | ||
@@ -764,7 +767,7 @@ static inline int ntb_mw_get_align(struct ntb_dev *ntb, int pidx, int widx, | |||
764 | resource_size_t *size_align, | 767 | resource_size_t *size_align, |
765 | resource_size_t *size_max) | 768 | resource_size_t *size_max) |
766 | { | 769 | { |
767 | if (!(ntb_link_is_up(ntb, NULL, NULL) & (1 << pidx))) | 770 | if (!(ntb_link_is_up(ntb, NULL, NULL) & BIT_ULL(pidx))) |
768 | return -ENOTCONN; | 771 | return -ENOTCONN; |
769 | 772 | ||
770 | return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, | 773 | return ntb->ops->mw_get_align(ntb, pidx, widx, addr_align, size_align, |
@@ -1459,31 +1462,29 @@ static inline int ntb_msg_clear_mask(struct ntb_dev *ntb, u64 mask_bits) | |||
1459 | } | 1462 | } |
1460 | 1463 | ||
1461 | /** | 1464 | /** |
1462 | * ntb_msg_read() - read message register with specified index | 1465 | * ntb_msg_read() - read inbound message register with specified index |
1463 | * @ntb: NTB device context. | 1466 | * @ntb: NTB device context. |
1464 | * @midx: Message register index | ||
1465 | * @pidx: OUT - Port index of peer device a message retrieved from | 1467 | * @pidx: OUT - Port index of peer device a message retrieved from |
1466 | * @msg: OUT - Data | 1468 | * @midx: Message register index |
1467 | * | 1469 | * |
1468 | * Read data from the specified message register. Source port index of a | 1470 | * Read data from the specified message register. Source port index of a |
1469 | * message is retrieved as well. | 1471 | * message is retrieved as well. |
1470 | * | 1472 | * |
1471 | * Return: Zero on success, otherwise a negative error number. | 1473 | * Return: The value of the inbound message register. |
1472 | */ | 1474 | */ |
1473 | static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, | 1475 | static inline u32 ntb_msg_read(struct ntb_dev *ntb, int *pidx, int midx) |
1474 | u32 *msg) | ||
1475 | { | 1476 | { |
1476 | if (!ntb->ops->msg_read) | 1477 | if (!ntb->ops->msg_read) |
1477 | return -EINVAL; | 1478 | return ~(u32)0; |
1478 | 1479 | ||
1479 | return ntb->ops->msg_read(ntb, midx, pidx, msg); | 1480 | return ntb->ops->msg_read(ntb, pidx, midx); |
1480 | } | 1481 | } |
1481 | 1482 | ||
1482 | /** | 1483 | /** |
1483 | * ntb_msg_write() - write data to the specified message register | 1484 | * ntb_peer_msg_write() - write data to the specified peer message register |
1484 | * @ntb: NTB device context. | 1485 | * @ntb: NTB device context. |
1485 | * @midx: Message register index | ||
1486 | * @pidx: Port index of peer device a message being sent to | 1486 | * @pidx: Port index of peer device a message being sent to |
1487 | * @midx: Message register index | ||
1487 | * @msg: Data to send | 1488 | * @msg: Data to send |
1488 | * | 1489 | * |
1489 | * Send data to a specified peer device using the defined message register. | 1490 | * Send data to a specified peer device using the defined message register. |
@@ -1492,13 +1493,13 @@ static inline int ntb_msg_read(struct ntb_dev *ntb, int midx, int *pidx, | |||
1492 | * | 1493 | * |
1493 | * Return: Zero on success, otherwise a negative error number. | 1494 | * Return: Zero on success, otherwise a negative error number. |
1494 | */ | 1495 | */ |
1495 | static inline int ntb_msg_write(struct ntb_dev *ntb, int midx, int pidx, | 1496 | static inline int ntb_peer_msg_write(struct ntb_dev *ntb, int pidx, int midx, |
1496 | u32 msg) | 1497 | u32 msg) |
1497 | { | 1498 | { |
1498 | if (!ntb->ops->msg_write) | 1499 | if (!ntb->ops->peer_msg_write) |
1499 | return -EINVAL; | 1500 | return -EINVAL; |
1500 | 1501 | ||
1501 | return ntb->ops->msg_write(ntb, midx, pidx, msg); | 1502 | return ntb->ops->peer_msg_write(ntb, pidx, midx, msg); |
1502 | } | 1503 | } |
1503 | 1504 | ||
1504 | #endif | 1505 | #endif |
diff --git a/include/linux/nubus.h b/include/linux/nubus.h index 11ce6b1117a8..6e8200215321 100644 --- a/include/linux/nubus.h +++ b/include/linux/nubus.h | |||
@@ -5,20 +5,36 @@ | |||
5 | Originally written by Alan Cox. | 5 | Originally written by Alan Cox. |
6 | 6 | ||
7 | Hacked to death by C. Scott Ananian and David Huggins-Daines. | 7 | Hacked to death by C. Scott Ananian and David Huggins-Daines. |
8 | 8 | */ | |
9 | Some of the constants in here are from the corresponding | 9 | |
10 | NetBSD/OpenBSD header file, by Allen Briggs. We figured out the | ||
11 | rest of them on our own. */ | ||
12 | #ifndef LINUX_NUBUS_H | 10 | #ifndef LINUX_NUBUS_H |
13 | #define LINUX_NUBUS_H | 11 | #define LINUX_NUBUS_H |
14 | 12 | ||
13 | #include <linux/device.h> | ||
15 | #include <asm/nubus.h> | 14 | #include <asm/nubus.h> |
16 | #include <uapi/linux/nubus.h> | 15 | #include <uapi/linux/nubus.h> |
17 | 16 | ||
17 | struct proc_dir_entry; | ||
18 | struct seq_file; | ||
19 | |||
20 | struct nubus_dir { | ||
21 | unsigned char *base; | ||
22 | unsigned char *ptr; | ||
23 | int done; | ||
24 | int mask; | ||
25 | struct proc_dir_entry *procdir; | ||
26 | }; | ||
27 | |||
28 | struct nubus_dirent { | ||
29 | unsigned char *base; | ||
30 | unsigned char type; | ||
31 | __u32 data; /* Actually 24 bits used */ | ||
32 | int mask; | ||
33 | }; | ||
34 | |||
18 | struct nubus_board { | 35 | struct nubus_board { |
19 | struct nubus_board* next; | 36 | struct device dev; |
20 | struct nubus_dev* first_dev; | 37 | |
21 | |||
22 | /* Only 9-E actually exist, though 0-8 are also theoretically | 38 | /* Only 9-E actually exist, though 0-8 are also theoretically |
23 | possible, and 0 is a special case which represents the | 39 | possible, and 0 is a special case which represents the |
24 | motherboard and onboard peripherals (Ethernet, video) */ | 40 | motherboard and onboard peripherals (Ethernet, video) */ |
@@ -27,10 +43,10 @@ struct nubus_board { | |||
27 | char name[64]; | 43 | char name[64]; |
28 | 44 | ||
29 | /* Format block */ | 45 | /* Format block */ |
30 | unsigned char* fblock; | 46 | unsigned char *fblock; |
31 | /* Root directory (does *not* always equal fblock + doffset!) */ | 47 | /* Root directory (does *not* always equal fblock + doffset!) */ |
32 | unsigned char* directory; | 48 | unsigned char *directory; |
33 | 49 | ||
34 | unsigned long slot_addr; | 50 | unsigned long slot_addr; |
35 | /* Offset to root directory (sometimes) */ | 51 | /* Offset to root directory (sometimes) */ |
36 | unsigned long doffset; | 52 | unsigned long doffset; |
@@ -41,15 +57,15 @@ struct nubus_board { | |||
41 | unsigned char rev; | 57 | unsigned char rev; |
42 | unsigned char format; | 58 | unsigned char format; |
43 | unsigned char lanes; | 59 | unsigned char lanes; |
44 | }; | ||
45 | 60 | ||
46 | struct nubus_dev { | ||
47 | /* Next link in device list */ | ||
48 | struct nubus_dev* next; | ||
49 | /* Directory entry in /proc/bus/nubus */ | 61 | /* Directory entry in /proc/bus/nubus */ |
50 | struct proc_dir_entry* procdir; | 62 | struct proc_dir_entry *procdir; |
63 | }; | ||
64 | |||
65 | struct nubus_rsrc { | ||
66 | struct list_head list; | ||
51 | 67 | ||
52 | /* The functional resource ID of this device */ | 68 | /* The functional resource ID */ |
53 | unsigned char resid; | 69 | unsigned char resid; |
54 | /* These are mostly here for convenience; we could always read | 70 | /* These are mostly here for convenience; we could always read |
55 | them from the ROMs if we wanted to */ | 71 | them from the ROMs if we wanted to */ |
@@ -57,79 +73,116 @@ struct nubus_dev { | |||
57 | unsigned short type; | 73 | unsigned short type; |
58 | unsigned short dr_sw; | 74 | unsigned short dr_sw; |
59 | unsigned short dr_hw; | 75 | unsigned short dr_hw; |
60 | /* This is the device's name rather than the board's. | 76 | |
61 | Sometimes they are different. Usually the board name is | ||
62 | more correct. */ | ||
63 | char name[64]; | ||
64 | /* MacOS driver (I kid you not) */ | ||
65 | unsigned char* driver; | ||
66 | /* Actually this is an offset */ | ||
67 | unsigned long iobase; | ||
68 | unsigned long iosize; | ||
69 | unsigned char flags, hwdevid; | ||
70 | |||
71 | /* Functional directory */ | 77 | /* Functional directory */ |
72 | unsigned char* directory; | 78 | unsigned char *directory; |
73 | /* Much of our info comes from here */ | 79 | /* Much of our info comes from here */ |
74 | struct nubus_board* board; | 80 | struct nubus_board *board; |
81 | }; | ||
82 | |||
83 | /* This is all NuBus functional resources (used to find devices later on) */ | ||
84 | extern struct list_head nubus_func_rsrcs; | ||
85 | |||
86 | struct nubus_driver { | ||
87 | struct device_driver driver; | ||
88 | int (*probe)(struct nubus_board *board); | ||
89 | int (*remove)(struct nubus_board *board); | ||
75 | }; | 90 | }; |
76 | 91 | ||
77 | /* This is all NuBus devices (used to find devices later on) */ | 92 | extern struct bus_type nubus_bus_type; |
78 | extern struct nubus_dev* nubus_devices; | ||
79 | /* This is all NuBus cards */ | ||
80 | extern struct nubus_board* nubus_boards; | ||
81 | 93 | ||
82 | /* Generic NuBus interface functions, modelled after the PCI interface */ | 94 | /* Generic NuBus interface functions, modelled after the PCI interface */ |
83 | void nubus_scan_bus(void); | ||
84 | #ifdef CONFIG_PROC_FS | 95 | #ifdef CONFIG_PROC_FS |
85 | extern void nubus_proc_init(void); | 96 | void nubus_proc_init(void); |
97 | struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board); | ||
98 | struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, | ||
99 | const struct nubus_dirent *ent, | ||
100 | struct nubus_board *board); | ||
101 | void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, | ||
102 | const struct nubus_dirent *ent, | ||
103 | unsigned int size); | ||
104 | void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, | ||
105 | const struct nubus_dirent *ent); | ||
86 | #else | 106 | #else |
87 | static inline void nubus_proc_init(void) {} | 107 | static inline void nubus_proc_init(void) {} |
108 | static inline | ||
109 | struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board) | ||
110 | { return NULL; } | ||
111 | static inline | ||
112 | struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, | ||
113 | const struct nubus_dirent *ent, | ||
114 | struct nubus_board *board) | ||
115 | { return NULL; } | ||
116 | static inline void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, | ||
117 | const struct nubus_dirent *ent, | ||
118 | unsigned int size) {} | ||
119 | static inline void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, | ||
120 | const struct nubus_dirent *ent) {} | ||
88 | #endif | 121 | #endif |
89 | int get_nubus_list(char *buf); | 122 | |
90 | int nubus_proc_attach_device(struct nubus_dev *dev); | 123 | struct nubus_rsrc *nubus_first_rsrc_or_null(void); |
91 | /* If we need more precision we can add some more of these */ | 124 | struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from); |
92 | struct nubus_dev* nubus_find_device(unsigned short category, | 125 | |
93 | unsigned short type, | 126 | #define for_each_func_rsrc(f) \ |
94 | unsigned short dr_hw, | 127 | for (f = nubus_first_rsrc_or_null(); f; f = nubus_next_rsrc_or_null(f)) |
95 | unsigned short dr_sw, | 128 | |
96 | const struct nubus_dev* from); | 129 | #define for_each_board_func_rsrc(b, f) \ |
97 | struct nubus_dev* nubus_find_type(unsigned short category, | 130 | for_each_func_rsrc(f) if (f->board != b) {} else |
98 | unsigned short type, | ||
99 | const struct nubus_dev* from); | ||
100 | /* Might have more than one device in a slot, you know... */ | ||
101 | struct nubus_dev* nubus_find_slot(unsigned int slot, | ||
102 | const struct nubus_dev* from); | ||
103 | 131 | ||
104 | /* These are somewhat more NuBus-specific. They all return 0 for | 132 | /* These are somewhat more NuBus-specific. They all return 0 for |
105 | success and -1 for failure, as you'd expect. */ | 133 | success and -1 for failure, as you'd expect. */ |
106 | 134 | ||
107 | /* The root directory which contains the board and functional | 135 | /* The root directory which contains the board and functional |
108 | directories */ | 136 | directories */ |
109 | int nubus_get_root_dir(const struct nubus_board* board, | 137 | int nubus_get_root_dir(const struct nubus_board *board, |
110 | struct nubus_dir* dir); | 138 | struct nubus_dir *dir); |
111 | /* The board directory */ | 139 | /* The board directory */ |
112 | int nubus_get_board_dir(const struct nubus_board* board, | 140 | int nubus_get_board_dir(const struct nubus_board *board, |
113 | struct nubus_dir* dir); | 141 | struct nubus_dir *dir); |
114 | /* The functional directory */ | 142 | /* The functional directory */ |
115 | int nubus_get_func_dir(const struct nubus_dev* dev, | 143 | int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir); |
116 | struct nubus_dir* dir); | ||
117 | 144 | ||
118 | /* These work on any directory gotten via the above */ | 145 | /* These work on any directory gotten via the above */ |
119 | int nubus_readdir(struct nubus_dir* dir, | 146 | int nubus_readdir(struct nubus_dir *dir, |
120 | struct nubus_dirent* ent); | 147 | struct nubus_dirent *ent); |
121 | int nubus_find_rsrc(struct nubus_dir* dir, | 148 | int nubus_find_rsrc(struct nubus_dir *dir, |
122 | unsigned char rsrc_type, | 149 | unsigned char rsrc_type, |
123 | struct nubus_dirent* ent); | 150 | struct nubus_dirent *ent); |
124 | int nubus_rewinddir(struct nubus_dir* dir); | 151 | int nubus_rewinddir(struct nubus_dir *dir); |
125 | 152 | ||
126 | /* Things to do with directory entries */ | 153 | /* Things to do with directory entries */ |
127 | int nubus_get_subdir(const struct nubus_dirent* ent, | 154 | int nubus_get_subdir(const struct nubus_dirent *ent, |
128 | struct nubus_dir* dir); | 155 | struct nubus_dir *dir); |
129 | void nubus_get_rsrc_mem(void* dest, | 156 | void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent, |
130 | const struct nubus_dirent *dirent, | 157 | unsigned int len); |
131 | int len); | 158 | unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent, |
132 | void nubus_get_rsrc_str(void* dest, | 159 | unsigned int len); |
133 | const struct nubus_dirent *dirent, | 160 | void nubus_seq_write_rsrc_mem(struct seq_file *m, |
134 | int maxlen); | 161 | const struct nubus_dirent *dirent, |
162 | unsigned int len); | ||
163 | unsigned char *nubus_dirptr(const struct nubus_dirent *nd); | ||
164 | |||
165 | /* Declarations relating to driver model objects */ | ||
166 | int nubus_bus_register(void); | ||
167 | int nubus_device_register(struct nubus_board *board); | ||
168 | int nubus_driver_register(struct nubus_driver *ndrv); | ||
169 | void nubus_driver_unregister(struct nubus_driver *ndrv); | ||
170 | int nubus_proc_show(struct seq_file *m, void *data); | ||
171 | |||
172 | static inline void nubus_set_drvdata(struct nubus_board *board, void *data) | ||
173 | { | ||
174 | dev_set_drvdata(&board->dev, data); | ||
175 | } | ||
176 | |||
177 | static inline void *nubus_get_drvdata(struct nubus_board *board) | ||
178 | { | ||
179 | return dev_get_drvdata(&board->dev); | ||
180 | } | ||
181 | |||
182 | /* Returns a pointer to the "standard" slot space. */ | ||
183 | static inline void *nubus_slot_addr(int slot) | ||
184 | { | ||
185 | return (void *)(0xF0000000 | (slot << 24)); | ||
186 | } | ||
187 | |||
135 | #endif /* LINUX_NUBUS_H */ | 188 | #endif /* LINUX_NUBUS_H */ |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index aea87f0d917b..4112e2bd747f 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
@@ -124,14 +124,20 @@ enum { | |||
124 | 124 | ||
125 | #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) | 125 | #define NVME_CMB_BIR(cmbloc) ((cmbloc) & 0x7) |
126 | #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) | 126 | #define NVME_CMB_OFST(cmbloc) (((cmbloc) >> 12) & 0xfffff) |
127 | #define NVME_CMB_SZ(cmbsz) (((cmbsz) >> 12) & 0xfffff) | 127 | |
128 | #define NVME_CMB_SZU(cmbsz) (((cmbsz) >> 8) & 0xf) | 128 | enum { |
129 | 129 | NVME_CMBSZ_SQS = 1 << 0, | |
130 | #define NVME_CMB_WDS(cmbsz) ((cmbsz) & 0x10) | 130 | NVME_CMBSZ_CQS = 1 << 1, |
131 | #define NVME_CMB_RDS(cmbsz) ((cmbsz) & 0x8) | 131 | NVME_CMBSZ_LISTS = 1 << 2, |
132 | #define NVME_CMB_LISTS(cmbsz) ((cmbsz) & 0x4) | 132 | NVME_CMBSZ_RDS = 1 << 3, |
133 | #define NVME_CMB_CQS(cmbsz) ((cmbsz) & 0x2) | 133 | NVME_CMBSZ_WDS = 1 << 4, |
134 | #define NVME_CMB_SQS(cmbsz) ((cmbsz) & 0x1) | 134 | |
135 | NVME_CMBSZ_SZ_SHIFT = 12, | ||
136 | NVME_CMBSZ_SZ_MASK = 0xfffff, | ||
137 | |||
138 | NVME_CMBSZ_SZU_SHIFT = 8, | ||
139 | NVME_CMBSZ_SZU_MASK = 0xf, | ||
140 | }; | ||
135 | 141 | ||
136 | /* | 142 | /* |
137 | * Submission and Completion Queue Entry Sizes for the NVM command set. | 143 | * Submission and Completion Queue Entry Sizes for the NVM command set. |
diff --git a/include/linux/of.h b/include/linux/of.h index d3dea1d1e3a9..da1ee95241c1 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | #ifndef _LINUX_OF_H | 2 | #ifndef _LINUX_OF_H |
2 | #define _LINUX_OF_H | 3 | #define _LINUX_OF_H |
3 | /* | 4 | /* |
@@ -9,11 +10,6 @@ | |||
9 | * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. | 10 | * Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp. |
10 | * Updates for SPARC64 by David S. Miller | 11 | * Updates for SPARC64 by David S. Miller |
11 | * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. | 12 | * Derived from PowerPC and Sparc prom.h files by Stephen Rothwell, IBM Corp. |
12 | * | ||
13 | * This program is free software; you can redistribute it and/or | ||
14 | * modify it under the terms of the GNU General Public License | ||
15 | * as published by the Free Software Foundation; either version | ||
16 | * 2 of the License, or (at your option) any later version. | ||
17 | */ | 13 | */ |
18 | #include <linux/types.h> | 14 | #include <linux/types.h> |
19 | #include <linux/bitops.h> | 15 | #include <linux/bitops.h> |
@@ -544,6 +540,8 @@ const char *of_prop_next_string(struct property *prop, const char *cur); | |||
544 | 540 | ||
545 | bool of_console_check(struct device_node *dn, char *name, int index); | 541 | bool of_console_check(struct device_node *dn, char *name, int index); |
546 | 542 | ||
543 | extern int of_cpu_node_to_id(struct device_node *np); | ||
544 | |||
547 | #else /* CONFIG_OF */ | 545 | #else /* CONFIG_OF */ |
548 | 546 | ||
549 | static inline void of_core_init(void) | 547 | static inline void of_core_init(void) |
@@ -916,6 +914,11 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag | |||
916 | { | 914 | { |
917 | } | 915 | } |
918 | 916 | ||
917 | static inline int of_cpu_node_to_id(struct device_node *np) | ||
918 | { | ||
919 | return -ENODEV; | ||
920 | } | ||
921 | |||
919 | #define of_match_ptr(_ptr) NULL | 922 | #define of_match_ptr(_ptr) NULL |
920 | #define of_match_node(_matches, _node) NULL | 923 | #define of_match_node(_matches, _node) NULL |
921 | #endif /* CONFIG_OF */ | 924 | #endif /* CONFIG_OF */ |
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h index b90d8ec57c1f..fd706cdf255c 100644 --- a/include/linux/of_dma.h +++ b/include/linux/of_dma.h | |||
@@ -1,13 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * OF helpers for DMA request / controller | 3 | * OF helpers for DMA request / controller |
3 | * | 4 | * |
4 | * Based on of_gpio.h | 5 | * Based on of_gpio.h |
5 | * | 6 | * |
6 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ | 7 | * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/ |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | */ | 8 | */ |
12 | 9 | ||
13 | #ifndef __LINUX_OF_DMA_H | 10 | #ifndef __LINUX_OF_DMA_H |
diff --git a/include/linux/of_fdt.h b/include/linux/of_fdt.h index 013c5418aeec..b9cd9ebdf9b9 100644 --- a/include/linux/of_fdt.h +++ b/include/linux/of_fdt.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Definitions for working with the Flattened Device Tree data format | 3 | * Definitions for working with the Flattened Device Tree data format |
3 | * | 4 | * |
4 | * Copyright 2009 Benjamin Herrenschmidt, IBM Corp | 5 | * Copyright 2009 Benjamin Herrenschmidt, IBM Corp |
5 | * benh@kernel.crashing.org | 6 | * benh@kernel.crashing.org |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #ifndef _LINUX_OF_FDT_H | 9 | #ifndef _LINUX_OF_FDT_H |
@@ -47,6 +44,12 @@ extern void *initial_boot_params; | |||
47 | extern char __dtb_start[]; | 44 | extern char __dtb_start[]; |
48 | extern char __dtb_end[]; | 45 | extern char __dtb_end[]; |
49 | 46 | ||
47 | /* Other Prototypes */ | ||
48 | extern u64 of_flat_dt_translate_address(unsigned long node); | ||
49 | extern void of_fdt_limit_memory(int limit); | ||
50 | #endif /* CONFIG_OF_FLATTREE */ | ||
51 | |||
52 | #ifdef CONFIG_OF_EARLY_FLATTREE | ||
50 | /* For scanning the flat device-tree at boot time */ | 53 | /* For scanning the flat device-tree at boot time */ |
51 | extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, | 54 | extern int of_scan_flat_dt(int (*it)(unsigned long node, const char *uname, |
52 | int depth, void *data), | 55 | int depth, void *data), |
@@ -77,7 +80,6 @@ extern void early_init_dt_add_memory_arch(u64 base, u64 size); | |||
77 | extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); | 80 | extern int early_init_dt_mark_hotplug_memory_arch(u64 base, u64 size); |
78 | extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, | 81 | extern int early_init_dt_reserve_memory_arch(phys_addr_t base, phys_addr_t size, |
79 | bool no_map); | 82 | bool no_map); |
80 | extern void * early_init_dt_alloc_memory_arch(u64 size, u64 align); | ||
81 | extern u64 dt_mem_next_cell(int s, const __be32 **cellp); | 83 | extern u64 dt_mem_next_cell(int s, const __be32 **cellp); |
82 | 84 | ||
83 | /* Early flat tree scan hooks */ | 85 | /* Early flat tree scan hooks */ |
@@ -97,16 +99,14 @@ extern void unflatten_device_tree(void); | |||
97 | extern void unflatten_and_copy_device_tree(void); | 99 | extern void unflatten_and_copy_device_tree(void); |
98 | extern void early_init_devtree(void *); | 100 | extern void early_init_devtree(void *); |
99 | extern void early_get_first_memblock_info(void *, phys_addr_t *); | 101 | extern void early_get_first_memblock_info(void *, phys_addr_t *); |
100 | extern u64 of_flat_dt_translate_address(unsigned long node); | 102 | #else /* CONFIG_OF_EARLY_FLATTREE */ |
101 | extern void of_fdt_limit_memory(int limit); | ||
102 | #else /* CONFIG_OF_FLATTREE */ | ||
103 | static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; } | 103 | static inline int early_init_dt_scan_chosen_stdout(void) { return -ENODEV; } |
104 | static inline void early_init_fdt_scan_reserved_mem(void) {} | 104 | static inline void early_init_fdt_scan_reserved_mem(void) {} |
105 | static inline void early_init_fdt_reserve_self(void) {} | 105 | static inline void early_init_fdt_reserve_self(void) {} |
106 | static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } | 106 | static inline const char *of_flat_dt_get_machine_name(void) { return NULL; } |
107 | static inline void unflatten_device_tree(void) {} | 107 | static inline void unflatten_device_tree(void) {} |
108 | static inline void unflatten_and_copy_device_tree(void) {} | 108 | static inline void unflatten_and_copy_device_tree(void) {} |
109 | #endif /* CONFIG_OF_FLATTREE */ | 109 | #endif /* CONFIG_OF_EARLY_FLATTREE */ |
110 | 110 | ||
111 | #endif /* __ASSEMBLY__ */ | 111 | #endif /* __ASSEMBLY__ */ |
112 | #endif /* _LINUX_OF_FDT_H */ | 112 | #endif /* _LINUX_OF_FDT_H */ |
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 1fe205582111..163b79ecd01a 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h | |||
@@ -1,14 +1,10 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * OF helpers for the GPIO API | 3 | * OF helpers for the GPIO API |
3 | * | 4 | * |
4 | * Copyright (c) 2007-2008 MontaVista Software, Inc. | 5 | * Copyright (c) 2007-2008 MontaVista Software, Inc. |
5 | * | 6 | * |
6 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | 7 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> |
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2 of the License, or | ||
11 | * (at your option) any later version. | ||
12 | */ | 8 | */ |
13 | 9 | ||
14 | #ifndef __LINUX_OF_GPIO_H | 10 | #ifndef __LINUX_OF_GPIO_H |
@@ -31,7 +27,7 @@ enum of_gpio_flags { | |||
31 | OF_GPIO_ACTIVE_LOW = 0x1, | 27 | OF_GPIO_ACTIVE_LOW = 0x1, |
32 | OF_GPIO_SINGLE_ENDED = 0x2, | 28 | OF_GPIO_SINGLE_ENDED = 0x2, |
33 | OF_GPIO_OPEN_DRAIN = 0x4, | 29 | OF_GPIO_OPEN_DRAIN = 0x4, |
34 | OF_GPIO_SLEEP_MAY_LOSE_VALUE = 0x8, | 30 | OF_GPIO_TRANSITORY = 0x8, |
35 | }; | 31 | }; |
36 | 32 | ||
37 | #ifdef CONFIG_OF_GPIO | 33 | #ifdef CONFIG_OF_GPIO |
diff --git a/include/linux/of_graph.h b/include/linux/of_graph.h index 3e058f05ab04..01038a6aade0 100644 --- a/include/linux/of_graph.h +++ b/include/linux/of_graph.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * OF graph binding parsing helpers | 3 | * OF graph binding parsing helpers |
3 | * | 4 | * |
@@ -6,10 +7,6 @@ | |||
6 | * | 7 | * |
7 | * Copyright (C) 2012 Renesas Electronics Corp. | 8 | * Copyright (C) 2012 Renesas Electronics Corp. |
8 | * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> | 9 | * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> |
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of version 2 of the GNU General Public License as | ||
12 | * published by the Free Software Foundation. | ||
13 | */ | 10 | */ |
14 | #ifndef __LINUX_OF_GRAPH_H | 11 | #ifndef __LINUX_OF_GRAPH_H |
15 | #define __LINUX_OF_GRAPH_H | 12 | #define __LINUX_OF_GRAPH_H |
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h index cddfaff4d0b7..4fa654e4b5a9 100644 --- a/include/linux/of_iommu.h +++ b/include/linux/of_iommu.h | |||
@@ -34,9 +34,6 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev, | |||
34 | 34 | ||
35 | extern struct of_device_id __iommu_of_table; | 35 | extern struct of_device_id __iommu_of_table; |
36 | 36 | ||
37 | typedef int (*of_iommu_init_fn)(struct device_node *); | 37 | #define IOMMU_OF_DECLARE(name, compat) OF_DECLARE_1(iommu, name, compat, NULL) |
38 | |||
39 | #define IOMMU_OF_DECLARE(name, compat, fn) \ | ||
40 | _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn) | ||
41 | 38 | ||
42 | #endif /* __OF_IOMMU_H */ | 39 | #endif /* __OF_IOMMU_H */ |
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h index bf588a05d0d0..88865e0ebf4d 100644 --- a/include/linux/of_pci.h +++ b/include/linux/of_pci.h | |||
@@ -9,8 +9,7 @@ struct pci_dev; | |||
9 | struct of_phandle_args; | 9 | struct of_phandle_args; |
10 | struct device_node; | 10 | struct device_node; |
11 | 11 | ||
12 | #ifdef CONFIG_OF_PCI | 12 | #if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI) |
13 | int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq); | ||
14 | struct device_node *of_pci_find_child_device(struct device_node *parent, | 13 | struct device_node *of_pci_find_child_device(struct device_node *parent, |
15 | unsigned int devfn); | 14 | unsigned int devfn); |
16 | int of_pci_get_devfn(struct device_node *np); | 15 | int of_pci_get_devfn(struct device_node *np); |
@@ -23,11 +22,6 @@ int of_pci_map_rid(struct device_node *np, u32 rid, | |||
23 | const char *map_name, const char *map_mask_name, | 22 | const char *map_name, const char *map_mask_name, |
24 | struct device_node **target, u32 *id_out); | 23 | struct device_node **target, u32 *id_out); |
25 | #else | 24 | #else |
26 | static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) | ||
27 | { | ||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | static inline struct device_node *of_pci_find_child_device(struct device_node *parent, | 25 | static inline struct device_node *of_pci_find_child_device(struct device_node *parent, |
32 | unsigned int devfn) | 26 | unsigned int devfn) |
33 | { | 27 | { |
diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h index 7e09244bb679..d0b183ab65c6 100644 --- a/include/linux/of_pdt.h +++ b/include/linux/of_pdt.h | |||
@@ -1,13 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * Definitions for building a device tree by calling into the | 3 | * Definitions for building a device tree by calling into the |
3 | * Open Firmware PROM. | 4 | * Open Firmware PROM. |
4 | * | 5 | * |
5 | * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> | 6 | * Copyright (C) 2010 Andres Salomon <dilinger@queued.net> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | 7 | */ |
12 | 8 | ||
13 | #ifndef _LINUX_OF_PDT_H | 9 | #ifndef _LINUX_OF_PDT_H |
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h index fb908e598348..84a966623e78 100644 --- a/include/linux/of_platform.h +++ b/include/linux/of_platform.h | |||
@@ -1,14 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | #ifndef _LINUX_OF_PLATFORM_H | 2 | #ifndef _LINUX_OF_PLATFORM_H |
2 | #define _LINUX_OF_PLATFORM_H | 3 | #define _LINUX_OF_PLATFORM_H |
3 | /* | 4 | /* |
4 | * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. | 5 | * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corp. |
5 | * <benh@kernel.crashing.org> | 6 | * <benh@kernel.crashing.org> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | * | ||
12 | */ | 7 | */ |
13 | 8 | ||
14 | #include <linux/device.h> | 9 | #include <linux/device.h> |
diff --git a/include/linux/omap-gpmc.h b/include/linux/omap-gpmc.h index edfa280c3d56..053feb41510a 100644 --- a/include/linux/omap-gpmc.h +++ b/include/linux/omap-gpmc.h | |||
@@ -25,15 +25,43 @@ struct gpmc_nand_ops { | |||
25 | 25 | ||
26 | struct gpmc_nand_regs; | 26 | struct gpmc_nand_regs; |
27 | 27 | ||
28 | struct gpmc_onenand_info { | ||
29 | bool sync_read; | ||
30 | bool sync_write; | ||
31 | int burst_len; | ||
32 | }; | ||
33 | |||
28 | #if IS_ENABLED(CONFIG_OMAP_GPMC) | 34 | #if IS_ENABLED(CONFIG_OMAP_GPMC) |
29 | struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, | 35 | struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, |
30 | int cs); | 36 | int cs); |
37 | /** | ||
38 | * gpmc_omap_onenand_set_timings - set optimized sync timings. | ||
39 | * @cs: Chip Select Region | ||
40 | * @freq: Chip frequency | ||
41 | * @latency: Burst latency cycle count | ||
42 | * @info: Structure describing parameters used | ||
43 | * | ||
44 | * Sets optimized timings for the @cs region based on @freq and @latency. | ||
45 | * Updates the @info structure based on the GPMC settings. | ||
46 | */ | ||
47 | int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, | ||
48 | int latency, | ||
49 | struct gpmc_onenand_info *info); | ||
50 | |||
31 | #else | 51 | #else |
32 | static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, | 52 | static inline struct gpmc_nand_ops *gpmc_omap_get_nand_ops(struct gpmc_nand_regs *regs, |
33 | int cs) | 53 | int cs) |
34 | { | 54 | { |
35 | return NULL; | 55 | return NULL; |
36 | } | 56 | } |
57 | |||
58 | static inline | ||
59 | int gpmc_omap_onenand_set_timings(struct device *dev, int cs, int freq, | ||
60 | int latency, | ||
61 | struct gpmc_onenand_info *info) | ||
62 | { | ||
63 | return -EINVAL; | ||
64 | } | ||
37 | #endif /* CONFIG_OMAP_GPMC */ | 65 | #endif /* CONFIG_OMAP_GPMC */ |
38 | 66 | ||
39 | extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, | 67 | extern int gpmc_calc_timings(struct gpmc_timings *gpmc_t, |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 3ec44e27aa9d..50c2b8786831 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
@@ -46,11 +46,6 @@ | |||
46 | * guarantees that this bit is cleared for a page when it first is entered into | 46 | * guarantees that this bit is cleared for a page when it first is entered into |
47 | * the page cache. | 47 | * the page cache. |
48 | * | 48 | * |
49 | * PG_highmem pages are not permanently mapped into the kernel virtual address | ||
50 | * space, they need to be kmapped separately for doing IO on the pages. The | ||
51 | * struct page (these bits with information) are always mapped into kernel | ||
52 | * address space... | ||
53 | * | ||
54 | * PG_hwpoison indicates that a page got corrupted in hardware and contains | 49 | * PG_hwpoison indicates that a page got corrupted in hardware and contains |
55 | * data with incorrect ECC bits that triggered a machine check. Accessing is | 50 | * data with incorrect ECC bits that triggered a machine check. Accessing is |
56 | * not safe since it may cause another machine check. Don't touch! | 51 | * not safe since it may cause another machine check. Don't touch! |
diff --git a/include/linux/pagevec.h b/include/linux/pagevec.h index 5fb6580f7f23..6dc456ac6136 100644 --- a/include/linux/pagevec.h +++ b/include/linux/pagevec.h | |||
@@ -9,14 +9,14 @@ | |||
9 | #ifndef _LINUX_PAGEVEC_H | 9 | #ifndef _LINUX_PAGEVEC_H |
10 | #define _LINUX_PAGEVEC_H | 10 | #define _LINUX_PAGEVEC_H |
11 | 11 | ||
12 | /* 14 pointers + two long's align the pagevec structure to a power of two */ | 12 | /* 15 pointers + header align the pagevec structure to a power of two */ |
13 | #define PAGEVEC_SIZE 14 | 13 | #define PAGEVEC_SIZE 15 |
14 | 14 | ||
15 | struct page; | 15 | struct page; |
16 | struct address_space; | 16 | struct address_space; |
17 | 17 | ||
18 | struct pagevec { | 18 | struct pagevec { |
19 | unsigned long nr; | 19 | unsigned char nr; |
20 | bool percpu_pvec_drained; | 20 | bool percpu_pvec_drained; |
21 | struct page *pages[PAGEVEC_SIZE]; | 21 | struct page *pages[PAGEVEC_SIZE]; |
22 | }; | 22 | }; |
diff --git a/include/linux/pci-aspm.h b/include/linux/pci-aspm.h index 3cc06b059017..df28af5cef21 100644 --- a/include/linux/pci-aspm.h +++ b/include/linux/pci-aspm.h | |||
@@ -24,43 +24,12 @@ | |||
24 | #define PCIE_LINK_STATE_CLKPM 4 | 24 | #define PCIE_LINK_STATE_CLKPM 4 |
25 | 25 | ||
26 | #ifdef CONFIG_PCIEASPM | 26 | #ifdef CONFIG_PCIEASPM |
27 | void pcie_aspm_init_link_state(struct pci_dev *pdev); | ||
28 | void pcie_aspm_exit_link_state(struct pci_dev *pdev); | ||
29 | void pcie_aspm_pm_state_change(struct pci_dev *pdev); | ||
30 | void pcie_aspm_powersave_config_link(struct pci_dev *pdev); | ||
31 | void pci_disable_link_state(struct pci_dev *pdev, int state); | 27 | void pci_disable_link_state(struct pci_dev *pdev, int state); |
32 | void pci_disable_link_state_locked(struct pci_dev *pdev, int state); | 28 | void pci_disable_link_state_locked(struct pci_dev *pdev, int state); |
33 | void pcie_no_aspm(void); | 29 | void pcie_no_aspm(void); |
34 | #else | 30 | #else |
35 | static inline void pcie_aspm_init_link_state(struct pci_dev *pdev) | 31 | static inline void pci_disable_link_state(struct pci_dev *pdev, int state) { } |
36 | { | 32 | static inline void pcie_no_aspm(void) { } |
37 | } | ||
38 | static inline void pcie_aspm_exit_link_state(struct pci_dev *pdev) | ||
39 | { | ||
40 | } | ||
41 | static inline void pcie_aspm_pm_state_change(struct pci_dev *pdev) | ||
42 | { | ||
43 | } | ||
44 | static inline void pcie_aspm_powersave_config_link(struct pci_dev *pdev) | ||
45 | { | ||
46 | } | ||
47 | static inline void pci_disable_link_state(struct pci_dev *pdev, int state) | ||
48 | { | ||
49 | } | ||
50 | static inline void pcie_no_aspm(void) | ||
51 | { | ||
52 | } | ||
53 | #endif | 33 | #endif |
54 | 34 | ||
55 | #ifdef CONFIG_PCIEASPM_DEBUG /* this depends on CONFIG_PCIEASPM */ | ||
56 | void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev); | ||
57 | void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev); | ||
58 | #else | ||
59 | static inline void pcie_aspm_create_sysfs_dev_files(struct pci_dev *pdev) | ||
60 | { | ||
61 | } | ||
62 | static inline void pcie_aspm_remove_sysfs_dev_files(struct pci_dev *pdev) | ||
63 | { | ||
64 | } | ||
65 | #endif | ||
66 | #endif /* LINUX_ASPM_H */ | 35 | #endif /* LINUX_ASPM_H */ |
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h index d1f9fdade1e0..0dd1a3f7b309 100644 --- a/include/linux/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h | |||
@@ -17,91 +17,90 @@ static inline void * | |||
17 | pci_alloc_consistent(struct pci_dev *hwdev, size_t size, | 17 | pci_alloc_consistent(struct pci_dev *hwdev, size_t size, |
18 | dma_addr_t *dma_handle) | 18 | dma_addr_t *dma_handle) |
19 | { | 19 | { |
20 | return dma_alloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, dma_handle, GFP_ATOMIC); | 20 | return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); |
21 | } | 21 | } |
22 | 22 | ||
23 | static inline void * | 23 | static inline void * |
24 | pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, | 24 | pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, |
25 | dma_addr_t *dma_handle) | 25 | dma_addr_t *dma_handle) |
26 | { | 26 | { |
27 | return dma_zalloc_coherent(hwdev == NULL ? NULL : &hwdev->dev, | 27 | return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); |
28 | size, dma_handle, GFP_ATOMIC); | ||
29 | } | 28 | } |
30 | 29 | ||
31 | static inline void | 30 | static inline void |
32 | pci_free_consistent(struct pci_dev *hwdev, size_t size, | 31 | pci_free_consistent(struct pci_dev *hwdev, size_t size, |
33 | void *vaddr, dma_addr_t dma_handle) | 32 | void *vaddr, dma_addr_t dma_handle) |
34 | { | 33 | { |
35 | dma_free_coherent(hwdev == NULL ? NULL : &hwdev->dev, size, vaddr, dma_handle); | 34 | dma_free_coherent(&hwdev->dev, size, vaddr, dma_handle); |
36 | } | 35 | } |
37 | 36 | ||
38 | static inline dma_addr_t | 37 | static inline dma_addr_t |
39 | pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) | 38 | pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) |
40 | { | 39 | { |
41 | return dma_map_single(hwdev == NULL ? NULL : &hwdev->dev, ptr, size, (enum dma_data_direction)direction); | 40 | return dma_map_single(&hwdev->dev, ptr, size, (enum dma_data_direction)direction); |
42 | } | 41 | } |
43 | 42 | ||
44 | static inline void | 43 | static inline void |
45 | pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, | 44 | pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, |
46 | size_t size, int direction) | 45 | size_t size, int direction) |
47 | { | 46 | { |
48 | dma_unmap_single(hwdev == NULL ? NULL : &hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); | 47 | dma_unmap_single(&hwdev->dev, dma_addr, size, (enum dma_data_direction)direction); |
49 | } | 48 | } |
50 | 49 | ||
51 | static inline dma_addr_t | 50 | static inline dma_addr_t |
52 | pci_map_page(struct pci_dev *hwdev, struct page *page, | 51 | pci_map_page(struct pci_dev *hwdev, struct page *page, |
53 | unsigned long offset, size_t size, int direction) | 52 | unsigned long offset, size_t size, int direction) |
54 | { | 53 | { |
55 | return dma_map_page(hwdev == NULL ? NULL : &hwdev->dev, page, offset, size, (enum dma_data_direction)direction); | 54 | return dma_map_page(&hwdev->dev, page, offset, size, (enum dma_data_direction)direction); |
56 | } | 55 | } |
57 | 56 | ||
58 | static inline void | 57 | static inline void |
59 | pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, | 58 | pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address, |
60 | size_t size, int direction) | 59 | size_t size, int direction) |
61 | { | 60 | { |
62 | dma_unmap_page(hwdev == NULL ? NULL : &hwdev->dev, dma_address, size, (enum dma_data_direction)direction); | 61 | dma_unmap_page(&hwdev->dev, dma_address, size, (enum dma_data_direction)direction); |
63 | } | 62 | } |
64 | 63 | ||
65 | static inline int | 64 | static inline int |
66 | pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, | 65 | pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, |
67 | int nents, int direction) | 66 | int nents, int direction) |
68 | { | 67 | { |
69 | return dma_map_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); | 68 | return dma_map_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); |
70 | } | 69 | } |
71 | 70 | ||
72 | static inline void | 71 | static inline void |
73 | pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, | 72 | pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, |
74 | int nents, int direction) | 73 | int nents, int direction) |
75 | { | 74 | { |
76 | dma_unmap_sg(hwdev == NULL ? NULL : &hwdev->dev, sg, nents, (enum dma_data_direction)direction); | 75 | dma_unmap_sg(&hwdev->dev, sg, nents, (enum dma_data_direction)direction); |
77 | } | 76 | } |
78 | 77 | ||
79 | static inline void | 78 | static inline void |
80 | pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, | 79 | pci_dma_sync_single_for_cpu(struct pci_dev *hwdev, dma_addr_t dma_handle, |
81 | size_t size, int direction) | 80 | size_t size, int direction) |
82 | { | 81 | { |
83 | dma_sync_single_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); | 82 | dma_sync_single_for_cpu(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); |
84 | } | 83 | } |
85 | 84 | ||
86 | static inline void | 85 | static inline void |
87 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, | 86 | pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t dma_handle, |
88 | size_t size, int direction) | 87 | size_t size, int direction) |
89 | { | 88 | { |
90 | dma_sync_single_for_device(hwdev == NULL ? NULL : &hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); | 89 | dma_sync_single_for_device(&hwdev->dev, dma_handle, size, (enum dma_data_direction)direction); |
91 | } | 90 | } |
92 | 91 | ||
93 | static inline void | 92 | static inline void |
94 | pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, | 93 | pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, |
95 | int nelems, int direction) | 94 | int nelems, int direction) |
96 | { | 95 | { |
97 | dma_sync_sg_for_cpu(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); | 96 | dma_sync_sg_for_cpu(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); |
98 | } | 97 | } |
99 | 98 | ||
100 | static inline void | 99 | static inline void |
101 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, | 100 | pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, |
102 | int nelems, int direction) | 101 | int nelems, int direction) |
103 | { | 102 | { |
104 | dma_sync_sg_for_device(hwdev == NULL ? NULL : &hwdev->dev, sg, nelems, (enum dma_data_direction)direction); | 103 | dma_sync_sg_for_device(&hwdev->dev, sg, nelems, (enum dma_data_direction)direction); |
105 | } | 104 | } |
106 | 105 | ||
107 | static inline int | 106 | static inline int |
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h index 809c2f1873ac..baadad1aabbc 100644 --- a/include/linux/pci-ecam.h +++ b/include/linux/pci-ecam.h | |||
@@ -1,17 +1,6 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /* | 2 | /* |
2 | * Copyright 2016 Broadcom | 3 | * Copyright 2016 Broadcom |
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License, version 2, as | ||
6 | * published by the Free Software Foundation (the "GPL"). | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License version 2 (GPLv2) for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * version 2 (GPLv2) along with this source code. | ||
15 | */ | 4 | */ |
16 | #ifndef DRIVERS_PCI_ECAM_H | 5 | #ifndef DRIVERS_PCI_ECAM_H |
17 | #define DRIVERS_PCI_ECAM_H | 6 | #define DRIVERS_PCI_ECAM_H |
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h index 263b89ea5705..f42b0fd4b4bc 100644 --- a/include/linux/pci-ep-cfs.h +++ b/include/linux/pci-ep-cfs.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /** | 2 | /** |
2 | * PCI Endpoint ConfigFS header file | 3 | * PCI Endpoint ConfigFS header file |
3 | * | 4 | * |
4 | * Copyright (C) 2017 Texas Instruments | 5 | * Copyright (C) 2017 Texas Instruments |
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | 6 | * Author: Kishon Vijay Abraham I <kishon@ti.com> |
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #ifndef __LINUX_PCI_EP_CFS_H | 9 | #ifndef __LINUX_PCI_EP_CFS_H |
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index f7a04e1af112..a1a5e5df0f66 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /** | 2 | /** |
2 | * PCI Endpoint *Controller* (EPC) header file | 3 | * PCI Endpoint *Controller* (EPC) header file |
3 | * | 4 | * |
4 | * Copyright (C) 2017 Texas Instruments | 5 | * Copyright (C) 2017 Texas Instruments |
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | 6 | * Author: Kishon Vijay Abraham I <kishon@ti.com> |
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #ifndef __LINUX_PCI_EPC_H | 9 | #ifndef __LINUX_PCI_EPC_H |
@@ -39,17 +36,20 @@ enum pci_epc_irq_type { | |||
39 | * @owner: the module owner containing the ops | 36 | * @owner: the module owner containing the ops |
40 | */ | 37 | */ |
41 | struct pci_epc_ops { | 38 | struct pci_epc_ops { |
42 | int (*write_header)(struct pci_epc *pci_epc, | 39 | int (*write_header)(struct pci_epc *epc, u8 func_no, |
43 | struct pci_epf_header *hdr); | 40 | struct pci_epf_header *hdr); |
44 | int (*set_bar)(struct pci_epc *epc, enum pci_barno bar, | 41 | int (*set_bar)(struct pci_epc *epc, u8 func_no, |
42 | enum pci_barno bar, | ||
45 | dma_addr_t bar_phys, size_t size, int flags); | 43 | dma_addr_t bar_phys, size_t size, int flags); |
46 | void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar); | 44 | void (*clear_bar)(struct pci_epc *epc, u8 func_no, |
47 | int (*map_addr)(struct pci_epc *epc, phys_addr_t addr, | 45 | enum pci_barno bar); |
48 | u64 pci_addr, size_t size); | 46 | int (*map_addr)(struct pci_epc *epc, u8 func_no, |
49 | void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr); | 47 | phys_addr_t addr, u64 pci_addr, size_t size); |
50 | int (*set_msi)(struct pci_epc *epc, u8 interrupts); | 48 | void (*unmap_addr)(struct pci_epc *epc, u8 func_no, |
51 | int (*get_msi)(struct pci_epc *epc); | 49 | phys_addr_t addr); |
52 | int (*raise_irq)(struct pci_epc *pci_epc, | 50 | int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts); |
51 | int (*get_msi)(struct pci_epc *epc, u8 func_no); | ||
52 | int (*raise_irq)(struct pci_epc *epc, u8 func_no, | ||
53 | enum pci_epc_irq_type type, u8 interrupt_num); | 53 | enum pci_epc_irq_type type, u8 interrupt_num); |
54 | int (*start)(struct pci_epc *epc); | 54 | int (*start)(struct pci_epc *epc); |
55 | void (*stop)(struct pci_epc *epc); | 55 | void (*stop)(struct pci_epc *epc); |
@@ -124,17 +124,21 @@ void pci_epc_destroy(struct pci_epc *epc); | |||
124 | int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); | 124 | int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf); |
125 | void pci_epc_linkup(struct pci_epc *epc); | 125 | void pci_epc_linkup(struct pci_epc *epc); |
126 | void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); | 126 | void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf); |
127 | int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr); | 127 | int pci_epc_write_header(struct pci_epc *epc, u8 func_no, |
128 | int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar, | 128 | struct pci_epf_header *hdr); |
129 | int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, | ||
130 | enum pci_barno bar, | ||
129 | dma_addr_t bar_phys, size_t size, int flags); | 131 | dma_addr_t bar_phys, size_t size, int flags); |
130 | void pci_epc_clear_bar(struct pci_epc *epc, int bar); | 132 | void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, int bar); |
131 | int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr, | 133 | int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, |
134 | phys_addr_t phys_addr, | ||
132 | u64 pci_addr, size_t size); | 135 | u64 pci_addr, size_t size); |
133 | void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr); | 136 | void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, |
134 | int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts); | 137 | phys_addr_t phys_addr); |
135 | int pci_epc_get_msi(struct pci_epc *epc); | 138 | int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts); |
136 | int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type, | 139 | int pci_epc_get_msi(struct pci_epc *epc, u8 func_no); |
137 | u8 interrupt_num); | 140 | int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, |
141 | enum pci_epc_irq_type type, u8 interrupt_num); | ||
138 | int pci_epc_start(struct pci_epc *epc); | 142 | int pci_epc_start(struct pci_epc *epc); |
139 | void pci_epc_stop(struct pci_epc *epc); | 143 | void pci_epc_stop(struct pci_epc *epc); |
140 | struct pci_epc *pci_epc_get(const char *epc_name); | 144 | struct pci_epc *pci_epc_get(const char *epc_name); |
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h index 60d551a9a1ba..e897bf076701 100644 --- a/include/linux/pci-epf.h +++ b/include/linux/pci-epf.h | |||
@@ -1,12 +1,9 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
1 | /** | 2 | /** |
2 | * PCI Endpoint *Function* (EPF) header file | 3 | * PCI Endpoint *Function* (EPF) header file |
3 | * | 4 | * |
4 | * Copyright (C) 2017 Texas Instruments | 5 | * Copyright (C) 2017 Texas Instruments |
5 | * Author: Kishon Vijay Abraham I <kishon@ti.com> | 6 | * Author: Kishon Vijay Abraham I <kishon@ti.com> |
6 | * | ||
7 | * This program is free software: you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 of | ||
9 | * the License as published by the Free Software Foundation. | ||
10 | */ | 7 | */ |
11 | 8 | ||
12 | #ifndef __LINUX_PCI_EPF_H | 9 | #ifndef __LINUX_PCI_EPF_H |
diff --git a/include/linux/pci.h b/include/linux/pci.h index c170c9250c8b..024a1beda008 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
@@ -48,17 +48,17 @@ | |||
48 | * In the interest of not exposing interfaces to user-space unnecessarily, | 48 | * In the interest of not exposing interfaces to user-space unnecessarily, |
49 | * the following kernel-only defines are being added here. | 49 | * the following kernel-only defines are being added here. |
50 | */ | 50 | */ |
51 | #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) | 51 | #define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) |
52 | /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ | 52 | /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */ |
53 | #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) | 53 | #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff) |
54 | 54 | ||
55 | /* pci_slot represents a physical slot */ | 55 | /* pci_slot represents a physical slot */ |
56 | struct pci_slot { | 56 | struct pci_slot { |
57 | struct pci_bus *bus; /* The bus this slot is on */ | 57 | struct pci_bus *bus; /* Bus this slot is on */ |
58 | struct list_head list; /* node in list of slots on this bus */ | 58 | struct list_head list; /* Node in list of slots */ |
59 | struct hotplug_slot *hotplug; /* Hotplug info (migrate over time) */ | 59 | struct hotplug_slot *hotplug; /* Hotplug info (move here) */ |
60 | unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ | 60 | unsigned char number; /* PCI_SLOT(pci_dev->devfn) */ |
61 | struct kobject kobj; | 61 | struct kobject kobj; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static inline const char *pci_slot_name(const struct pci_slot *slot) | 64 | static inline const char *pci_slot_name(const struct pci_slot *slot) |
@@ -72,9 +72,7 @@ enum pci_mmap_state { | |||
72 | pci_mmap_mem | 72 | pci_mmap_mem |
73 | }; | 73 | }; |
74 | 74 | ||
75 | /* | 75 | /* For PCI devices, the region numbers are assigned this way: */ |
76 | * For PCI devices, the region numbers are assigned this way: | ||
77 | */ | ||
78 | enum { | 76 | enum { |
79 | /* #0-5: standard PCI resources */ | 77 | /* #0-5: standard PCI resources */ |
80 | PCI_STD_RESOURCES, | 78 | PCI_STD_RESOURCES, |
@@ -83,23 +81,23 @@ enum { | |||
83 | /* #6: expansion ROM resource */ | 81 | /* #6: expansion ROM resource */ |
84 | PCI_ROM_RESOURCE, | 82 | PCI_ROM_RESOURCE, |
85 | 83 | ||
86 | /* device specific resources */ | 84 | /* Device-specific resources */ |
87 | #ifdef CONFIG_PCI_IOV | 85 | #ifdef CONFIG_PCI_IOV |
88 | PCI_IOV_RESOURCES, | 86 | PCI_IOV_RESOURCES, |
89 | PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, | 87 | PCI_IOV_RESOURCE_END = PCI_IOV_RESOURCES + PCI_SRIOV_NUM_BARS - 1, |
90 | #endif | 88 | #endif |
91 | 89 | ||
92 | /* resources assigned to buses behind the bridge */ | 90 | /* Resources assigned to buses behind the bridge */ |
93 | #define PCI_BRIDGE_RESOURCE_NUM 4 | 91 | #define PCI_BRIDGE_RESOURCE_NUM 4 |
94 | 92 | ||
95 | PCI_BRIDGE_RESOURCES, | 93 | PCI_BRIDGE_RESOURCES, |
96 | PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + | 94 | PCI_BRIDGE_RESOURCE_END = PCI_BRIDGE_RESOURCES + |
97 | PCI_BRIDGE_RESOURCE_NUM - 1, | 95 | PCI_BRIDGE_RESOURCE_NUM - 1, |
98 | 96 | ||
99 | /* total resources associated with a PCI device */ | 97 | /* Total resources associated with a PCI device */ |
100 | PCI_NUM_RESOURCES, | 98 | PCI_NUM_RESOURCES, |
101 | 99 | ||
102 | /* preserve this for compatibility */ | 100 | /* Preserve this for compatibility */ |
103 | DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, | 101 | DEVICE_COUNT_RESOURCE = PCI_NUM_RESOURCES, |
104 | }; | 102 | }; |
105 | 103 | ||
@@ -152,9 +150,10 @@ static inline const char *pci_power_name(pci_power_t state) | |||
152 | #define PCI_PM_D3COLD_WAIT 100 | 150 | #define PCI_PM_D3COLD_WAIT 100 |
153 | #define PCI_PM_BUS_WAIT 50 | 151 | #define PCI_PM_BUS_WAIT 50 |
154 | 152 | ||
155 | /** The pci_channel state describes connectivity between the CPU and | 153 | /** |
156 | * the pci device. If some PCI bus between here and the pci device | 154 | * The pci_channel state describes connectivity between the CPU and |
157 | * has crashed or locked up, this info is reflected here. | 155 | * the PCI device. If some PCI bus between here and the PCI device |
156 | * has crashed or locked up, this info is reflected here. | ||
158 | */ | 157 | */ |
159 | typedef unsigned int __bitwise pci_channel_state_t; | 158 | typedef unsigned int __bitwise pci_channel_state_t; |
160 | 159 | ||
@@ -184,9 +183,7 @@ enum pcie_reset_state { | |||
184 | 183 | ||
185 | typedef unsigned short __bitwise pci_dev_flags_t; | 184 | typedef unsigned short __bitwise pci_dev_flags_t; |
186 | enum pci_dev_flags { | 185 | enum pci_dev_flags { |
187 | /* INTX_DISABLE in PCI_COMMAND register disables MSI | 186 | /* INTX_DISABLE in PCI_COMMAND register disables MSI too */ |
188 | * generation too. | ||
189 | */ | ||
190 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), | 187 | PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG = (__force pci_dev_flags_t) (1 << 0), |
191 | /* Device configuration is irrevocably lost if disabled into D3 */ | 188 | /* Device configuration is irrevocably lost if disabled into D3 */ |
192 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), | 189 | PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) (1 << 1), |
@@ -202,7 +199,7 @@ enum pci_dev_flags { | |||
202 | PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), | 199 | PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7), |
203 | /* Get VPD from function 0 VPD */ | 200 | /* Get VPD from function 0 VPD */ |
204 | PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), | 201 | PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8), |
205 | /* a non-root bridge where translation occurs, stop alias search here */ | 202 | /* A non-root bridge where translation occurs, stop alias search here */ |
206 | PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), | 203 | PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9), |
207 | /* Do not use FLR even if device advertises PCI_AF_CAP */ | 204 | /* Do not use FLR even if device advertises PCI_AF_CAP */ |
208 | PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), | 205 | PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10), |
@@ -222,17 +219,17 @@ enum pci_bus_flags { | |||
222 | PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, | 219 | PCI_BUS_FLAGS_NO_AERSID = (__force pci_bus_flags_t) 4, |
223 | }; | 220 | }; |
224 | 221 | ||
225 | /* These values come from the PCI Express Spec */ | 222 | /* Values from Link Status register, PCIe r3.1, sec 7.8.8 */ |
226 | enum pcie_link_width { | 223 | enum pcie_link_width { |
227 | PCIE_LNK_WIDTH_RESRV = 0x00, | 224 | PCIE_LNK_WIDTH_RESRV = 0x00, |
228 | PCIE_LNK_X1 = 0x01, | 225 | PCIE_LNK_X1 = 0x01, |
229 | PCIE_LNK_X2 = 0x02, | 226 | PCIE_LNK_X2 = 0x02, |
230 | PCIE_LNK_X4 = 0x04, | 227 | PCIE_LNK_X4 = 0x04, |
231 | PCIE_LNK_X8 = 0x08, | 228 | PCIE_LNK_X8 = 0x08, |
232 | PCIE_LNK_X12 = 0x0C, | 229 | PCIE_LNK_X12 = 0x0c, |
233 | PCIE_LNK_X16 = 0x10, | 230 | PCIE_LNK_X16 = 0x10, |
234 | PCIE_LNK_X32 = 0x20, | 231 | PCIE_LNK_X32 = 0x20, |
235 | PCIE_LNK_WIDTH_UNKNOWN = 0xFF, | 232 | PCIE_LNK_WIDTH_UNKNOWN = 0xff, |
236 | }; | 233 | }; |
237 | 234 | ||
238 | /* Based on the PCI Hotplug Spec, but some values are made up by us */ | 235 | /* Based on the PCI Hotplug Spec, but some values are made up by us */ |
@@ -263,15 +260,15 @@ enum pci_bus_speed { | |||
263 | }; | 260 | }; |
264 | 261 | ||
265 | struct pci_cap_saved_data { | 262 | struct pci_cap_saved_data { |
266 | u16 cap_nr; | 263 | u16 cap_nr; |
267 | bool cap_extended; | 264 | bool cap_extended; |
268 | unsigned int size; | 265 | unsigned int size; |
269 | u32 data[0]; | 266 | u32 data[0]; |
270 | }; | 267 | }; |
271 | 268 | ||
272 | struct pci_cap_saved_state { | 269 | struct pci_cap_saved_state { |
273 | struct hlist_node next; | 270 | struct hlist_node next; |
274 | struct pci_cap_saved_data cap; | 271 | struct pci_cap_saved_data cap; |
275 | }; | 272 | }; |
276 | 273 | ||
277 | struct irq_affinity; | 274 | struct irq_affinity; |
@@ -280,19 +277,17 @@ struct pci_vpd; | |||
280 | struct pci_sriov; | 277 | struct pci_sriov; |
281 | struct pci_ats; | 278 | struct pci_ats; |
282 | 279 | ||
283 | /* | 280 | /* The pci_dev structure describes PCI devices */ |
284 | * The pci_dev structure is used to describe PCI devices. | ||
285 | */ | ||
286 | struct pci_dev { | 281 | struct pci_dev { |
287 | struct list_head bus_list; /* node in per-bus list */ | 282 | struct list_head bus_list; /* Node in per-bus list */ |
288 | struct pci_bus *bus; /* bus this device is on */ | 283 | struct pci_bus *bus; /* Bus this device is on */ |
289 | struct pci_bus *subordinate; /* bus this device bridges to */ | 284 | struct pci_bus *subordinate; /* Bus this device bridges to */ |
290 | 285 | ||
291 | void *sysdata; /* hook for sys-specific extension */ | 286 | void *sysdata; /* Hook for sys-specific extension */ |
292 | struct proc_dir_entry *procent; /* device entry in /proc/bus/pci */ | 287 | struct proc_dir_entry *procent; /* Device entry in /proc/bus/pci */ |
293 | struct pci_slot *slot; /* Physical slot this device is in */ | 288 | struct pci_slot *slot; /* Physical slot this device is in */ |
294 | 289 | ||
295 | unsigned int devfn; /* encoded device & function index */ | 290 | unsigned int devfn; /* Encoded device & function index */ |
296 | unsigned short vendor; | 291 | unsigned short vendor; |
297 | unsigned short device; | 292 | unsigned short device; |
298 | unsigned short subsystem_vendor; | 293 | unsigned short subsystem_vendor; |
@@ -307,12 +302,12 @@ struct pci_dev { | |||
307 | u8 msi_cap; /* MSI capability offset */ | 302 | u8 msi_cap; /* MSI capability offset */ |
308 | u8 msix_cap; /* MSI-X capability offset */ | 303 | u8 msix_cap; /* MSI-X capability offset */ |
309 | u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */ | 304 | u8 pcie_mpss:3; /* PCIe Max Payload Size Supported */ |
310 | u8 rom_base_reg; /* which config register controls the ROM */ | 305 | u8 rom_base_reg; /* Config register controlling ROM */ |
311 | u8 pin; /* which interrupt pin this device uses */ | 306 | u8 pin; /* Interrupt pin this device uses */ |
312 | u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ | 307 | u16 pcie_flags_reg; /* Cached PCIe Capabilities Register */ |
313 | unsigned long *dma_alias_mask;/* mask of enabled devfn aliases */ | 308 | unsigned long *dma_alias_mask;/* Mask of enabled devfn aliases */ |
314 | 309 | ||
315 | struct pci_driver *driver; /* which driver has allocated this device */ | 310 | struct pci_driver *driver; /* Driver bound to this device */ |
316 | u64 dma_mask; /* Mask of the bits of bus address this | 311 | u64 dma_mask; /* Mask of the bits of bus address this |
317 | device implements. Normally this is | 312 | device implements. Normally this is |
318 | 0xffffffff. You only need to change | 313 | 0xffffffff. You only need to change |
@@ -321,9 +316,9 @@ struct pci_dev { | |||
321 | 316 | ||
322 | struct device_dma_parameters dma_parms; | 317 | struct device_dma_parameters dma_parms; |
323 | 318 | ||
324 | pci_power_t current_state; /* Current operating state. In ACPI-speak, | 319 | pci_power_t current_state; /* Current operating state. In ACPI, |
325 | this is D0-D3, D0 being fully functional, | 320 | this is D0-D3, D0 being fully |
326 | and D3 being off. */ | 321 | functional, and D3 being off. */ |
327 | u8 pm_cap; /* PM capability offset */ | 322 | u8 pm_cap; /* PM capability offset */ |
328 | unsigned int pme_support:5; /* Bitmask of states from which PME# | 323 | unsigned int pme_support:5; /* Bitmask of states from which PME# |
329 | can be generated */ | 324 | can be generated */ |
@@ -334,10 +329,10 @@ struct pci_dev { | |||
334 | unsigned int no_d3cold:1; /* D3cold is forbidden */ | 329 | unsigned int no_d3cold:1; /* D3cold is forbidden */ |
335 | unsigned int bridge_d3:1; /* Allow D3 for bridge */ | 330 | unsigned int bridge_d3:1; /* Allow D3 for bridge */ |
336 | unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ | 331 | unsigned int d3cold_allowed:1; /* D3cold is allowed by user */ |
337 | unsigned int mmio_always_on:1; /* disallow turning off io/mem | 332 | unsigned int mmio_always_on:1; /* Disallow turning off io/mem |
338 | decoding during bar sizing */ | 333 | decoding during BAR sizing */ |
339 | unsigned int wakeup_prepared:1; | 334 | unsigned int wakeup_prepared:1; |
340 | unsigned int runtime_d3cold:1; /* whether go through runtime | 335 | unsigned int runtime_d3cold:1; /* Whether go through runtime |
341 | D3cold, not set for devices | 336 | D3cold, not set for devices |
342 | powered on/off by the | 337 | powered on/off by the |
343 | corresponding bridge */ | 338 | corresponding bridge */ |
@@ -350,12 +345,14 @@ struct pci_dev { | |||
350 | 345 | ||
351 | #ifdef CONFIG_PCIEASPM | 346 | #ifdef CONFIG_PCIEASPM |
352 | struct pcie_link_state *link_state; /* ASPM link state */ | 347 | struct pcie_link_state *link_state; /* ASPM link state */ |
348 | unsigned int ltr_path:1; /* Latency Tolerance Reporting | ||
349 | supported from root to here */ | ||
353 | #endif | 350 | #endif |
354 | 351 | ||
355 | pci_channel_state_t error_state; /* current connectivity state */ | 352 | pci_channel_state_t error_state; /* Current connectivity state */ |
356 | struct device dev; /* Generic device interface */ | 353 | struct device dev; /* Generic device interface */ |
357 | 354 | ||
358 | int cfg_size; /* Size of configuration space */ | 355 | int cfg_size; /* Size of config space */ |
359 | 356 | ||
360 | /* | 357 | /* |
361 | * Instead of touching interrupt line and base address registers | 358 | * Instead of touching interrupt line and base address registers |
@@ -364,47 +361,47 @@ struct pci_dev { | |||
364 | unsigned int irq; | 361 | unsigned int irq; |
365 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ | 362 | struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */ |
366 | 363 | ||
367 | bool match_driver; /* Skip attaching driver */ | 364 | bool match_driver; /* Skip attaching driver */ |
368 | /* These fields are used by common fixups */ | 365 | |
369 | unsigned int transparent:1; /* Subtractive decode PCI bridge */ | 366 | unsigned int transparent:1; /* Subtractive decode bridge */ |
370 | unsigned int multifunction:1;/* Part of multi-function device */ | 367 | unsigned int multifunction:1; /* Multi-function device */ |
371 | /* keep track of device state */ | 368 | |
372 | unsigned int is_added:1; | 369 | unsigned int is_added:1; |
373 | unsigned int is_busmaster:1; /* device is busmaster */ | 370 | unsigned int is_busmaster:1; /* Is busmaster */ |
374 | unsigned int no_msi:1; /* device may not use msi */ | 371 | unsigned int no_msi:1; /* May not use MSI */ |
375 | unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */ | 372 | unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */ |
376 | unsigned int block_cfg_access:1; /* config space access is blocked */ | 373 | unsigned int block_cfg_access:1; /* Config space access blocked */ |
377 | unsigned int broken_parity_status:1; /* Device generates false positive parity */ | 374 | unsigned int broken_parity_status:1; /* Generates false positive parity */ |
378 | unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ | 375 | unsigned int irq_reroute_variant:2; /* Needs IRQ rerouting variant */ |
379 | unsigned int msi_enabled:1; | 376 | unsigned int msi_enabled:1; |
380 | unsigned int msix_enabled:1; | 377 | unsigned int msix_enabled:1; |
381 | unsigned int ari_enabled:1; /* ARI forwarding */ | 378 | unsigned int ari_enabled:1; /* ARI forwarding */ |
382 | unsigned int ats_enabled:1; /* Address Translation Service */ | 379 | unsigned int ats_enabled:1; /* Address Translation Svc */ |
383 | unsigned int pasid_enabled:1; /* Process Address Space ID */ | 380 | unsigned int pasid_enabled:1; /* Process Address Space ID */ |
384 | unsigned int pri_enabled:1; /* Page Request Interface */ | 381 | unsigned int pri_enabled:1; /* Page Request Interface */ |
385 | unsigned int is_managed:1; | 382 | unsigned int is_managed:1; |
386 | unsigned int needs_freset:1; /* Dev requires fundamental reset */ | 383 | unsigned int needs_freset:1; /* Requires fundamental reset */ |
387 | unsigned int state_saved:1; | 384 | unsigned int state_saved:1; |
388 | unsigned int is_physfn:1; | 385 | unsigned int is_physfn:1; |
389 | unsigned int is_virtfn:1; | 386 | unsigned int is_virtfn:1; |
390 | unsigned int reset_fn:1; | 387 | unsigned int reset_fn:1; |
391 | unsigned int is_hotplug_bridge:1; | 388 | unsigned int is_hotplug_bridge:1; |
392 | unsigned int is_thunderbolt:1; /* Thunderbolt controller */ | 389 | unsigned int is_thunderbolt:1; /* Thunderbolt controller */ |
393 | unsigned int __aer_firmware_first_valid:1; | 390 | unsigned int __aer_firmware_first_valid:1; |
394 | unsigned int __aer_firmware_first:1; | 391 | unsigned int __aer_firmware_first:1; |
395 | unsigned int broken_intx_masking:1; /* INTx masking can't be used */ | 392 | unsigned int broken_intx_masking:1; /* INTx masking can't be used */ |
396 | unsigned int io_window_1k:1; /* Intel P2P bridge 1K I/O windows */ | 393 | unsigned int io_window_1k:1; /* Intel bridge 1K I/O windows */ |
397 | unsigned int irq_managed:1; | 394 | unsigned int irq_managed:1; |
398 | unsigned int has_secondary_link:1; | 395 | unsigned int has_secondary_link:1; |
399 | unsigned int non_compliant_bars:1; /* broken BARs; ignore them */ | 396 | unsigned int non_compliant_bars:1; /* Broken BARs; ignore them */ |
400 | unsigned int is_probed:1; /* device probing in progress */ | 397 | unsigned int is_probed:1; /* Device probing in progress */ |
401 | pci_dev_flags_t dev_flags; | 398 | pci_dev_flags_t dev_flags; |
402 | atomic_t enable_cnt; /* pci_enable_device has been called */ | 399 | atomic_t enable_cnt; /* pci_enable_device has been called */ |
403 | 400 | ||
404 | u32 saved_config_space[16]; /* config space saved at suspend time */ | 401 | u32 saved_config_space[16]; /* Config space saved at suspend time */ |
405 | struct hlist_head saved_cap_space; | 402 | struct hlist_head saved_cap_space; |
406 | struct bin_attribute *rom_attr; /* attribute descriptor for sysfs ROM entry */ | 403 | struct bin_attribute *rom_attr; /* Attribute descriptor for sysfs ROM entry */ |
407 | int rom_attr_enabled; /* has display of the rom attribute been enabled? */ | 404 | int rom_attr_enabled; /* Display of ROM attribute enabled? */ |
408 | struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ | 405 | struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ |
409 | struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ | 406 | struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ |
410 | 407 | ||
@@ -419,12 +416,12 @@ struct pci_dev { | |||
419 | struct pci_vpd *vpd; | 416 | struct pci_vpd *vpd; |
420 | #ifdef CONFIG_PCI_ATS | 417 | #ifdef CONFIG_PCI_ATS |
421 | union { | 418 | union { |
422 | struct pci_sriov *sriov; /* SR-IOV capability related */ | 419 | struct pci_sriov *sriov; /* PF: SR-IOV info */ |
423 | struct pci_dev *physfn; /* the PF this VF is associated with */ | 420 | struct pci_dev *physfn; /* VF: related PF */ |
424 | }; | 421 | }; |
425 | u16 ats_cap; /* ATS Capability offset */ | 422 | u16 ats_cap; /* ATS Capability offset */ |
426 | u8 ats_stu; /* ATS Smallest Translation Unit */ | 423 | u8 ats_stu; /* ATS Smallest Translation Unit */ |
427 | atomic_t ats_ref_cnt; /* number of VFs with ATS enabled */ | 424 | atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */ |
428 | #endif | 425 | #endif |
429 | #ifdef CONFIG_PCI_PRI | 426 | #ifdef CONFIG_PCI_PRI |
430 | u32 pri_reqs_alloc; /* Number of PRI requests allocated */ | 427 | u32 pri_reqs_alloc; /* Number of PRI requests allocated */ |
@@ -432,11 +429,11 @@ struct pci_dev { | |||
432 | #ifdef CONFIG_PCI_PASID | 429 | #ifdef CONFIG_PCI_PASID |
433 | u16 pasid_features; | 430 | u16 pasid_features; |
434 | #endif | 431 | #endif |
435 | phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */ | 432 | phys_addr_t rom; /* Physical address if not from BAR */ |
436 | size_t romlen; /* Length of ROM if it's not from the BAR */ | 433 | size_t romlen; /* Length if not from BAR */ |
437 | char *driver_override; /* Driver name to force a match */ | 434 | char *driver_override; /* Driver name to force a match */ |
438 | 435 | ||
439 | unsigned long priv_flags; /* Private flags for the pci driver */ | 436 | unsigned long priv_flags; /* Private flags for the PCI driver */ |
440 | }; | 437 | }; |
441 | 438 | ||
442 | static inline struct pci_dev *pci_physfn(struct pci_dev *dev) | 439 | static inline struct pci_dev *pci_physfn(struct pci_dev *dev) |
@@ -459,26 +456,26 @@ static inline int pci_channel_offline(struct pci_dev *pdev) | |||
459 | } | 456 | } |
460 | 457 | ||
461 | struct pci_host_bridge { | 458 | struct pci_host_bridge { |
462 | struct device dev; | 459 | struct device dev; |
463 | struct pci_bus *bus; /* root bus */ | 460 | struct pci_bus *bus; /* Root bus */ |
464 | struct pci_ops *ops; | 461 | struct pci_ops *ops; |
465 | void *sysdata; | 462 | void *sysdata; |
466 | int busnr; | 463 | int busnr; |
467 | struct list_head windows; /* resource_entry */ | 464 | struct list_head windows; /* resource_entry */ |
468 | u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* platform IRQ swizzler */ | 465 | u8 (*swizzle_irq)(struct pci_dev *, u8 *); /* Platform IRQ swizzler */ |
469 | int (*map_irq)(const struct pci_dev *, u8, u8); | 466 | int (*map_irq)(const struct pci_dev *, u8, u8); |
470 | void (*release_fn)(struct pci_host_bridge *); | 467 | void (*release_fn)(struct pci_host_bridge *); |
471 | void *release_data; | 468 | void *release_data; |
472 | struct msi_controller *msi; | 469 | struct msi_controller *msi; |
473 | unsigned int ignore_reset_delay:1; /* for entire hierarchy */ | 470 | unsigned int ignore_reset_delay:1; /* For entire hierarchy */ |
474 | unsigned int no_ext_tags:1; /* no Extended Tags */ | 471 | unsigned int no_ext_tags:1; /* No Extended Tags */ |
475 | /* Resource alignment requirements */ | 472 | /* Resource alignment requirements */ |
476 | resource_size_t (*align_resource)(struct pci_dev *dev, | 473 | resource_size_t (*align_resource)(struct pci_dev *dev, |
477 | const struct resource *res, | 474 | const struct resource *res, |
478 | resource_size_t start, | 475 | resource_size_t start, |
479 | resource_size_t size, | 476 | resource_size_t size, |
480 | resource_size_t align); | 477 | resource_size_t align); |
481 | unsigned long private[0] ____cacheline_aligned; | 478 | unsigned long private[0] ____cacheline_aligned; |
482 | }; | 479 | }; |
483 | 480 | ||
484 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) | 481 | #define to_pci_host_bridge(n) container_of(n, struct pci_host_bridge, dev) |
@@ -500,8 +497,8 @@ void pci_free_host_bridge(struct pci_host_bridge *bridge); | |||
500 | struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); | 497 | struct pci_host_bridge *pci_find_host_bridge(struct pci_bus *bus); |
501 | 498 | ||
502 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, | 499 | void pci_set_host_bridge_release(struct pci_host_bridge *bridge, |
503 | void (*release_fn)(struct pci_host_bridge *), | 500 | void (*release_fn)(struct pci_host_bridge *), |
504 | void *release_data); | 501 | void *release_data); |
505 | 502 | ||
506 | int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); | 503 | int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); |
507 | 504 | ||
@@ -521,32 +518,32 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge); | |||
521 | #define PCI_SUBTRACTIVE_DECODE 0x1 | 518 | #define PCI_SUBTRACTIVE_DECODE 0x1 |
522 | 519 | ||
523 | struct pci_bus_resource { | 520 | struct pci_bus_resource { |
524 | struct list_head list; | 521 | struct list_head list; |
525 | struct resource *res; | 522 | struct resource *res; |
526 | unsigned int flags; | 523 | unsigned int flags; |
527 | }; | 524 | }; |
528 | 525 | ||
529 | #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ | 526 | #define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ |
530 | 527 | ||
531 | struct pci_bus { | 528 | struct pci_bus { |
532 | struct list_head node; /* node in list of buses */ | 529 | struct list_head node; /* Node in list of buses */ |
533 | struct pci_bus *parent; /* parent bus this bridge is on */ | 530 | struct pci_bus *parent; /* Parent bus this bridge is on */ |
534 | struct list_head children; /* list of child buses */ | 531 | struct list_head children; /* List of child buses */ |
535 | struct list_head devices; /* list of devices on this bus */ | 532 | struct list_head devices; /* List of devices on this bus */ |
536 | struct pci_dev *self; /* bridge device as seen by parent */ | 533 | struct pci_dev *self; /* Bridge device as seen by parent */ |
537 | struct list_head slots; /* list of slots on this bus; | 534 | struct list_head slots; /* List of slots on this bus; |
538 | protected by pci_slot_mutex */ | 535 | protected by pci_slot_mutex */ |
539 | struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; | 536 | struct resource *resource[PCI_BRIDGE_RESOURCE_NUM]; |
540 | struct list_head resources; /* address space routed to this bus */ | 537 | struct list_head resources; /* Address space routed to this bus */ |
541 | struct resource busn_res; /* bus numbers routed to this bus */ | 538 | struct resource busn_res; /* Bus numbers routed to this bus */ |
542 | 539 | ||
543 | struct pci_ops *ops; /* configuration access functions */ | 540 | struct pci_ops *ops; /* Configuration access functions */ |
544 | struct msi_controller *msi; /* MSI controller */ | 541 | struct msi_controller *msi; /* MSI controller */ |
545 | void *sysdata; /* hook for sys-specific extension */ | 542 | void *sysdata; /* Hook for sys-specific extension */ |
546 | struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ | 543 | struct proc_dir_entry *procdir; /* Directory entry in /proc/bus/pci */ |
547 | 544 | ||
548 | unsigned char number; /* bus number */ | 545 | unsigned char number; /* Bus number */ |
549 | unsigned char primary; /* number of primary bridge */ | 546 | unsigned char primary; /* Number of primary bridge */ |
550 | unsigned char max_bus_speed; /* enum pci_bus_speed */ | 547 | unsigned char max_bus_speed; /* enum pci_bus_speed */ |
551 | unsigned char cur_bus_speed; /* enum pci_bus_speed */ | 548 | unsigned char cur_bus_speed; /* enum pci_bus_speed */ |
552 | #ifdef CONFIG_PCI_DOMAINS_GENERIC | 549 | #ifdef CONFIG_PCI_DOMAINS_GENERIC |
@@ -555,12 +552,12 @@ struct pci_bus { | |||
555 | 552 | ||
556 | char name[48]; | 553 | char name[48]; |
557 | 554 | ||
558 | unsigned short bridge_ctl; /* manage NO_ISA/FBB/et al behaviors */ | 555 | unsigned short bridge_ctl; /* Manage NO_ISA/FBB/et al behaviors */ |
559 | pci_bus_flags_t bus_flags; /* inherited by child buses */ | 556 | pci_bus_flags_t bus_flags; /* Inherited by child buses */ |
560 | struct device *bridge; | 557 | struct device *bridge; |
561 | struct device dev; | 558 | struct device dev; |
562 | struct bin_attribute *legacy_io; /* legacy I/O for this bus */ | 559 | struct bin_attribute *legacy_io; /* Legacy I/O for this bus */ |
563 | struct bin_attribute *legacy_mem; /* legacy mem */ | 560 | struct bin_attribute *legacy_mem; /* Legacy mem */ |
564 | unsigned int is_added:1; | 561 | unsigned int is_added:1; |
565 | }; | 562 | }; |
566 | 563 | ||
@@ -617,9 +614,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) | |||
617 | static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } | 614 | static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; } |
618 | #endif | 615 | #endif |
619 | 616 | ||
620 | /* | 617 | /* Error values that may be returned by PCI functions */ |
621 | * Error values that may be returned by PCI functions. | ||
622 | */ | ||
623 | #define PCIBIOS_SUCCESSFUL 0x00 | 618 | #define PCIBIOS_SUCCESSFUL 0x00 |
624 | #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 | 619 | #define PCIBIOS_FUNC_NOT_SUPPORTED 0x81 |
625 | #define PCIBIOS_BAD_VENDOR_ID 0x83 | 620 | #define PCIBIOS_BAD_VENDOR_ID 0x83 |
@@ -628,9 +623,7 @@ static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) { return false; | |||
628 | #define PCIBIOS_SET_FAILED 0x88 | 623 | #define PCIBIOS_SET_FAILED 0x88 |
629 | #define PCIBIOS_BUFFER_TOO_SMALL 0x89 | 624 | #define PCIBIOS_BUFFER_TOO_SMALL 0x89 |
630 | 625 | ||
631 | /* | 626 | /* Translate above to generic errno for passing back through non-PCI code */ |
632 | * Translate above to generic errno for passing back through non-PCI code. | ||
633 | */ | ||
634 | static inline int pcibios_err_to_errno(int err) | 627 | static inline int pcibios_err_to_errno(int err) |
635 | { | 628 | { |
636 | if (err <= PCIBIOS_SUCCESSFUL) | 629 | if (err <= PCIBIOS_SUCCESSFUL) |
@@ -680,13 +673,13 @@ typedef u32 pci_bus_addr_t; | |||
680 | #endif | 673 | #endif |
681 | 674 | ||
682 | struct pci_bus_region { | 675 | struct pci_bus_region { |
683 | pci_bus_addr_t start; | 676 | pci_bus_addr_t start; |
684 | pci_bus_addr_t end; | 677 | pci_bus_addr_t end; |
685 | }; | 678 | }; |
686 | 679 | ||
687 | struct pci_dynids { | 680 | struct pci_dynids { |
688 | spinlock_t lock; /* protects list, index */ | 681 | spinlock_t lock; /* Protects list, index */ |
689 | struct list_head list; /* for IDs added at runtime */ | 682 | struct list_head list; /* For IDs added at runtime */ |
690 | }; | 683 | }; |
691 | 684 | ||
692 | 685 | ||
@@ -700,13 +693,13 @@ struct pci_dynids { | |||
700 | typedef unsigned int __bitwise pci_ers_result_t; | 693 | typedef unsigned int __bitwise pci_ers_result_t; |
701 | 694 | ||
702 | enum pci_ers_result { | 695 | enum pci_ers_result { |
703 | /* no result/none/not supported in device driver */ | 696 | /* No result/none/not supported in device driver */ |
704 | PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, | 697 | PCI_ERS_RESULT_NONE = (__force pci_ers_result_t) 1, |
705 | 698 | ||
706 | /* Device driver can recover without slot reset */ | 699 | /* Device driver can recover without slot reset */ |
707 | PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, | 700 | PCI_ERS_RESULT_CAN_RECOVER = (__force pci_ers_result_t) 2, |
708 | 701 | ||
709 | /* Device driver wants slot to be reset. */ | 702 | /* Device driver wants slot to be reset */ |
710 | PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, | 703 | PCI_ERS_RESULT_NEED_RESET = (__force pci_ers_result_t) 3, |
711 | 704 | ||
712 | /* Device has completely failed, is unrecoverable */ | 705 | /* Device has completely failed, is unrecoverable */ |
@@ -742,27 +735,27 @@ struct pci_error_handlers { | |||
742 | 735 | ||
743 | struct module; | 736 | struct module; |
744 | struct pci_driver { | 737 | struct pci_driver { |
745 | struct list_head node; | 738 | struct list_head node; |
746 | const char *name; | 739 | const char *name; |
747 | const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */ | 740 | const struct pci_device_id *id_table; /* Must be non-NULL for probe to be called */ |
748 | int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ | 741 | int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */ |
749 | void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ | 742 | void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */ |
750 | int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */ | 743 | int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */ |
751 | int (*suspend_late) (struct pci_dev *dev, pm_message_t state); | 744 | int (*suspend_late)(struct pci_dev *dev, pm_message_t state); |
752 | int (*resume_early) (struct pci_dev *dev); | 745 | int (*resume_early)(struct pci_dev *dev); |
753 | int (*resume) (struct pci_dev *dev); /* Device woken up */ | 746 | int (*resume) (struct pci_dev *dev); /* Device woken up */ |
754 | void (*shutdown) (struct pci_dev *dev); | 747 | void (*shutdown) (struct pci_dev *dev); |
755 | int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* PF pdev */ | 748 | int (*sriov_configure) (struct pci_dev *dev, int num_vfs); /* On PF */ |
756 | const struct pci_error_handlers *err_handler; | 749 | const struct pci_error_handlers *err_handler; |
757 | const struct attribute_group **groups; | 750 | const struct attribute_group **groups; |
758 | struct device_driver driver; | 751 | struct device_driver driver; |
759 | struct pci_dynids dynids; | 752 | struct pci_dynids dynids; |
760 | }; | 753 | }; |
761 | 754 | ||
762 | #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) | 755 | #define to_pci_driver(drv) container_of(drv, struct pci_driver, driver) |
763 | 756 | ||
764 | /** | 757 | /** |
765 | * PCI_DEVICE - macro used to describe a specific pci device | 758 | * PCI_DEVICE - macro used to describe a specific PCI device |
766 | * @vend: the 16 bit PCI Vendor ID | 759 | * @vend: the 16 bit PCI Vendor ID |
767 | * @dev: the 16 bit PCI Device ID | 760 | * @dev: the 16 bit PCI Device ID |
768 | * | 761 | * |
@@ -775,7 +768,7 @@ struct pci_driver { | |||
775 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID | 768 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID |
776 | 769 | ||
777 | /** | 770 | /** |
778 | * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem | 771 | * PCI_DEVICE_SUB - macro used to describe a specific PCI device with subsystem |
779 | * @vend: the 16 bit PCI Vendor ID | 772 | * @vend: the 16 bit PCI Vendor ID |
780 | * @dev: the 16 bit PCI Device ID | 773 | * @dev: the 16 bit PCI Device ID |
781 | * @subvend: the 16 bit PCI Subvendor ID | 774 | * @subvend: the 16 bit PCI Subvendor ID |
@@ -789,7 +782,7 @@ struct pci_driver { | |||
789 | .subvendor = (subvend), .subdevice = (subdev) | 782 | .subvendor = (subvend), .subdevice = (subdev) |
790 | 783 | ||
791 | /** | 784 | /** |
792 | * PCI_DEVICE_CLASS - macro used to describe a specific pci device class | 785 | * PCI_DEVICE_CLASS - macro used to describe a specific PCI device class |
793 | * @dev_class: the class, subclass, prog-if triple for this device | 786 | * @dev_class: the class, subclass, prog-if triple for this device |
794 | * @dev_class_mask: the class mask for this device | 787 | * @dev_class_mask: the class mask for this device |
795 | * | 788 | * |
@@ -803,7 +796,7 @@ struct pci_driver { | |||
803 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID | 796 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID |
804 | 797 | ||
805 | /** | 798 | /** |
806 | * PCI_VDEVICE - macro used to describe a specific pci device in short form | 799 | * PCI_VDEVICE - macro used to describe a specific PCI device in short form |
807 | * @vend: the vendor name | 800 | * @vend: the vendor name |
808 | * @dev: the 16 bit PCI Device ID | 801 | * @dev: the 16 bit PCI Device ID |
809 | * | 802 | * |
@@ -812,22 +805,21 @@ struct pci_driver { | |||
812 | * to PCI_ANY_ID. The macro allows the next field to follow as the device | 805 | * to PCI_ANY_ID. The macro allows the next field to follow as the device |
813 | * private data. | 806 | * private data. |
814 | */ | 807 | */ |
815 | |||
816 | #define PCI_VDEVICE(vend, dev) \ | 808 | #define PCI_VDEVICE(vend, dev) \ |
817 | .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ | 809 | .vendor = PCI_VENDOR_ID_##vend, .device = (dev), \ |
818 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 | 810 | .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0 |
819 | 811 | ||
820 | enum { | 812 | enum { |
821 | PCI_REASSIGN_ALL_RSRC = 0x00000001, /* ignore firmware setup */ | 813 | PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */ |
822 | PCI_REASSIGN_ALL_BUS = 0x00000002, /* reassign all bus numbers */ | 814 | PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */ |
823 | PCI_PROBE_ONLY = 0x00000004, /* use existing setup */ | 815 | PCI_PROBE_ONLY = 0x00000004, /* Use existing setup */ |
824 | PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* don't do ISA alignment */ | 816 | PCI_CAN_SKIP_ISA_ALIGN = 0x00000008, /* Don't do ISA alignment */ |
825 | PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* enable domains in /proc */ | 817 | PCI_ENABLE_PROC_DOMAINS = 0x00000010, /* Enable domains in /proc */ |
826 | PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */ | 818 | PCI_COMPAT_DOMAIN_0 = 0x00000020, /* ... except domain 0 */ |
827 | PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* scan all, not just dev 0 */ | 819 | PCI_SCAN_ALL_PCIE_DEVS = 0x00000040, /* Scan all, not just dev 0 */ |
828 | }; | 820 | }; |
829 | 821 | ||
830 | /* these external functions are only available when PCI support is enabled */ | 822 | /* These external functions are only available when PCI support is enabled */ |
831 | #ifdef CONFIG_PCI | 823 | #ifdef CONFIG_PCI |
832 | 824 | ||
833 | extern unsigned int pci_flags; | 825 | extern unsigned int pci_flags; |
@@ -840,11 +832,11 @@ static inline int pci_has_flag(int flag) { return pci_flags & flag; } | |||
840 | void pcie_bus_configure_settings(struct pci_bus *bus); | 832 | void pcie_bus_configure_settings(struct pci_bus *bus); |
841 | 833 | ||
842 | enum pcie_bus_config_types { | 834 | enum pcie_bus_config_types { |
843 | PCIE_BUS_TUNE_OFF, /* don't touch MPS at all */ | 835 | PCIE_BUS_TUNE_OFF, /* Don't touch MPS at all */ |
844 | PCIE_BUS_DEFAULT, /* ensure MPS matches upstream bridge */ | 836 | PCIE_BUS_DEFAULT, /* Ensure MPS matches upstream bridge */ |
845 | PCIE_BUS_SAFE, /* use largest MPS boot-time devices support */ | 837 | PCIE_BUS_SAFE, /* Use largest MPS boot-time devices support */ |
846 | PCIE_BUS_PERFORMANCE, /* use MPS and MRRS for best performance */ | 838 | PCIE_BUS_PERFORMANCE, /* Use MPS and MRRS for best performance */ |
847 | PCIE_BUS_PEER2PEER, /* set MPS = 128 for all devices */ | 839 | PCIE_BUS_PEER2PEER, /* Set MPS = 128 for all devices */ |
848 | }; | 840 | }; |
849 | 841 | ||
850 | extern enum pcie_bus_config_types pcie_bus_config; | 842 | extern enum pcie_bus_config_types pcie_bus_config; |
@@ -853,7 +845,7 @@ extern struct bus_type pci_bus_type; | |||
853 | 845 | ||
854 | /* Do NOT directly access these two variables, unless you are arch-specific PCI | 846 | /* Do NOT directly access these two variables, unless you are arch-specific PCI |
855 | * code, or PCI core code. */ | 847 | * code, or PCI core code. */ |
856 | extern struct list_head pci_root_buses; /* list of all known PCI buses */ | 848 | extern struct list_head pci_root_buses; /* List of all known PCI buses */ |
857 | /* Some device drivers need know if PCI is initiated */ | 849 | /* Some device drivers need know if PCI is initiated */ |
858 | int no_pci_devices(void); | 850 | int no_pci_devices(void); |
859 | 851 | ||
@@ -887,12 +879,13 @@ struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops, void *sysdata); | |||
887 | struct pci_bus *pci_create_root_bus(struct device *parent, int bus, | 879 | struct pci_bus *pci_create_root_bus(struct device *parent, int bus, |
888 | struct pci_ops *ops, void *sysdata, | 880 | struct pci_ops *ops, void *sysdata, |
889 | struct list_head *resources); | 881 | struct list_head *resources); |
882 | int pci_host_probe(struct pci_host_bridge *bridge); | ||
890 | int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); | 883 | int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int busmax); |
891 | int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); | 884 | int pci_bus_update_busn_res_end(struct pci_bus *b, int busmax); |
892 | void pci_bus_release_busn_res(struct pci_bus *b); | 885 | void pci_bus_release_busn_res(struct pci_bus *b); |
893 | struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, | 886 | struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, |
894 | struct pci_ops *ops, void *sysdata, | 887 | struct pci_ops *ops, void *sysdata, |
895 | struct list_head *resources); | 888 | struct list_head *resources); |
896 | int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); | 889 | int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge); |
897 | struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, | 890 | struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, |
898 | int busnr); | 891 | int busnr); |
@@ -949,10 +942,10 @@ int pci_find_next_ht_capability(struct pci_dev *dev, int pos, int ht_cap); | |||
949 | struct pci_bus *pci_find_next_bus(const struct pci_bus *from); | 942 | struct pci_bus *pci_find_next_bus(const struct pci_bus *from); |
950 | 943 | ||
951 | struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, | 944 | struct pci_dev *pci_get_device(unsigned int vendor, unsigned int device, |
952 | struct pci_dev *from); | 945 | struct pci_dev *from); |
953 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, | 946 | struct pci_dev *pci_get_subsys(unsigned int vendor, unsigned int device, |
954 | unsigned int ss_vendor, unsigned int ss_device, | 947 | unsigned int ss_vendor, unsigned int ss_device, |
955 | struct pci_dev *from); | 948 | struct pci_dev *from); |
956 | struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); | 949 | struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn); |
957 | struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, | 950 | struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus, |
958 | unsigned int devfn); | 951 | unsigned int devfn); |
@@ -1028,7 +1021,7 @@ static inline int pcie_capability_clear_dword(struct pci_dev *dev, int pos, | |||
1028 | return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); | 1021 | return pcie_capability_clear_and_set_dword(dev, pos, clear, 0); |
1029 | } | 1022 | } |
1030 | 1023 | ||
1031 | /* user-space driven config access */ | 1024 | /* User-space driven config access */ |
1032 | int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); | 1025 | int pci_user_read_config_byte(struct pci_dev *dev, int where, u8 *val); |
1033 | int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); | 1026 | int pci_user_read_config_word(struct pci_dev *dev, int where, u16 *val); |
1034 | int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val); | 1027 | int pci_user_read_config_dword(struct pci_dev *dev, int where, u32 *val); |
@@ -1072,6 +1065,7 @@ int pci_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state); | |||
1072 | int pci_set_cacheline_size(struct pci_dev *dev); | 1065 | int pci_set_cacheline_size(struct pci_dev *dev); |
1073 | #define HAVE_PCI_SET_MWI | 1066 | #define HAVE_PCI_SET_MWI |
1074 | int __must_check pci_set_mwi(struct pci_dev *dev); | 1067 | int __must_check pci_set_mwi(struct pci_dev *dev); |
1068 | int __must_check pcim_set_mwi(struct pci_dev *dev); | ||
1075 | int pci_try_set_mwi(struct pci_dev *dev); | 1069 | int pci_try_set_mwi(struct pci_dev *dev); |
1076 | void pci_clear_mwi(struct pci_dev *dev); | 1070 | void pci_clear_mwi(struct pci_dev *dev); |
1077 | void pci_intx(struct pci_dev *dev, int enable); | 1071 | void pci_intx(struct pci_dev *dev, int enable); |
@@ -1170,7 +1164,7 @@ unsigned int pci_rescan_bus(struct pci_bus *bus); | |||
1170 | void pci_lock_rescan_remove(void); | 1164 | void pci_lock_rescan_remove(void); |
1171 | void pci_unlock_rescan_remove(void); | 1165 | void pci_unlock_rescan_remove(void); |
1172 | 1166 | ||
1173 | /* Vital product data routines */ | 1167 | /* Vital Product Data routines */ |
1174 | ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); | 1168 | ssize_t pci_read_vpd(struct pci_dev *dev, loff_t pos, size_t count, void *buf); |
1175 | ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); | 1169 | ssize_t pci_write_vpd(struct pci_dev *dev, loff_t pos, size_t count, const void *buf); |
1176 | int pci_set_vpd_size(struct pci_dev *dev, size_t len); | 1170 | int pci_set_vpd_size(struct pci_dev *dev, size_t len); |
@@ -1255,9 +1249,7 @@ static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar) | |||
1255 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, | 1249 | int __must_check __pci_register_driver(struct pci_driver *, struct module *, |
1256 | const char *mod_name); | 1250 | const char *mod_name); |
1257 | 1251 | ||
1258 | /* | 1252 | /* pci_register_driver() must be a macro so KBUILD_MODNAME can be expanded */ |
1259 | * pci_register_driver must be a macro so that KBUILD_MODNAME can be expanded | ||
1260 | */ | ||
1261 | #define pci_register_driver(driver) \ | 1253 | #define pci_register_driver(driver) \ |
1262 | __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) | 1254 | __pci_register_driver(driver, THIS_MODULE, KBUILD_MODNAME) |
1263 | 1255 | ||
@@ -1272,8 +1264,7 @@ void pci_unregister_driver(struct pci_driver *dev); | |||
1272 | * use this macro once, and calling it replaces module_init() and module_exit() | 1264 | * use this macro once, and calling it replaces module_init() and module_exit() |
1273 | */ | 1265 | */ |
1274 | #define module_pci_driver(__pci_driver) \ | 1266 | #define module_pci_driver(__pci_driver) \ |
1275 | module_driver(__pci_driver, pci_register_driver, \ | 1267 | module_driver(__pci_driver, pci_register_driver, pci_unregister_driver) |
1276 | pci_unregister_driver) | ||
1277 | 1268 | ||
1278 | /** | 1269 | /** |
1279 | * builtin_pci_driver() - Helper macro for registering a PCI driver | 1270 | * builtin_pci_driver() - Helper macro for registering a PCI driver |
@@ -1312,10 +1303,10 @@ resource_size_t pcibios_iov_resource_alignment(struct pci_dev *dev, int resno); | |||
1312 | int pci_set_vga_state(struct pci_dev *pdev, bool decode, | 1303 | int pci_set_vga_state(struct pci_dev *pdev, bool decode, |
1313 | unsigned int command_bits, u32 flags); | 1304 | unsigned int command_bits, u32 flags); |
1314 | 1305 | ||
1315 | #define PCI_IRQ_LEGACY (1 << 0) /* allow legacy interrupts */ | 1306 | #define PCI_IRQ_LEGACY (1 << 0) /* Allow legacy interrupts */ |
1316 | #define PCI_IRQ_MSI (1 << 1) /* allow MSI interrupts */ | 1307 | #define PCI_IRQ_MSI (1 << 1) /* Allow MSI interrupts */ |
1317 | #define PCI_IRQ_MSIX (1 << 2) /* allow MSI-X interrupts */ | 1308 | #define PCI_IRQ_MSIX (1 << 2) /* Allow MSI-X interrupts */ |
1318 | #define PCI_IRQ_AFFINITY (1 << 3) /* auto-assign affinity */ | 1309 | #define PCI_IRQ_AFFINITY (1 << 3) /* Auto-assign affinity */ |
1319 | #define PCI_IRQ_ALL_TYPES \ | 1310 | #define PCI_IRQ_ALL_TYPES \ |
1320 | (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) | 1311 | (PCI_IRQ_LEGACY | PCI_IRQ_MSI | PCI_IRQ_MSIX) |
1321 | 1312 | ||
@@ -1334,8 +1325,8 @@ int pci_set_vga_state(struct pci_dev *pdev, bool decode, | |||
1334 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) | 1325 | #define pci_pool_free(pool, vaddr, addr) dma_pool_free(pool, vaddr, addr) |
1335 | 1326 | ||
1336 | struct msix_entry { | 1327 | struct msix_entry { |
1337 | u32 vector; /* kernel uses to write allocated vector */ | 1328 | u32 vector; /* Kernel uses to write allocated vector */ |
1338 | u16 entry; /* driver uses to specify entry, OS writes */ | 1329 | u16 entry; /* Driver uses to specify entry, OS writes */ |
1339 | }; | 1330 | }; |
1340 | 1331 | ||
1341 | #ifdef CONFIG_PCI_MSI | 1332 | #ifdef CONFIG_PCI_MSI |
@@ -1375,10 +1366,10 @@ static inline int pci_msi_enabled(void) { return 0; } | |||
1375 | static inline int pci_enable_msi(struct pci_dev *dev) | 1366 | static inline int pci_enable_msi(struct pci_dev *dev) |
1376 | { return -ENOSYS; } | 1367 | { return -ENOSYS; } |
1377 | static inline int pci_enable_msix_range(struct pci_dev *dev, | 1368 | static inline int pci_enable_msix_range(struct pci_dev *dev, |
1378 | struct msix_entry *entries, int minvec, int maxvec) | 1369 | struct msix_entry *entries, int minvec, int maxvec) |
1379 | { return -ENOSYS; } | 1370 | { return -ENOSYS; } |
1380 | static inline int pci_enable_msix_exact(struct pci_dev *dev, | 1371 | static inline int pci_enable_msix_exact(struct pci_dev *dev, |
1381 | struct msix_entry *entries, int nvec) | 1372 | struct msix_entry *entries, int nvec) |
1382 | { return -ENOSYS; } | 1373 | { return -ENOSYS; } |
1383 | 1374 | ||
1384 | static inline int | 1375 | static inline int |
@@ -1543,9 +1534,9 @@ static inline int acpi_pci_bus_find_domain_nr(struct pci_bus *bus) | |||
1543 | int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); | 1534 | int pci_bus_find_domain_nr(struct pci_bus *bus, struct device *parent); |
1544 | #endif | 1535 | #endif |
1545 | 1536 | ||
1546 | /* some architectures require additional setup to direct VGA traffic */ | 1537 | /* Some architectures require additional setup to direct VGA traffic */ |
1547 | typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, | 1538 | typedef int (*arch_set_vga_state_t)(struct pci_dev *pdev, bool decode, |
1548 | unsigned int command_bits, u32 flags); | 1539 | unsigned int command_bits, u32 flags); |
1549 | void pci_register_set_vga_state(arch_set_vga_state_t func); | 1540 | void pci_register_set_vga_state(arch_set_vga_state_t func); |
1550 | 1541 | ||
1551 | static inline int | 1542 | static inline int |
@@ -1584,10 +1575,9 @@ static inline void pci_clear_flags(int flags) { } | |||
1584 | static inline int pci_has_flag(int flag) { return 0; } | 1575 | static inline int pci_has_flag(int flag) { return 0; } |
1585 | 1576 | ||
1586 | /* | 1577 | /* |
1587 | * If the system does not have PCI, clearly these return errors. Define | 1578 | * If the system does not have PCI, clearly these return errors. Define |
1588 | * these as simple inline functions to avoid hair in drivers. | 1579 | * these as simple inline functions to avoid hair in drivers. |
1589 | */ | 1580 | */ |
1590 | |||
1591 | #define _PCI_NOP(o, s, t) \ | 1581 | #define _PCI_NOP(o, s, t) \ |
1592 | static inline int pci_##o##_config_##s(struct pci_dev *dev, \ | 1582 | static inline int pci_##o##_config_##s(struct pci_dev *dev, \ |
1593 | int where, t val) \ | 1583 | int where, t val) \ |
@@ -1686,6 +1676,13 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; } | |||
1686 | #define dev_is_pf(d) (false) | 1676 | #define dev_is_pf(d) (false) |
1687 | static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) | 1677 | static inline bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags) |
1688 | { return false; } | 1678 | { return false; } |
1679 | static inline int pci_irqd_intx_xlate(struct irq_domain *d, | ||
1680 | struct device_node *node, | ||
1681 | const u32 *intspec, | ||
1682 | unsigned int intsize, | ||
1683 | unsigned long *out_hwirq, | ||
1684 | unsigned int *out_type) | ||
1685 | { return -EINVAL; } | ||
1689 | #endif /* CONFIG_PCI */ | 1686 | #endif /* CONFIG_PCI */ |
1690 | 1687 | ||
1691 | /* Include architecture-dependent settings and functions */ | 1688 | /* Include architecture-dependent settings and functions */ |
@@ -1726,8 +1723,10 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); | |||
1726 | #define pci_root_bus_fwnode(bus) NULL | 1723 | #define pci_root_bus_fwnode(bus) NULL |
1727 | #endif | 1724 | #endif |
1728 | 1725 | ||
1729 | /* these helpers provide future and backwards compatibility | 1726 | /* |
1730 | * for accessing popular PCI BAR info */ | 1727 | * These helpers provide future and backwards compatibility |
1728 | * for accessing popular PCI BAR info | ||
1729 | */ | ||
1731 | #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) | 1730 | #define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) |
1732 | #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) | 1731 | #define pci_resource_end(dev, bar) ((dev)->resource[(bar)].end) |
1733 | #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) | 1732 | #define pci_resource_flags(dev, bar) ((dev)->resource[(bar)].flags) |
@@ -1739,7 +1738,8 @@ int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma); | |||
1739 | (pci_resource_end((dev), (bar)) - \ | 1738 | (pci_resource_end((dev), (bar)) - \ |
1740 | pci_resource_start((dev), (bar)) + 1)) | 1739 | pci_resource_start((dev), (bar)) + 1)) |
1741 | 1740 | ||
1742 | /* Similar to the helpers above, these manipulate per-pci_dev | 1741 | /* |
1742 | * Similar to the helpers above, these manipulate per-pci_dev | ||
1743 | * driver-specific data. They are really just a wrapper around | 1743 | * driver-specific data. They are really just a wrapper around |
1744 | * the generic device structure functions of these calls. | 1744 | * the generic device structure functions of these calls. |
1745 | */ | 1745 | */ |
@@ -1753,16 +1753,14 @@ static inline void pci_set_drvdata(struct pci_dev *pdev, void *data) | |||
1753 | dev_set_drvdata(&pdev->dev, data); | 1753 | dev_set_drvdata(&pdev->dev, data); |
1754 | } | 1754 | } |
1755 | 1755 | ||
1756 | /* If you want to know what to call your pci_dev, ask this function. | ||
1757 | * Again, it's a wrapper around the generic device. | ||
1758 | */ | ||
1759 | static inline const char *pci_name(const struct pci_dev *pdev) | 1756 | static inline const char *pci_name(const struct pci_dev *pdev) |
1760 | { | 1757 | { |
1761 | return dev_name(&pdev->dev); | 1758 | return dev_name(&pdev->dev); |
1762 | } | 1759 | } |
1763 | 1760 | ||
1764 | 1761 | ||
1765 | /* Some archs don't want to expose struct resource to userland as-is | 1762 | /* |
1763 | * Some archs don't want to expose struct resource to userland as-is | ||
1766 | * in sysfs and /proc | 1764 | * in sysfs and /proc |
1767 | */ | 1765 | */ |
1768 | #ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER | 1766 | #ifdef HAVE_ARCH_PCI_RESOURCE_TO_USER |
@@ -1781,16 +1779,16 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
1781 | 1779 | ||
1782 | 1780 | ||
1783 | /* | 1781 | /* |
1784 | * The world is not perfect and supplies us with broken PCI devices. | 1782 | * The world is not perfect and supplies us with broken PCI devices. |
1785 | * For at least a part of these bugs we need a work-around, so both | 1783 | * For at least a part of these bugs we need a work-around, so both |
1786 | * generic (drivers/pci/quirks.c) and per-architecture code can define | 1784 | * generic (drivers/pci/quirks.c) and per-architecture code can define |
1787 | * fixup hooks to be called for particular buggy devices. | 1785 | * fixup hooks to be called for particular buggy devices. |
1788 | */ | 1786 | */ |
1789 | 1787 | ||
1790 | struct pci_fixup { | 1788 | struct pci_fixup { |
1791 | u16 vendor; /* You can use PCI_ANY_ID here of course */ | 1789 | u16 vendor; /* Or PCI_ANY_ID */ |
1792 | u16 device; /* You can use PCI_ANY_ID here of course */ | 1790 | u16 device; /* Or PCI_ANY_ID */ |
1793 | u32 class; /* You can use PCI_ANY_ID here too */ | 1791 | u32 class; /* Or PCI_ANY_ID */ |
1794 | unsigned int class_shift; /* should be 0, 8, 16 */ | 1792 | unsigned int class_shift; /* should be 0, 8, 16 */ |
1795 | void (*hook)(struct pci_dev *dev); | 1793 | void (*hook)(struct pci_dev *dev); |
1796 | }; | 1794 | }; |
@@ -1832,23 +1830,19 @@ enum pci_fixup_pass { | |||
1832 | #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ | 1830 | #define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \ |
1833 | class_shift, hook) \ | 1831 | class_shift, hook) \ |
1834 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | 1832 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ |
1835 | resume##hook, vendor, device, class, \ | 1833 | resume##hook, vendor, device, class, class_shift, hook) |
1836 | class_shift, hook) | ||
1837 | #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ | 1834 | #define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \ |
1838 | class_shift, hook) \ | 1835 | class_shift, hook) \ |
1839 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ | 1836 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ |
1840 | resume_early##hook, vendor, device, \ | 1837 | resume_early##hook, vendor, device, class, class_shift, hook) |
1841 | class, class_shift, hook) | ||
1842 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ | 1838 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \ |
1843 | class_shift, hook) \ | 1839 | class_shift, hook) \ |
1844 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ | 1840 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ |
1845 | suspend##hook, vendor, device, class, \ | 1841 | suspend##hook, vendor, device, class, class_shift, hook) |
1846 | class_shift, hook) | ||
1847 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ | 1842 | #define DECLARE_PCI_FIXUP_CLASS_SUSPEND_LATE(vendor, device, class, \ |
1848 | class_shift, hook) \ | 1843 | class_shift, hook) \ |
1849 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ | 1844 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ |
1850 | suspend_late##hook, vendor, device, \ | 1845 | suspend_late##hook, vendor, device, class, class_shift, hook) |
1851 | class, class_shift, hook) | ||
1852 | 1846 | ||
1853 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ | 1847 | #define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \ |
1854 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ | 1848 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \ |
@@ -1864,20 +1858,16 @@ enum pci_fixup_pass { | |||
1864 | hook, vendor, device, PCI_ANY_ID, 0, hook) | 1858 | hook, vendor, device, PCI_ANY_ID, 0, hook) |
1865 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ | 1859 | #define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \ |
1866 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ | 1860 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \ |
1867 | resume##hook, vendor, device, \ | 1861 | resume##hook, vendor, device, PCI_ANY_ID, 0, hook) |
1868 | PCI_ANY_ID, 0, hook) | ||
1869 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ | 1862 | #define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \ |
1870 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ | 1863 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \ |
1871 | resume_early##hook, vendor, device, \ | 1864 | resume_early##hook, vendor, device, PCI_ANY_ID, 0, hook) |
1872 | PCI_ANY_ID, 0, hook) | ||
1873 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ | 1865 | #define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \ |
1874 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ | 1866 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \ |
1875 | suspend##hook, vendor, device, \ | 1867 | suspend##hook, vendor, device, PCI_ANY_ID, 0, hook) |
1876 | PCI_ANY_ID, 0, hook) | ||
1877 | #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ | 1868 | #define DECLARE_PCI_FIXUP_SUSPEND_LATE(vendor, device, hook) \ |
1878 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ | 1869 | DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend_late, \ |
1879 | suspend_late##hook, vendor, device, \ | 1870 | suspend_late##hook, vendor, device, PCI_ANY_ID, 0, hook) |
1880 | PCI_ANY_ID, 0, hook) | ||
1881 | 1871 | ||
1882 | #ifdef CONFIG_PCI_QUIRKS | 1872 | #ifdef CONFIG_PCI_QUIRKS |
1883 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); | 1873 | void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev); |
@@ -1964,6 +1954,7 @@ int pci_vfs_assigned(struct pci_dev *dev); | |||
1964 | int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); | 1954 | int pci_sriov_set_totalvfs(struct pci_dev *dev, u16 numvfs); |
1965 | int pci_sriov_get_totalvfs(struct pci_dev *dev); | 1955 | int pci_sriov_get_totalvfs(struct pci_dev *dev); |
1966 | resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); | 1956 | resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno); |
1957 | void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe); | ||
1967 | #else | 1958 | #else |
1968 | static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) | 1959 | static inline int pci_iov_virtfn_bus(struct pci_dev *dev, int id) |
1969 | { | 1960 | { |
@@ -1991,6 +1982,7 @@ static inline int pci_sriov_get_totalvfs(struct pci_dev *dev) | |||
1991 | { return 0; } | 1982 | { return 0; } |
1992 | static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) | 1983 | static inline resource_size_t pci_iov_resource_size(struct pci_dev *dev, int resno) |
1993 | { return 0; } | 1984 | { return 0; } |
1985 | static inline void pci_vf_drivers_autoprobe(struct pci_dev *dev, bool probe) { } | ||
1994 | #endif | 1986 | #endif |
1995 | 1987 | ||
1996 | #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) | 1988 | #if defined(CONFIG_HOTPLUG_PCI) || defined(CONFIG_HOTPLUG_PCI_MODULE) |
@@ -2061,6 +2053,7 @@ void pci_request_acs(void); | |||
2061 | bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); | 2053 | bool pci_acs_enabled(struct pci_dev *pdev, u16 acs_flags); |
2062 | bool pci_acs_path_enabled(struct pci_dev *start, | 2054 | bool pci_acs_path_enabled(struct pci_dev *start, |
2063 | struct pci_dev *end, u16 acs_flags); | 2055 | struct pci_dev *end, u16 acs_flags); |
2056 | int pci_enable_atomic_ops_to_root(struct pci_dev *dev, u32 cap_mask); | ||
2064 | 2057 | ||
2065 | #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ | 2058 | #define PCI_VPD_LRDT 0x80 /* Large Resource Data Type */ |
2066 | #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) | 2059 | #define PCI_VPD_LRDT_ID(x) ((x) | PCI_VPD_LRDT) |
@@ -2112,7 +2105,7 @@ static inline u16 pci_vpd_lrdt_size(const u8 *lrdt) | |||
2112 | */ | 2105 | */ |
2113 | static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) | 2106 | static inline u16 pci_vpd_lrdt_tag(const u8 *lrdt) |
2114 | { | 2107 | { |
2115 | return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); | 2108 | return (u16)(lrdt[0] & PCI_VPD_LRDT_TIN_MASK); |
2116 | } | 2109 | } |
2117 | 2110 | ||
2118 | /** | 2111 | /** |
@@ -2182,6 +2175,9 @@ void pci_release_of_node(struct pci_dev *dev); | |||
2182 | void pci_set_bus_of_node(struct pci_bus *bus); | 2175 | void pci_set_bus_of_node(struct pci_bus *bus); |
2183 | void pci_release_bus_of_node(struct pci_bus *bus); | 2176 | void pci_release_bus_of_node(struct pci_bus *bus); |
2184 | struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); | 2177 | struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus); |
2178 | int pci_parse_request_of_pci_ranges(struct device *dev, | ||
2179 | struct list_head *resources, | ||
2180 | struct resource **bus_range); | ||
2185 | 2181 | ||
2186 | /* Arch may override this (weak) */ | 2182 | /* Arch may override this (weak) */ |
2187 | struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); | 2183 | struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus); |
@@ -2197,7 +2193,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | |||
2197 | return bus ? bus->dev.of_node : NULL; | 2193 | return bus ? bus->dev.of_node : NULL; |
2198 | } | 2194 | } |
2199 | 2195 | ||
2200 | #else /* CONFIG_OF */ | 2196 | #else /* CONFIG_OF */ |
2201 | static inline void pci_set_of_node(struct pci_dev *dev) { } | 2197 | static inline void pci_set_of_node(struct pci_dev *dev) { } |
2202 | static inline void pci_release_of_node(struct pci_dev *dev) { } | 2198 | static inline void pci_release_of_node(struct pci_dev *dev) { } |
2203 | static inline void pci_set_bus_of_node(struct pci_bus *bus) { } | 2199 | static inline void pci_set_bus_of_node(struct pci_bus *bus) { } |
@@ -2206,6 +2202,12 @@ static inline struct device_node * | |||
2206 | pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } | 2202 | pci_device_to_OF_node(const struct pci_dev *pdev) { return NULL; } |
2207 | static inline struct irq_domain * | 2203 | static inline struct irq_domain * |
2208 | pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } | 2204 | pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; } |
2205 | static inline int pci_parse_request_of_pci_ranges(struct device *dev, | ||
2206 | struct list_head *resources, | ||
2207 | struct resource **bus_range) | ||
2208 | { | ||
2209 | return -EINVAL; | ||
2210 | } | ||
2209 | #endif /* CONFIG_OF */ | 2211 | #endif /* CONFIG_OF */ |
2210 | 2212 | ||
2211 | #ifdef CONFIG_ACPI | 2213 | #ifdef CONFIG_ACPI |
@@ -2231,7 +2233,7 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, | |||
2231 | int (*fn)(struct pci_dev *pdev, | 2233 | int (*fn)(struct pci_dev *pdev, |
2232 | u16 alias, void *data), void *data); | 2234 | u16 alias, void *data), void *data); |
2233 | 2235 | ||
2234 | /* helper functions for operation of device flag */ | 2236 | /* Helper functions for operation of device flag */ |
2235 | static inline void pci_set_dev_assigned(struct pci_dev *pdev) | 2237 | static inline void pci_set_dev_assigned(struct pci_dev *pdev) |
2236 | { | 2238 | { |
2237 | pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; | 2239 | pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED; |
@@ -2278,7 +2280,55 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev) | |||
2278 | return false; | 2280 | return false; |
2279 | } | 2281 | } |
2280 | 2282 | ||
2281 | /* provide the legacy pci_dma_* API */ | 2283 | /** |
2284 | * pci_uevent_ers - emit a uevent during recovery path of pci device | ||
2285 | * @pdev: pci device to check | ||
2286 | * @err_type: type of error event | ||
2287 | * | ||
2288 | */ | ||
2289 | static inline void pci_uevent_ers(struct pci_dev *pdev, | ||
2290 | enum pci_ers_result err_type) | ||
2291 | { | ||
2292 | int idx = 0; | ||
2293 | char *envp[3]; | ||
2294 | |||
2295 | switch (err_type) { | ||
2296 | case PCI_ERS_RESULT_NONE: | ||
2297 | case PCI_ERS_RESULT_CAN_RECOVER: | ||
2298 | envp[idx++] = "ERROR_EVENT=BEGIN_RECOVERY"; | ||
2299 | envp[idx++] = "DEVICE_ONLINE=0"; | ||
2300 | break; | ||
2301 | case PCI_ERS_RESULT_RECOVERED: | ||
2302 | envp[idx++] = "ERROR_EVENT=SUCCESSFUL_RECOVERY"; | ||
2303 | envp[idx++] = "DEVICE_ONLINE=1"; | ||
2304 | break; | ||
2305 | case PCI_ERS_RESULT_DISCONNECT: | ||
2306 | envp[idx++] = "ERROR_EVENT=FAILED_RECOVERY"; | ||
2307 | envp[idx++] = "DEVICE_ONLINE=0"; | ||
2308 | break; | ||
2309 | default: | ||
2310 | break; | ||
2311 | } | ||
2312 | |||
2313 | if (idx > 0) { | ||
2314 | envp[idx++] = NULL; | ||
2315 | kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, envp); | ||
2316 | } | ||
2317 | } | ||
2318 | |||
2319 | /* Provide the legacy pci_dma_* API */ | ||
2282 | #include <linux/pci-dma-compat.h> | 2320 | #include <linux/pci-dma-compat.h> |
2283 | 2321 | ||
2322 | #define pci_printk(level, pdev, fmt, arg...) \ | ||
2323 | dev_printk(level, &(pdev)->dev, fmt, ##arg) | ||
2324 | |||
2325 | #define pci_emerg(pdev, fmt, arg...) dev_emerg(&(pdev)->dev, fmt, ##arg) | ||
2326 | #define pci_alert(pdev, fmt, arg...) dev_alert(&(pdev)->dev, fmt, ##arg) | ||
2327 | #define pci_crit(pdev, fmt, arg...) dev_crit(&(pdev)->dev, fmt, ##arg) | ||
2328 | #define pci_err(pdev, fmt, arg...) dev_err(&(pdev)->dev, fmt, ##arg) | ||
2329 | #define pci_warn(pdev, fmt, arg...) dev_warn(&(pdev)->dev, fmt, ##arg) | ||
2330 | #define pci_notice(pdev, fmt, arg...) dev_notice(&(pdev)->dev, fmt, ##arg) | ||
2331 | #define pci_info(pdev, fmt, arg...) dev_info(&(pdev)->dev, fmt, ##arg) | ||
2332 | #define pci_dbg(pdev, fmt, arg...) dev_dbg(&(pdev)->dev, fmt, ##arg) | ||
2333 | |||
2284 | #endif /* LINUX_PCI_H */ | 2334 | #endif /* LINUX_PCI_H */ |
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h index 2e855afa0212..26213024e81b 100644 --- a/include/linux/pci_hotplug.h +++ b/include/linux/pci_hotplug.h | |||
@@ -1,3 +1,4 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
1 | /* | 2 | /* |
2 | * PCI HotPlug Core Functions | 3 | * PCI HotPlug Core Functions |
3 | * | 4 | * |
@@ -7,21 +8,6 @@ | |||
7 | * | 8 | * |
8 | * All rights reserved. | 9 | * All rights reserved. |
9 | * | 10 | * |
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or (at | ||
13 | * your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
18 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
19 | * details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | * Send feedback to <kristen.c.accardi@intel.com> | 11 | * Send feedback to <kristen.c.accardi@intel.com> |
26 | * | 12 | * |
27 | */ | 13 | */ |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index ab20dc5db423..eb13e84e1fef 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
@@ -2381,6 +2381,8 @@ | |||
2381 | 2381 | ||
2382 | #define PCI_VENDOR_ID_LENOVO 0x17aa | 2382 | #define PCI_VENDOR_ID_LENOVO 0x17aa |
2383 | 2383 | ||
2384 | #define PCI_VENDOR_ID_CDNS 0x17cd | ||
2385 | |||
2384 | #define PCI_VENDOR_ID_ARECA 0x17d3 | 2386 | #define PCI_VENDOR_ID_ARECA 0x17d3 |
2385 | #define PCI_DEVICE_ID_ARECA_1110 0x1110 | 2387 | #define PCI_DEVICE_ID_ARECA_1110 0x1110 |
2386 | #define PCI_DEVICE_ID_ARECA_1120 0x1120 | 2388 | #define PCI_DEVICE_ID_ARECA_1120 0x1120 |
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h index 6658d9ee5257..864d167a1073 100644 --- a/include/linux/percpu-refcount.h +++ b/include/linux/percpu-refcount.h | |||
@@ -139,12 +139,12 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref, | |||
139 | * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in | 139 | * when using it as a pointer, __PERCPU_REF_ATOMIC may be set in |
140 | * between contaminating the pointer value, meaning that | 140 | * between contaminating the pointer value, meaning that |
141 | * READ_ONCE() is required when fetching it. | 141 | * READ_ONCE() is required when fetching it. |
142 | * | ||
143 | * The smp_read_barrier_depends() implied by READ_ONCE() pairs | ||
144 | * with smp_store_release() in __percpu_ref_switch_to_percpu(). | ||
142 | */ | 145 | */ |
143 | percpu_ptr = READ_ONCE(ref->percpu_count_ptr); | 146 | percpu_ptr = READ_ONCE(ref->percpu_count_ptr); |
144 | 147 | ||
145 | /* paired with smp_store_release() in __percpu_ref_switch_to_percpu() */ | ||
146 | smp_read_barrier_depends(); | ||
147 | |||
148 | /* | 148 | /* |
149 | * Theoretically, the following could test just ATOMIC; however, | 149 | * Theoretically, the following could test just ATOMIC; however, |
150 | * then we'd have to mask off DEAD separately as DEAD may be | 150 | * then we'd have to mask off DEAD separately as DEAD may be |
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h index 73a7bf30fe9a..4f052496cdfd 100644 --- a/include/linux/percpu_counter.h +++ b/include/linux/percpu_counter.h | |||
@@ -86,7 +86,7 @@ static inline s64 percpu_counter_read_positive(struct percpu_counter *fbc) | |||
86 | return 0; | 86 | return 0; |
87 | } | 87 | } |
88 | 88 | ||
89 | static inline int percpu_counter_initialized(struct percpu_counter *fbc) | 89 | static inline bool percpu_counter_initialized(struct percpu_counter *fbc) |
90 | { | 90 | { |
91 | return (fbc->counters != NULL); | 91 | return (fbc->counters != NULL); |
92 | } | 92 | } |
@@ -167,9 +167,9 @@ static inline s64 percpu_counter_sum(struct percpu_counter *fbc) | |||
167 | return percpu_counter_read(fbc); | 167 | return percpu_counter_read(fbc); |
168 | } | 168 | } |
169 | 169 | ||
170 | static inline int percpu_counter_initialized(struct percpu_counter *fbc) | 170 | static inline bool percpu_counter_initialized(struct percpu_counter *fbc) |
171 | { | 171 | { |
172 | return 1; | 172 | return true; |
173 | } | 173 | } |
174 | 174 | ||
175 | #endif /* CONFIG_SMP */ | 175 | #endif /* CONFIG_SMP */ |
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h index 43b1d7648e82..a03c2642a87c 100644 --- a/include/linux/pfn_t.h +++ b/include/linux/pfn_t.h | |||
@@ -15,8 +15,10 @@ | |||
15 | #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) | 15 | #define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2)) |
16 | #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) | 16 | #define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3)) |
17 | #define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) | 17 | #define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4)) |
18 | #define PFN_SPECIAL (1ULL << (BITS_PER_LONG_LONG - 5)) | ||
18 | 19 | ||
19 | #define PFN_FLAGS_TRACE \ | 20 | #define PFN_FLAGS_TRACE \ |
21 | { PFN_SPECIAL, "SPECIAL" }, \ | ||
20 | { PFN_SG_CHAIN, "SG_CHAIN" }, \ | 22 | { PFN_SG_CHAIN, "SG_CHAIN" }, \ |
21 | { PFN_SG_LAST, "SG_LAST" }, \ | 23 | { PFN_SG_LAST, "SG_LAST" }, \ |
22 | { PFN_DEV, "DEV" }, \ | 24 | { PFN_DEV, "DEV" }, \ |
@@ -120,4 +122,15 @@ pud_t pud_mkdevmap(pud_t pud); | |||
120 | #endif | 122 | #endif |
121 | #endif /* __HAVE_ARCH_PTE_DEVMAP */ | 123 | #endif /* __HAVE_ARCH_PTE_DEVMAP */ |
122 | 124 | ||
125 | #ifdef __HAVE_ARCH_PTE_SPECIAL | ||
126 | static inline bool pfn_t_special(pfn_t pfn) | ||
127 | { | ||
128 | return (pfn.val & PFN_SPECIAL) == PFN_SPECIAL; | ||
129 | } | ||
130 | #else | ||
131 | static inline bool pfn_t_special(pfn_t pfn) | ||
132 | { | ||
133 | return false; | ||
134 | } | ||
135 | #endif /* __HAVE_ARCH_PTE_SPECIAL */ | ||
123 | #endif /* _LINUX_PFN_T_H_ */ | 136 | #endif /* _LINUX_PFN_T_H_ */ |
diff --git a/include/linux/phy.h b/include/linux/phy.h index dc82a07cb4fd..5a0c3e53e7c2 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
@@ -59,6 +59,7 @@ | |||
59 | 59 | ||
60 | #define PHY_HAS_INTERRUPT 0x00000001 | 60 | #define PHY_HAS_INTERRUPT 0x00000001 |
61 | #define PHY_IS_INTERNAL 0x00000002 | 61 | #define PHY_IS_INTERNAL 0x00000002 |
62 | #define PHY_RST_AFTER_CLK_EN 0x00000004 | ||
62 | #define MDIO_DEVICE_IS_PHY 0x80000000 | 63 | #define MDIO_DEVICE_IS_PHY 0x80000000 |
63 | 64 | ||
64 | /* Interface Mode definitions */ | 65 | /* Interface Mode definitions */ |
@@ -468,7 +469,6 @@ struct phy_device { | |||
468 | /* Interrupt and Polling infrastructure */ | 469 | /* Interrupt and Polling infrastructure */ |
469 | struct work_struct phy_queue; | 470 | struct work_struct phy_queue; |
470 | struct delayed_work state_queue; | 471 | struct delayed_work state_queue; |
471 | atomic_t irq_disable; | ||
472 | 472 | ||
473 | struct mutex lock; | 473 | struct mutex lock; |
474 | 474 | ||
@@ -497,19 +497,19 @@ struct phy_device { | |||
497 | * flags: A bitfield defining certain other features this PHY | 497 | * flags: A bitfield defining certain other features this PHY |
498 | * supports (like interrupts) | 498 | * supports (like interrupts) |
499 | * | 499 | * |
500 | * The drivers must implement config_aneg and read_status. All | 500 | * All functions are optional. If config_aneg or read_status |
501 | * other functions are optional. Note that none of these | 501 | * are not implemented, the phy core uses the genphy versions. |
502 | * functions should be called from interrupt time. The goal is | 502 | * Note that none of these functions should be called from |
503 | * for the bus read/write functions to be able to block when the | 503 | * interrupt time. The goal is for the bus read/write functions |
504 | * bus transaction is happening, and be freed up by an interrupt | 504 | * to be able to block when the bus transaction is happening, |
505 | * (The MPC85xx has this ability, though it is not currently | 505 | * and be freed up by an interrupt (The MPC85xx has this ability, |
506 | * supported in the driver). | 506 | * though it is not currently supported in the driver). |
507 | */ | 507 | */ |
508 | struct phy_driver { | 508 | struct phy_driver { |
509 | struct mdio_driver_common mdiodrv; | 509 | struct mdio_driver_common mdiodrv; |
510 | u32 phy_id; | 510 | u32 phy_id; |
511 | char *name; | 511 | char *name; |
512 | unsigned int phy_id_mask; | 512 | u32 phy_id_mask; |
513 | u32 features; | 513 | u32 features; |
514 | u32 flags; | 514 | u32 flags; |
515 | const void *driver_data; | 515 | const void *driver_data; |
@@ -634,6 +634,9 @@ struct phy_driver { | |||
634 | int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, | 634 | int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum, |
635 | u16 val); | 635 | u16 val); |
636 | 636 | ||
637 | int (*read_page)(struct phy_device *dev); | ||
638 | int (*write_page)(struct phy_device *dev, int page); | ||
639 | |||
637 | /* Get the size and type of the eeprom contained within a plug-in | 640 | /* Get the size and type of the eeprom contained within a plug-in |
638 | * module */ | 641 | * module */ |
639 | int (*module_info)(struct phy_device *dev, | 642 | int (*module_info)(struct phy_device *dev, |
@@ -690,6 +693,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, | |||
690 | size_t phy_speeds(unsigned int *speeds, size_t size, | 693 | size_t phy_speeds(unsigned int *speeds, size_t size, |
691 | unsigned long *mask, size_t maxbit); | 694 | unsigned long *mask, size_t maxbit); |
692 | 695 | ||
696 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); | ||
697 | |||
693 | /** | 698 | /** |
694 | * phy_read_mmd - Convenience function for reading a register | 699 | * phy_read_mmd - Convenience function for reading a register |
695 | * from an MMD on a given PHY. | 700 | * from an MMD on a given PHY. |
@@ -716,6 +721,18 @@ static inline int phy_read(struct phy_device *phydev, u32 regnum) | |||
716 | } | 721 | } |
717 | 722 | ||
718 | /** | 723 | /** |
724 | * __phy_read - convenience function for reading a given PHY register | ||
725 | * @phydev: the phy_device struct | ||
726 | * @regnum: register number to read | ||
727 | * | ||
728 | * The caller must have taken the MDIO bus lock. | ||
729 | */ | ||
730 | static inline int __phy_read(struct phy_device *phydev, u32 regnum) | ||
731 | { | ||
732 | return __mdiobus_read(phydev->mdio.bus, phydev->mdio.addr, regnum); | ||
733 | } | ||
734 | |||
735 | /** | ||
719 | * phy_write - Convenience function for writing a given PHY register | 736 | * phy_write - Convenience function for writing a given PHY register |
720 | * @phydev: the phy_device struct | 737 | * @phydev: the phy_device struct |
721 | * @regnum: register number to write | 738 | * @regnum: register number to write |
@@ -731,6 +748,72 @@ static inline int phy_write(struct phy_device *phydev, u32 regnum, u16 val) | |||
731 | } | 748 | } |
732 | 749 | ||
733 | /** | 750 | /** |
751 | * __phy_write - Convenience function for writing a given PHY register | ||
752 | * @phydev: the phy_device struct | ||
753 | * @regnum: register number to write | ||
754 | * @val: value to write to @regnum | ||
755 | * | ||
756 | * The caller must have taken the MDIO bus lock. | ||
757 | */ | ||
758 | static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) | ||
759 | { | ||
760 | return __mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, | ||
761 | val); | ||
762 | } | ||
763 | |||
764 | int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); | ||
765 | int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); | ||
766 | |||
767 | /** | ||
768 | * __phy_set_bits - Convenience function for setting bits in a PHY register | ||
769 | * @phydev: the phy_device struct | ||
770 | * @regnum: register number to write | ||
771 | * @val: bits to set | ||
772 | * | ||
773 | * The caller must have taken the MDIO bus lock. | ||
774 | */ | ||
775 | static inline int __phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) | ||
776 | { | ||
777 | return __phy_modify(phydev, regnum, 0, val); | ||
778 | } | ||
779 | |||
780 | /** | ||
781 | * __phy_clear_bits - Convenience function for clearing bits in a PHY register | ||
782 | * @phydev: the phy_device struct | ||
783 | * @regnum: register number to write | ||
784 | * @val: bits to clear | ||
785 | * | ||
786 | * The caller must have taken the MDIO bus lock. | ||
787 | */ | ||
788 | static inline int __phy_clear_bits(struct phy_device *phydev, u32 regnum, | ||
789 | u16 val) | ||
790 | { | ||
791 | return __phy_modify(phydev, regnum, val, 0); | ||
792 | } | ||
793 | |||
794 | /** | ||
795 | * phy_set_bits - Convenience function for setting bits in a PHY register | ||
796 | * @phydev: the phy_device struct | ||
797 | * @regnum: register number to write | ||
798 | * @val: bits to set | ||
799 | */ | ||
800 | static inline int phy_set_bits(struct phy_device *phydev, u32 regnum, u16 val) | ||
801 | { | ||
802 | return phy_modify(phydev, regnum, 0, val); | ||
803 | } | ||
804 | |||
805 | /** | ||
806 | * phy_clear_bits - Convenience function for clearing bits in a PHY register | ||
807 | * @phydev: the phy_device struct | ||
808 | * @regnum: register number to write | ||
809 | * @val: bits to clear | ||
810 | */ | ||
811 | static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) | ||
812 | { | ||
813 | return phy_modify(phydev, regnum, val, 0); | ||
814 | } | ||
815 | |||
816 | /** | ||
734 | * phy_interrupt_is_valid - Convenience function for testing a given PHY irq | 817 | * phy_interrupt_is_valid - Convenience function for testing a given PHY irq |
735 | * @phydev: the phy_device struct | 818 | * @phydev: the phy_device struct |
736 | * | 819 | * |
@@ -763,6 +846,20 @@ static inline bool phy_interface_mode_is_rgmii(phy_interface_t mode) | |||
763 | }; | 846 | }; |
764 | 847 | ||
765 | /** | 848 | /** |
849 | * phy_interface_mode_is_8023z() - does the phy interface mode use 802.3z | ||
850 | * negotiation | ||
851 | * @mode: one of &enum phy_interface_t | ||
852 | * | ||
853 | * Returns true if the phy interface mode uses the 16-bit negotiation | ||
854 | * word as defined in 802.3z. (See 802.3-2015 37.2.1 Config_Reg encoding) | ||
855 | */ | ||
856 | static inline bool phy_interface_mode_is_8023z(phy_interface_t mode) | ||
857 | { | ||
858 | return mode == PHY_INTERFACE_MODE_1000BASEX || | ||
859 | mode == PHY_INTERFACE_MODE_2500BASEX; | ||
860 | } | ||
861 | |||
862 | /** | ||
766 | * phy_interface_is_rgmii - Convenience function for testing if a PHY interface | 863 | * phy_interface_is_rgmii - Convenience function for testing if a PHY interface |
767 | * is RGMII (all variants) | 864 | * is RGMII (all variants) |
768 | * @phydev: the phy_device struct | 865 | * @phydev: the phy_device struct |
@@ -794,6 +891,14 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) | |||
794 | */ | 891 | */ |
795 | int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); | 892 | int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); |
796 | 893 | ||
894 | int phy_save_page(struct phy_device *phydev); | ||
895 | int phy_select_page(struct phy_device *phydev, int page); | ||
896 | int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); | ||
897 | int phy_read_paged(struct phy_device *phydev, int page, u32 regnum); | ||
898 | int phy_write_paged(struct phy_device *phydev, int page, u32 regnum, u16 val); | ||
899 | int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum, | ||
900 | u16 mask, u16 set); | ||
901 | |||
797 | struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, | 902 | struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, |
798 | bool is_c45, | 903 | bool is_c45, |
799 | struct phy_c45_device_ids *c45_ids); | 904 | struct phy_c45_device_ids *c45_ids); |
@@ -840,13 +945,11 @@ int phy_aneg_done(struct phy_device *phydev); | |||
840 | 945 | ||
841 | int phy_stop_interrupts(struct phy_device *phydev); | 946 | int phy_stop_interrupts(struct phy_device *phydev); |
842 | int phy_restart_aneg(struct phy_device *phydev); | 947 | int phy_restart_aneg(struct phy_device *phydev); |
948 | int phy_reset_after_clk_enable(struct phy_device *phydev); | ||
843 | 949 | ||
844 | static inline int phy_read_status(struct phy_device *phydev) | 950 | static inline void phy_device_reset(struct phy_device *phydev, int value) |
845 | { | 951 | { |
846 | if (!phydev->drv) | 952 | mdio_device_reset(&phydev->mdio, value); |
847 | return -EIO; | ||
848 | |||
849 | return phydev->drv->read_status(phydev); | ||
850 | } | 953 | } |
851 | 954 | ||
852 | #define phydev_err(_phydev, format, args...) \ | 955 | #define phydev_err(_phydev, format, args...) \ |
@@ -889,6 +992,18 @@ int genphy_c45_read_lpa(struct phy_device *phydev); | |||
889 | int genphy_c45_read_pma(struct phy_device *phydev); | 992 | int genphy_c45_read_pma(struct phy_device *phydev); |
890 | int genphy_c45_pma_setup_forced(struct phy_device *phydev); | 993 | int genphy_c45_pma_setup_forced(struct phy_device *phydev); |
891 | int genphy_c45_an_disable_aneg(struct phy_device *phydev); | 994 | int genphy_c45_an_disable_aneg(struct phy_device *phydev); |
995 | int genphy_c45_read_mdix(struct phy_device *phydev); | ||
996 | |||
997 | static inline int phy_read_status(struct phy_device *phydev) | ||
998 | { | ||
999 | if (!phydev->drv) | ||
1000 | return -EIO; | ||
1001 | |||
1002 | if (phydev->drv->read_status) | ||
1003 | return phydev->drv->read_status(phydev); | ||
1004 | else | ||
1005 | return genphy_read_status(phydev); | ||
1006 | } | ||
892 | 1007 | ||
893 | void phy_driver_unregister(struct phy_driver *drv); | 1008 | void phy_driver_unregister(struct phy_driver *drv); |
894 | void phy_drivers_unregister(struct phy_driver *drv, int n); | 1009 | void phy_drivers_unregister(struct phy_driver *drv, int n); |
@@ -898,7 +1013,7 @@ int phy_drivers_register(struct phy_driver *new_driver, int n, | |||
898 | void phy_state_machine(struct work_struct *work); | 1013 | void phy_state_machine(struct work_struct *work); |
899 | void phy_change(struct phy_device *phydev); | 1014 | void phy_change(struct phy_device *phydev); |
900 | void phy_change_work(struct work_struct *work); | 1015 | void phy_change_work(struct work_struct *work); |
901 | void phy_mac_interrupt(struct phy_device *phydev, int new_link); | 1016 | void phy_mac_interrupt(struct phy_device *phydev); |
902 | void phy_start_machine(struct phy_device *phydev); | 1017 | void phy_start_machine(struct phy_device *phydev); |
903 | void phy_stop_machine(struct phy_device *phydev); | 1018 | void phy_stop_machine(struct phy_device *phydev); |
904 | void phy_trigger_machine(struct phy_device *phydev, bool sync); | 1019 | void phy_trigger_machine(struct phy_device *phydev, bool sync); |
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index cf6392de6eb0..ee54453a40a0 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h | |||
@@ -24,9 +24,6 @@ extern void fixed_phy_unregister(struct phy_device *phydev); | |||
24 | extern int fixed_phy_set_link_update(struct phy_device *phydev, | 24 | extern int fixed_phy_set_link_update(struct phy_device *phydev, |
25 | int (*link_update)(struct net_device *, | 25 | int (*link_update)(struct net_device *, |
26 | struct fixed_phy_status *)); | 26 | struct fixed_phy_status *)); |
27 | extern int fixed_phy_update_state(struct phy_device *phydev, | ||
28 | const struct fixed_phy_status *status, | ||
29 | const struct fixed_phy_status *changed); | ||
30 | #else | 27 | #else |
31 | static inline int fixed_phy_add(unsigned int irq, int phy_id, | 28 | static inline int fixed_phy_add(unsigned int irq, int phy_id, |
32 | struct fixed_phy_status *status, | 29 | struct fixed_phy_status *status, |
@@ -50,12 +47,6 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, | |||
50 | { | 47 | { |
51 | return -ENODEV; | 48 | return -ENODEV; |
52 | } | 49 | } |
53 | static inline int fixed_phy_update_state(struct phy_device *phydev, | ||
54 | const struct fixed_phy_status *status, | ||
55 | const struct fixed_phy_status *changed) | ||
56 | { | ||
57 | return -ENODEV; | ||
58 | } | ||
59 | #endif /* CONFIG_FIXED_PHY */ | 50 | #endif /* CONFIG_FIXED_PHY */ |
60 | 51 | ||
61 | #endif /* __PHY_FIXED_H */ | 52 | #endif /* __PHY_FIXED_H */ |
diff --git a/include/linux/phylink.h b/include/linux/phylink.h index af67edd4ae38..bd137c273d38 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h | |||
@@ -7,6 +7,7 @@ | |||
7 | 7 | ||
8 | struct device_node; | 8 | struct device_node; |
9 | struct ethtool_cmd; | 9 | struct ethtool_cmd; |
10 | struct fwnode_handle; | ||
10 | struct net_device; | 11 | struct net_device; |
11 | 12 | ||
12 | enum { | 13 | enum { |
@@ -20,19 +21,31 @@ enum { | |||
20 | 21 | ||
21 | MLO_AN_PHY = 0, /* Conventional PHY */ | 22 | MLO_AN_PHY = 0, /* Conventional PHY */ |
22 | MLO_AN_FIXED, /* Fixed-link mode */ | 23 | MLO_AN_FIXED, /* Fixed-link mode */ |
23 | MLO_AN_SGMII, /* Cisco SGMII protocol */ | 24 | MLO_AN_INBAND, /* In-band protocol */ |
24 | MLO_AN_8023Z, /* 1000base-X protocol */ | ||
25 | }; | 25 | }; |
26 | 26 | ||
27 | static inline bool phylink_autoneg_inband(unsigned int mode) | 27 | static inline bool phylink_autoneg_inband(unsigned int mode) |
28 | { | 28 | { |
29 | return mode == MLO_AN_SGMII || mode == MLO_AN_8023Z; | 29 | return mode == MLO_AN_INBAND; |
30 | } | 30 | } |
31 | 31 | ||
32 | /** | ||
33 | * struct phylink_link_state - link state structure | ||
34 | * @advertising: ethtool bitmask containing advertised link modes | ||
35 | * @lp_advertising: ethtool bitmask containing link partner advertised link | ||
36 | * modes | ||
37 | * @interface: link &typedef phy_interface_t mode | ||
38 | * @speed: link speed, one of the SPEED_* constants. | ||
39 | * @duplex: link duplex mode, one of DUPLEX_* constants. | ||
40 | * @pause: link pause state, described by MLO_PAUSE_* constants. | ||
41 | * @link: true if the link is up. | ||
42 | * @an_enabled: true if autonegotiation is enabled/desired. | ||
43 | * @an_complete: true if autonegotiation has completed. | ||
44 | */ | ||
32 | struct phylink_link_state { | 45 | struct phylink_link_state { |
33 | __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); | 46 | __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising); |
34 | __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); | 47 | __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_advertising); |
35 | phy_interface_t interface; /* PHY_INTERFACE_xxx */ | 48 | phy_interface_t interface; |
36 | int speed; | 49 | int speed; |
37 | int duplex; | 50 | int duplex; |
38 | int pause; | 51 | int pause; |
@@ -41,72 +54,145 @@ struct phylink_link_state { | |||
41 | unsigned int an_complete:1; | 54 | unsigned int an_complete:1; |
42 | }; | 55 | }; |
43 | 56 | ||
57 | /** | ||
58 | * struct phylink_mac_ops - MAC operations structure. | ||
59 | * @validate: Validate and update the link configuration. | ||
60 | * @mac_link_state: Read the current link state from the hardware. | ||
61 | * @mac_config: configure the MAC for the selected mode and state. | ||
62 | * @mac_an_restart: restart 802.3z BaseX autonegotiation. | ||
63 | * @mac_link_down: take the link down. | ||
64 | * @mac_link_up: allow the link to come up. | ||
65 | * | ||
66 | * The individual methods are described more fully below. | ||
67 | */ | ||
44 | struct phylink_mac_ops { | 68 | struct phylink_mac_ops { |
45 | /** | ||
46 | * validate: validate and update the link configuration | ||
47 | * @ndev: net_device structure associated with MAC | ||
48 | * @config: configuration to validate | ||
49 | * | ||
50 | * Update the %config->supported and %config->advertised masks | ||
51 | * clearing bits that can not be supported. | ||
52 | * | ||
53 | * Note: the PHY may be able to transform from one connection | ||
54 | * technology to another, so, eg, don't clear 1000BaseX just | ||
55 | * because the MAC is unable to support it. This is more about | ||
56 | * clearing unsupported speeds and duplex settings. | ||
57 | * | ||
58 | * If the %config->interface mode is %PHY_INTERFACE_MODE_1000BASEX | ||
59 | * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode | ||
60 | * based on %config->advertised and/or %config->speed. | ||
61 | */ | ||
62 | void (*validate)(struct net_device *ndev, unsigned long *supported, | 69 | void (*validate)(struct net_device *ndev, unsigned long *supported, |
63 | struct phylink_link_state *state); | 70 | struct phylink_link_state *state); |
64 | 71 | int (*mac_link_state)(struct net_device *ndev, | |
65 | /* Read the current link state from the hardware */ | 72 | struct phylink_link_state *state); |
66 | int (*mac_link_state)(struct net_device *, struct phylink_link_state *); | ||
67 | |||
68 | /* Configure the MAC */ | ||
69 | /** | ||
70 | * mac_config: configure the MAC for the selected mode and state | ||
71 | * @ndev: net_device structure for the MAC | ||
72 | * @mode: one of MLO_AN_FIXED, MLO_AN_PHY, MLO_AN_8023Z, MLO_AN_SGMII | ||
73 | * @state: state structure | ||
74 | * | ||
75 | * The action performed depends on the currently selected mode: | ||
76 | * | ||
77 | * %MLO_AN_FIXED, %MLO_AN_PHY: | ||
78 | * set the specified speed, duplex, pause mode, and phy interface | ||
79 | * mode in the provided @state. | ||
80 | * %MLO_AN_8023Z: | ||
81 | * place the link in 1000base-X mode, advertising the parameters | ||
82 | * given in advertising in @state. | ||
83 | * %MLO_AN_SGMII: | ||
84 | * place the link in Cisco SGMII mode - there is no advertisment | ||
85 | * to make as the PHY communicates the speed and duplex to the | ||
86 | * MAC over the in-band control word. Configuration of the pause | ||
87 | * mode is as per MLO_AN_PHY since this is not included. | ||
88 | */ | ||
89 | void (*mac_config)(struct net_device *ndev, unsigned int mode, | 73 | void (*mac_config)(struct net_device *ndev, unsigned int mode, |
90 | const struct phylink_link_state *state); | 74 | const struct phylink_link_state *state); |
91 | |||
92 | /** | ||
93 | * mac_an_restart: restart 802.3z BaseX autonegotiation | ||
94 | * @ndev: net_device structure for the MAC | ||
95 | */ | ||
96 | void (*mac_an_restart)(struct net_device *ndev); | 75 | void (*mac_an_restart)(struct net_device *ndev); |
97 | 76 | void (*mac_link_down)(struct net_device *ndev, unsigned int mode); | |
98 | void (*mac_link_down)(struct net_device *, unsigned int mode); | 77 | void (*mac_link_up)(struct net_device *ndev, unsigned int mode, |
99 | void (*mac_link_up)(struct net_device *, unsigned int mode, | 78 | struct phy_device *phy); |
100 | struct phy_device *); | ||
101 | }; | 79 | }; |
102 | 80 | ||
103 | struct phylink *phylink_create(struct net_device *, struct device_node *, | 81 | #if 0 /* For kernel-doc purposes only. */ |
82 | /** | ||
83 | * validate - Validate and update the link configuration | ||
84 | * @ndev: a pointer to a &struct net_device for the MAC. | ||
85 | * @supported: ethtool bitmask for supported link modes. | ||
86 | * @state: a pointer to a &struct phylink_link_state. | ||
87 | * | ||
88 | * Clear bits in the @supported and @state->advertising masks that | ||
89 | * are not supportable by the MAC. | ||
90 | * | ||
91 | * Note that the PHY may be able to transform from one connection | ||
92 | * technology to another, so, eg, don't clear 1000BaseX just | ||
93 | * because the MAC is unable to BaseX mode. This is more about | ||
94 | * clearing unsupported speeds and duplex settings. | ||
95 | * | ||
96 | * If the @state->interface mode is %PHY_INTERFACE_MODE_1000BASEX | ||
97 | * or %PHY_INTERFACE_MODE_2500BASEX, select the appropriate mode | ||
98 | * based on @state->advertising and/or @state->speed and update | ||
99 | * @state->interface accordingly. | ||
100 | */ | ||
101 | void validate(struct net_device *ndev, unsigned long *supported, | ||
102 | struct phylink_link_state *state); | ||
103 | |||
104 | /** | ||
105 | * mac_link_state() - Read the current link state from the hardware | ||
106 | * @ndev: a pointer to a &struct net_device for the MAC. | ||
107 | * @state: a pointer to a &struct phylink_link_state. | ||
108 | * | ||
109 | * Read the current link state from the MAC, reporting the current | ||
110 | * speed in @state->speed, duplex mode in @state->duplex, pause mode | ||
111 | * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits, | ||
112 | * negotiation completion state in @state->an_complete, and link | ||
113 | * up state in @state->link. | ||
114 | */ | ||
115 | int mac_link_state(struct net_device *ndev, | ||
116 | struct phylink_link_state *state); | ||
117 | |||
118 | /** | ||
119 | * mac_config() - configure the MAC for the selected mode and state | ||
120 | * @ndev: a pointer to a &struct net_device for the MAC. | ||
121 | * @mode: one of %MLO_AN_FIXED, %MLO_AN_PHY, %MLO_AN_INBAND. | ||
122 | * @state: a pointer to a &struct phylink_link_state. | ||
123 | * | ||
124 | * The action performed depends on the currently selected mode: | ||
125 | * | ||
126 | * %MLO_AN_FIXED, %MLO_AN_PHY: | ||
127 | * Configure the specified @state->speed, @state->duplex and | ||
128 | * @state->pause (%MLO_PAUSE_TX / %MLO_PAUSE_RX) mode. | ||
129 | * | ||
130 | * %MLO_AN_INBAND: | ||
131 | * place the link in an inband negotiation mode (such as 802.3z | ||
132 | * 1000base-X or Cisco SGMII mode depending on the @state->interface | ||
133 | * mode). In both cases, link state management (whether the link | ||
134 | * is up or not) is performed by the MAC, and reported via the | ||
135 | * mac_link_state() callback. Changes in link state must be made | ||
136 | * by calling phylink_mac_change(). | ||
137 | * | ||
138 | * If in 802.3z mode, the link speed is fixed, dependent on the | ||
139 | * @state->interface. Duplex is negotiated, and pause is advertised | ||
140 | * according to @state->an_enabled, @state->pause and | ||
141 | * @state->advertising flags. Beware of MACs which only support full | ||
142 | * duplex at gigabit and higher speeds. | ||
143 | * | ||
144 | * If in Cisco SGMII mode, the link speed and duplex mode are passed | ||
145 | * in the serial bitstream 16-bit configuration word, and the MAC | ||
146 | * should be configured to read these bits and acknowledge the | ||
147 | * configuration word. Nothing is advertised by the MAC. The MAC is | ||
148 | * responsible for reading the configuration word and configuring | ||
149 | * itself accordingly. | ||
150 | */ | ||
151 | void mac_config(struct net_device *ndev, unsigned int mode, | ||
152 | const struct phylink_link_state *state); | ||
153 | |||
154 | /** | ||
155 | * mac_an_restart() - restart 802.3z BaseX autonegotiation | ||
156 | * @ndev: a pointer to a &struct net_device for the MAC. | ||
157 | */ | ||
158 | void mac_an_restart(struct net_device *ndev); | ||
159 | |||
160 | /** | ||
161 | * mac_link_down() - take the link down | ||
162 | * @ndev: a pointer to a &struct net_device for the MAC. | ||
163 | * @mode: link autonegotiation mode | ||
164 | * | ||
165 | * If @mode is not an in-band negotiation mode (as defined by | ||
166 | * phylink_autoneg_inband()), force the link down and disable any | ||
167 | * Energy Efficient Ethernet MAC configuration. | ||
168 | */ | ||
169 | void mac_link_down(struct net_device *ndev, unsigned int mode); | ||
170 | |||
171 | /** | ||
172 | * mac_link_up() - allow the link to come up | ||
173 | * @ndev: a pointer to a &struct net_device for the MAC. | ||
174 | * @mode: link autonegotiation mode | ||
175 | * @phy: any attached phy | ||
176 | * | ||
177 | * If @mode is not an in-band negotiation mode (as defined by | ||
178 | * phylink_autoneg_inband()), allow the link to come up. If @phy | ||
179 | * is non-%NULL, configure Energy Efficient Ethernet by calling | ||
180 | * phy_init_eee() and perform appropriate MAC configuration for EEE. | ||
181 | */ | ||
182 | void mac_link_up(struct net_device *ndev, unsigned int mode, | ||
183 | struct phy_device *phy); | ||
184 | #endif | ||
185 | |||
186 | struct phylink *phylink_create(struct net_device *, struct fwnode_handle *, | ||
104 | phy_interface_t iface, const struct phylink_mac_ops *ops); | 187 | phy_interface_t iface, const struct phylink_mac_ops *ops); |
105 | void phylink_destroy(struct phylink *); | 188 | void phylink_destroy(struct phylink *); |
106 | 189 | ||
107 | int phylink_connect_phy(struct phylink *, struct phy_device *); | 190 | int phylink_connect_phy(struct phylink *, struct phy_device *); |
108 | int phylink_of_phy_connect(struct phylink *, struct device_node *); | 191 | int phylink_of_phy_connect(struct phylink *, struct device_node *, u32 flags); |
109 | void phylink_disconnect_phy(struct phylink *); | 192 | void phylink_disconnect_phy(struct phylink *); |
193 | int phylink_fixed_state_cb(struct phylink *, | ||
194 | void (*cb)(struct net_device *dev, | ||
195 | struct phylink_link_state *)); | ||
110 | 196 | ||
111 | void phylink_mac_change(struct phylink *, bool up); | 197 | void phylink_mac_change(struct phylink *, bool up); |
112 | 198 | ||
@@ -128,7 +214,6 @@ int phylink_ethtool_set_pauseparam(struct phylink *, | |||
128 | int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *); | 214 | int phylink_ethtool_get_module_info(struct phylink *, struct ethtool_modinfo *); |
129 | int phylink_ethtool_get_module_eeprom(struct phylink *, | 215 | int phylink_ethtool_get_module_eeprom(struct phylink *, |
130 | struct ethtool_eeprom *, u8 *); | 216 | struct ethtool_eeprom *, u8 *); |
131 | int phylink_init_eee(struct phylink *, bool); | ||
132 | int phylink_get_eee_err(struct phylink *); | 217 | int phylink_get_eee_err(struct phylink *); |
133 | int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); | 218 | int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); |
134 | int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); | 219 | int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); |
diff --git a/include/linux/pinctrl/devinfo.h b/include/linux/pinctrl/devinfo.h index 05082e407c4a..d01a8638bb45 100644 --- a/include/linux/pinctrl/devinfo.h +++ b/include/linux/pinctrl/devinfo.h | |||
@@ -43,6 +43,8 @@ extern int pinctrl_init_done(struct device *dev); | |||
43 | 43 | ||
44 | #else | 44 | #else |
45 | 45 | ||
46 | struct device; | ||
47 | |||
46 | /* Stubs if we're not using pinctrl */ | 48 | /* Stubs if we're not using pinctrl */ |
47 | 49 | ||
48 | static inline int pinctrl_bind_pins(struct device *dev) | 50 | static inline int pinctrl_bind_pins(struct device *dev) |
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h index ec6dadcc1fde..6c0680641108 100644 --- a/include/linux/pinctrl/pinconf-generic.h +++ b/include/linux/pinctrl/pinconf-generic.h | |||
@@ -94,6 +94,7 @@ | |||
94 | * or latch delay (on outputs) this parameter (in a custom format) | 94 | * or latch delay (on outputs) this parameter (in a custom format) |
95 | * specifies the clock skew or latch delay. It typically controls how | 95 | * specifies the clock skew or latch delay. It typically controls how |
96 | * many double inverters are put in front of the line. | 96 | * many double inverters are put in front of the line. |
97 | * @PIN_CONFIG_PERSIST_STATE: retain pin state across sleep or controller reset | ||
97 | * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if | 98 | * @PIN_CONFIG_END: this is the last enumerator for pin configurations, if |
98 | * you need to pass in custom configurations to the pin controller, use | 99 | * you need to pass in custom configurations to the pin controller, use |
99 | * PIN_CONFIG_END+1 as the base offset. | 100 | * PIN_CONFIG_END+1 as the base offset. |
@@ -122,6 +123,7 @@ enum pin_config_param { | |||
122 | PIN_CONFIG_SLEEP_HARDWARE_STATE, | 123 | PIN_CONFIG_SLEEP_HARDWARE_STATE, |
123 | PIN_CONFIG_SLEW_RATE, | 124 | PIN_CONFIG_SLEW_RATE, |
124 | PIN_CONFIG_SKEW_DELAY, | 125 | PIN_CONFIG_SKEW_DELAY, |
126 | PIN_CONFIG_PERSIST_STATE, | ||
125 | PIN_CONFIG_END = 0x7F, | 127 | PIN_CONFIG_END = 0x7F, |
126 | PIN_CONFIG_MAX = 0xFF, | 128 | PIN_CONFIG_MAX = 0xFF, |
127 | }; | 129 | }; |
diff --git a/include/linux/pinctrl/pinctrl.h b/include/linux/pinctrl/pinctrl.h index 5e45385c5bdc..8f5dbb84547a 100644 --- a/include/linux/pinctrl/pinctrl.h +++ b/include/linux/pinctrl/pinctrl.h | |||
@@ -18,6 +18,7 @@ | |||
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/seq_file.h> | 19 | #include <linux/seq_file.h> |
20 | #include <linux/pinctrl/pinctrl-state.h> | 20 | #include <linux/pinctrl/pinctrl-state.h> |
21 | #include <linux/pinctrl/devinfo.h> | ||
21 | 22 | ||
22 | struct device; | 23 | struct device; |
23 | struct pinctrl_dev; | 24 | struct pinctrl_dev; |
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 2dc5e9870fcd..5a3bb3b7c9ad 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
@@ -167,10 +167,9 @@ void pipe_lock(struct pipe_inode_info *); | |||
167 | void pipe_unlock(struct pipe_inode_info *); | 167 | void pipe_unlock(struct pipe_inode_info *); |
168 | void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); | 168 | void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *); |
169 | 169 | ||
170 | extern unsigned int pipe_max_size, pipe_min_size; | 170 | extern unsigned int pipe_max_size; |
171 | extern unsigned long pipe_user_pages_hard; | 171 | extern unsigned long pipe_user_pages_hard; |
172 | extern unsigned long pipe_user_pages_soft; | 172 | extern unsigned long pipe_user_pages_soft; |
173 | int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *); | ||
174 | 173 | ||
175 | /* Drop the inode semaphore and wait for a pipe event, atomically */ | 174 | /* Drop the inode semaphore and wait for a pipe event, atomically */ |
176 | void pipe_wait(struct pipe_inode_info *pipe); | 175 | void pipe_wait(struct pipe_inode_info *pipe); |
@@ -191,6 +190,6 @@ long pipe_fcntl(struct file *, unsigned int, unsigned long arg); | |||
191 | struct pipe_inode_info *get_pipe_info(struct file *file); | 190 | struct pipe_inode_info *get_pipe_info(struct file *file); |
192 | 191 | ||
193 | int create_pipe_files(struct file **, int); | 192 | int create_pipe_files(struct file **, int); |
194 | unsigned int round_pipe_size(unsigned int size); | 193 | unsigned int round_pipe_size(unsigned long size); |
195 | 194 | ||
196 | #endif | 195 | #endif |
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h index 271a4e25af67..63507ff464ee 100644 --- a/include/linux/platform_data/at24.h +++ b/include/linux/platform_data/at24.h | |||
@@ -50,6 +50,8 @@ struct at24_platform_data { | |||
50 | #define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ | 50 | #define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ |
51 | #define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ | 51 | #define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ |
52 | #define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ | 52 | #define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ |
53 | #define AT24_FLAG_NO_RDROL BIT(1) /* does not auto-rollover reads to */ | ||
54 | /* the next slave address */ | ||
53 | 55 | ||
54 | void (*setup)(struct nvmem_device *nvmem, void *context); | 56 | void (*setup)(struct nvmem_device *nvmem, void *context); |
55 | void *context; | 57 | void *context; |
diff --git a/include/linux/platform_data/i2c-davinci.h b/include/linux/platform_data/i2c-davinci.h index 89fd34727a24..98967df07468 100644 --- a/include/linux/platform_data/i2c-davinci.h +++ b/include/linux/platform_data/i2c-davinci.h | |||
@@ -16,9 +16,8 @@ | |||
16 | struct davinci_i2c_platform_data { | 16 | struct davinci_i2c_platform_data { |
17 | unsigned int bus_freq; /* standard bus frequency (kHz) */ | 17 | unsigned int bus_freq; /* standard bus frequency (kHz) */ |
18 | unsigned int bus_delay; /* post-transaction delay (usec) */ | 18 | unsigned int bus_delay; /* post-transaction delay (usec) */ |
19 | unsigned int sda_pin; /* GPIO pin ID to use for SDA */ | 19 | bool gpio_recovery; /* Use GPIO recovery method */ |
20 | unsigned int scl_pin; /* GPIO pin ID to use for SCL */ | 20 | bool has_pfunc; /* Chip has a ICPFUNC register */ |
21 | bool has_pfunc; /*chip has a ICPFUNC register */ | ||
22 | }; | 21 | }; |
23 | 22 | ||
24 | /* for board setup code */ | 23 | /* for board setup code */ |
diff --git a/include/linux/i2c/pxa-i2c.h b/include/linux/platform_data/i2c-pxa.h index 53aab243cbd8..5236f216dfae 100644 --- a/include/linux/i2c/pxa-i2c.h +++ b/include/linux/platform_data/i2c-pxa.h | |||
@@ -71,15 +71,4 @@ struct i2c_pxa_platform_data { | |||
71 | unsigned char master_code; | 71 | unsigned char master_code; |
72 | unsigned long rate; | 72 | unsigned long rate; |
73 | }; | 73 | }; |
74 | |||
75 | extern void pxa_set_i2c_info(struct i2c_pxa_platform_data *info); | ||
76 | |||
77 | #ifdef CONFIG_PXA27x | ||
78 | extern void pxa27x_set_i2c_power_info(struct i2c_pxa_platform_data *info); | ||
79 | #endif | ||
80 | |||
81 | #ifdef CONFIG_PXA3xx | ||
82 | extern void pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info); | ||
83 | #endif | ||
84 | |||
85 | #endif | 74 | #endif |
diff --git a/include/linux/platform_data/mlxcpld-hotplug.h b/include/linux/platform_data/mlxcpld-hotplug.h deleted file mode 100644 index e4cfcffaa6f4..000000000000 --- a/include/linux/platform_data/mlxcpld-hotplug.h +++ /dev/null | |||
@@ -1,99 +0,0 @@ | |||
1 | /* | ||
2 | * include/linux/platform_data/mlxcpld-hotplug.h | ||
3 | * Copyright (c) 2016 Mellanox Technologies. All rights reserved. | ||
4 | * Copyright (c) 2016 Vadim Pasternak <vadimp@mellanox.com> | ||
5 | * | ||
6 | * Redistribution and use in source and binary forms, with or without | ||
7 | * modification, are permitted provided that the following conditions are met: | ||
8 | * | ||
9 | * 1. Redistributions of source code must retain the above copyright | ||
10 | * notice, this list of conditions and the following disclaimer. | ||
11 | * 2. Redistributions in binary form must reproduce the above copyright | ||
12 | * notice, this list of conditions and the following disclaimer in the | ||
13 | * documentation and/or other materials provided with the distribution. | ||
14 | * 3. Neither the names of the copyright holders nor the names of its | ||
15 | * contributors may be used to endorse or promote products derived from | ||
16 | * this software without specific prior written permission. | ||
17 | * | ||
18 | * Alternatively, this software may be distributed under the terms of the | ||
19 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
20 | * Software Foundation. | ||
21 | * | ||
22 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
23 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
24 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
25 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
26 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
27 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
28 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
29 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
30 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
31 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
32 | * POSSIBILITY OF SUCH DAMAGE. | ||
33 | */ | ||
34 | |||
35 | #ifndef __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H | ||
36 | #define __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H | ||
37 | |||
38 | /** | ||
39 | * struct mlxcpld_hotplug_device - I2C device data: | ||
40 | * @adapter: I2C device adapter; | ||
41 | * @client: I2C device client; | ||
42 | * @brdinfo: device board information; | ||
43 | * @bus: I2C bus, where device is attached; | ||
44 | * | ||
45 | * Structure represents I2C hotplug device static data (board topology) and | ||
46 | * dynamic data (related kernel objects handles). | ||
47 | */ | ||
48 | struct mlxcpld_hotplug_device { | ||
49 | struct i2c_adapter *adapter; | ||
50 | struct i2c_client *client; | ||
51 | struct i2c_board_info brdinfo; | ||
52 | u16 bus; | ||
53 | }; | ||
54 | |||
55 | /** | ||
56 | * struct mlxcpld_hotplug_platform_data - device platform data: | ||
57 | * @top_aggr_offset: offset of top aggregation interrupt register; | ||
58 | * @top_aggr_mask: top aggregation interrupt common mask; | ||
59 | * @top_aggr_psu_mask: top aggregation interrupt PSU mask; | ||
60 | * @psu_reg_offset: offset of PSU interrupt register; | ||
61 | * @psu_mask: PSU interrupt mask; | ||
62 | * @psu_count: number of equipped replaceable PSUs; | ||
63 | * @psu: pointer to PSU devices data array; | ||
64 | * @top_aggr_pwr_mask: top aggregation interrupt power mask; | ||
65 | * @pwr_reg_offset: offset of power interrupt register | ||
66 | * @pwr_mask: power interrupt mask; | ||
67 | * @pwr_count: number of power sources; | ||
68 | * @pwr: pointer to power devices data array; | ||
69 | * @top_aggr_fan_mask: top aggregation interrupt FAN mask; | ||
70 | * @fan_reg_offset: offset of FAN interrupt register; | ||
71 | * @fan_mask: FAN interrupt mask; | ||
72 | * @fan_count: number of equipped replaceable FANs; | ||
73 | * @fan: pointer to FAN devices data array; | ||
74 | * | ||
75 | * Structure represents board platform data, related to system hotplug events, | ||
76 | * like FAN, PSU, power cable insertion and removing. This data provides the | ||
77 | * number of hot-pluggable devices and hardware description for event handling. | ||
78 | */ | ||
79 | struct mlxcpld_hotplug_platform_data { | ||
80 | u16 top_aggr_offset; | ||
81 | u8 top_aggr_mask; | ||
82 | u8 top_aggr_psu_mask; | ||
83 | u16 psu_reg_offset; | ||
84 | u8 psu_mask; | ||
85 | u8 psu_count; | ||
86 | struct mlxcpld_hotplug_device *psu; | ||
87 | u8 top_aggr_pwr_mask; | ||
88 | u16 pwr_reg_offset; | ||
89 | u8 pwr_mask; | ||
90 | u8 pwr_count; | ||
91 | struct mlxcpld_hotplug_device *pwr; | ||
92 | u8 top_aggr_fan_mask; | ||
93 | u16 fan_reg_offset; | ||
94 | u8 fan_mask; | ||
95 | u8 fan_count; | ||
96 | struct mlxcpld_hotplug_device *fan; | ||
97 | }; | ||
98 | |||
99 | #endif /* __LINUX_PLATFORM_DATA_MLXCPLD_HOTPLUG_H */ | ||
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h new file mode 100644 index 000000000000..fcdc707eab99 --- /dev/null +++ b/include/linux/platform_data/mlxreg.h | |||
@@ -0,0 +1,144 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2017 Mellanox Technologies. All rights reserved. | ||
3 | * Copyright (c) 2017 Vadim Pasternak <vadimp@mellanox.com> | ||
4 | * | ||
5 | * Redistribution and use in source and binary forms, with or without | ||
6 | * modification, are permitted provided that the following conditions are met: | ||
7 | * | ||
8 | * 1. Redistributions of source code must retain the above copyright | ||
9 | * notice, this list of conditions and the following disclaimer. | ||
10 | * 2. Redistributions in binary form must reproduce the above copyright | ||
11 | * notice, this list of conditions and the following disclaimer in the | ||
12 | * documentation and/or other materials provided with the distribution. | ||
13 | * 3. Neither the names of the copyright holders nor the names of its | ||
14 | * contributors may be used to endorse or promote products derived from | ||
15 | * this software without specific prior written permission. | ||
16 | * | ||
17 | * Alternatively, this software may be distributed under the terms of the | ||
18 | * GNU General Public License ("GPL") version 2 as published by the Free | ||
19 | * Software Foundation. | ||
20 | * | ||
21 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
22 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
23 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
24 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
25 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
26 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
27 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | ||
28 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | ||
29 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
30 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
31 | * POSSIBILITY OF SUCH DAMAGE. | ||
32 | */ | ||
33 | |||
34 | #ifndef __LINUX_PLATFORM_DATA_MLXREG_H | ||
35 | #define __LINUX_PLATFORM_DATA_MLXREG_H | ||
36 | |||
37 | #define MLXREG_CORE_LABEL_MAX_SIZE 32 | ||
38 | |||
39 | /** | ||
40 | * struct mlxreg_hotplug_device - I2C device data: | ||
41 | * | ||
42 | * @adapter: I2C device adapter; | ||
43 | * @client: I2C device client; | ||
44 | * @brdinfo: device board information; | ||
45 | * @nr: I2C device adapter number, to which device is to be attached; | ||
46 | * | ||
47 | * Structure represents I2C hotplug device static data (board topology) and | ||
48 | * dynamic data (related kernel objects handles). | ||
49 | */ | ||
50 | struct mlxreg_hotplug_device { | ||
51 | struct i2c_adapter *adapter; | ||
52 | struct i2c_client *client; | ||
53 | struct i2c_board_info *brdinfo; | ||
54 | int nr; | ||
55 | }; | ||
56 | |||
57 | /** | ||
58 | * struct mlxreg_core_data - attributes control data: | ||
59 | * | ||
60 | * @label: attribute label; | ||
61 | * @label: attribute register offset; | ||
62 | * @reg: attribute register; | ||
63 | * @mask: attribute access mask; | ||
64 | * @mode: access mode; | ||
65 | * @bit: attribute effective bit; | ||
66 | * @np - pointer to node platform associated with attribute; | ||
67 | * @hpdev - hotplug device data; | ||
68 | * @health_cntr: dynamic device health indication counter; | ||
69 | * @attached: true if device has been attached after good health indication; | ||
70 | */ | ||
71 | struct mlxreg_core_data { | ||
72 | char label[MLXREG_CORE_LABEL_MAX_SIZE]; | ||
73 | u32 reg; | ||
74 | u32 mask; | ||
75 | u32 bit; | ||
76 | umode_t mode; | ||
77 | struct device_node *np; | ||
78 | struct mlxreg_hotplug_device hpdev; | ||
79 | u8 health_cntr; | ||
80 | bool attached; | ||
81 | }; | ||
82 | |||
83 | /** | ||
84 | * struct mlxreg_core_item - same type components controlled by the driver: | ||
85 | * | ||
86 | * @data: component data; | ||
87 | * @aggr_mask: group aggregation mask; | ||
88 | * @reg: group interrupt status register; | ||
89 | * @mask: group interrupt mask; | ||
90 | * @cache: last status value for elements fro the same group; | ||
91 | * @count: number of available elements in the group; | ||
92 | * @ind: element's index inside the group; | ||
93 | * @inversed: if 0: 0 for signal status is OK, if 1 - 1 is OK; | ||
94 | * @health: true if device has health indication, false in other case; | ||
95 | */ | ||
96 | struct mlxreg_core_item { | ||
97 | struct mlxreg_core_data *data; | ||
98 | u32 aggr_mask; | ||
99 | u32 reg; | ||
100 | u32 mask; | ||
101 | u32 cache; | ||
102 | u8 count; | ||
103 | u8 ind; | ||
104 | u8 inversed; | ||
105 | u8 health; | ||
106 | }; | ||
107 | |||
108 | /** | ||
109 | * struct mlxreg_core_platform_data - platform data: | ||
110 | * | ||
111 | * @led_data: led private data; | ||
112 | * @regmap: register map of parent device; | ||
113 | * @counter: number of led instances; | ||
114 | */ | ||
115 | struct mlxreg_core_platform_data { | ||
116 | struct mlxreg_core_data *data; | ||
117 | void *regmap; | ||
118 | int counter; | ||
119 | }; | ||
120 | |||
121 | /** | ||
122 | * struct mlxreg_core_hotplug_platform_data - hotplug platform data: | ||
123 | * | ||
124 | * @items: same type components with the hotplug capability; | ||
125 | * @irq: platform interrupt number; | ||
126 | * @regmap: register map of parent device; | ||
127 | * @counter: number of the components with the hotplug capability; | ||
128 | * @cell: location of top aggregation interrupt register; | ||
129 | * @mask: top aggregation interrupt common mask; | ||
130 | * @cell_low: location of low aggregation interrupt register; | ||
131 | * @mask_low: low aggregation interrupt common mask; | ||
132 | */ | ||
133 | struct mlxreg_core_hotplug_platform_data { | ||
134 | struct mlxreg_core_item *items; | ||
135 | int irq; | ||
136 | void *regmap; | ||
137 | int counter; | ||
138 | u32 cell; | ||
139 | u32 mask; | ||
140 | u32 cell_low; | ||
141 | u32 mask_low; | ||
142 | }; | ||
143 | |||
144 | #endif /* __LINUX_PLATFORM_DATA_MLXREG_H */ | ||
diff --git a/include/linux/platform_data/mms114.h b/include/linux/platform_data/mms114.h deleted file mode 100644 index 5722ebfb2738..000000000000 --- a/include/linux/platform_data/mms114.h +++ /dev/null | |||
@@ -1,24 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2012 Samsung Electronics Co.Ltd | ||
3 | * Author: Joonyoung Shim <jy0922.shim@samsung.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundationr | ||
8 | */ | ||
9 | |||
10 | #ifndef __LINUX_MMS114_H | ||
11 | #define __LINUX_MMS114_H | ||
12 | |||
13 | struct mms114_platform_data { | ||
14 | unsigned int x_size; | ||
15 | unsigned int y_size; | ||
16 | unsigned int contact_threshold; | ||
17 | unsigned int moving_threshold; | ||
18 | bool x_invert; | ||
19 | bool y_invert; | ||
20 | |||
21 | void (*cfg_pin)(bool); | ||
22 | }; | ||
23 | |||
24 | #endif /* __LINUX_MMS114_H */ | ||
diff --git a/include/linux/platform_data/mtd-onenand-omap2.h b/include/linux/platform_data/mtd-onenand-omap2.h deleted file mode 100644 index 56ff0e6f5ad1..000000000000 --- a/include/linux/platform_data/mtd-onenand-omap2.h +++ /dev/null | |||
@@ -1,34 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2006 Nokia Corporation | ||
3 | * Author: Juha Yrjola | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | ||
9 | |||
10 | #ifndef __MTD_ONENAND_OMAP2_H | ||
11 | #define __MTD_ONENAND_OMAP2_H | ||
12 | |||
13 | #include <linux/mtd/mtd.h> | ||
14 | #include <linux/mtd/partitions.h> | ||
15 | |||
16 | #define ONENAND_SYNC_READ (1 << 0) | ||
17 | #define ONENAND_SYNC_READWRITE (1 << 1) | ||
18 | #define ONENAND_IN_OMAP34XX (1 << 2) | ||
19 | |||
20 | struct omap_onenand_platform_data { | ||
21 | int cs; | ||
22 | int gpio_irq; | ||
23 | struct mtd_partition *parts; | ||
24 | int nr_parts; | ||
25 | int (*onenand_setup)(void __iomem *, int *freq_ptr); | ||
26 | int dma_channel; | ||
27 | u8 flags; | ||
28 | u8 regulator_can_sleep; | ||
29 | u8 skip_initial_unlocking; | ||
30 | |||
31 | /* for passing the partitions */ | ||
32 | struct device_node *of_node; | ||
33 | }; | ||
34 | #endif | ||
diff --git a/include/linux/platform_data/si5351.h b/include/linux/platform_data/si5351.h index 818c5c6e203f..c71a2dd66143 100644 --- a/include/linux/platform_data/si5351.h +++ b/include/linux/platform_data/si5351.h | |||
@@ -86,6 +86,7 @@ enum si5351_disable_state { | |||
86 | * @multisynth_src: multisynth source clock | 86 | * @multisynth_src: multisynth source clock |
87 | * @clkout_src: clkout source clock | 87 | * @clkout_src: clkout source clock |
88 | * @pll_master: if true, clkout can also change pll rate | 88 | * @pll_master: if true, clkout can also change pll rate |
89 | * @pll_reset: if true, clkout can reset its pll | ||
89 | * @drive: output drive strength | 90 | * @drive: output drive strength |
90 | * @rate: initial clkout rate, or default if 0 | 91 | * @rate: initial clkout rate, or default if 0 |
91 | */ | 92 | */ |
@@ -95,6 +96,7 @@ struct si5351_clkout_config { | |||
95 | enum si5351_drive_strength drive; | 96 | enum si5351_drive_strength drive; |
96 | enum si5351_disable_state disable_state; | 97 | enum si5351_disable_state disable_state; |
97 | bool pll_master; | 98 | bool pll_master; |
99 | bool pll_reset; | ||
98 | unsigned long rate; | 100 | unsigned long rate; |
99 | }; | 101 | }; |
100 | 102 | ||
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h index da79774078a7..773daf7915a3 100644 --- a/include/linux/platform_data/spi-s3c64xx.h +++ b/include/linux/platform_data/spi-s3c64xx.h | |||
@@ -1,10 +1,8 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
1 | /* | 3 | /* |
2 | * Copyright (C) 2009 Samsung Electronics Ltd. | 4 | * Copyright (C) 2009 Samsung Electronics Ltd. |
3 | * Jaswinder Singh <jassi.brar@samsung.com> | 5 | * Jaswinder Singh <jassi.brar@samsung.com> |
4 | * | ||
5 | * This program is free software; you can redistribute it and/or modify | ||
6 | * it under the terms of the GNU General Public License version 2 as | ||
7 | * published by the Free Software Foundation. | ||
8 | */ | 6 | */ |
9 | 7 | ||
10 | #ifndef __SPI_S3C64XX_H | 8 | #ifndef __SPI_S3C64XX_H |
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h new file mode 100644 index 000000000000..1be356330b96 --- /dev/null +++ b/include/linux/platform_data/ti-sysc.h | |||
@@ -0,0 +1,86 @@ | |||
1 | #ifndef __TI_SYSC_DATA_H__ | ||
2 | #define __TI_SYSC_DATA_H__ | ||
3 | |||
4 | enum ti_sysc_module_type { | ||
5 | TI_SYSC_OMAP2, | ||
6 | TI_SYSC_OMAP2_TIMER, | ||
7 | TI_SYSC_OMAP3_SHAM, | ||
8 | TI_SYSC_OMAP3_AES, | ||
9 | TI_SYSC_OMAP4, | ||
10 | TI_SYSC_OMAP4_TIMER, | ||
11 | TI_SYSC_OMAP4_SIMPLE, | ||
12 | TI_SYSC_OMAP34XX_SR, | ||
13 | TI_SYSC_OMAP36XX_SR, | ||
14 | TI_SYSC_OMAP4_SR, | ||
15 | TI_SYSC_OMAP4_MCASP, | ||
16 | TI_SYSC_OMAP4_USB_HOST_FS, | ||
17 | }; | ||
18 | |||
19 | /** | ||
20 | * struct sysc_regbits - TI OCP_SYSCONFIG register field offsets | ||
21 | * @midle_shift: Offset of the midle bit | ||
22 | * @clkact_shift: Offset of the clockactivity bit | ||
23 | * @sidle_shift: Offset of the sidle bit | ||
24 | * @enwkup_shift: Offset of the enawakeup bit | ||
25 | * @srst_shift: Offset of the softreset bit | ||
26 | * @autoidle_shift: Offset of the autoidle bit | ||
27 | * @dmadisable_shift: Offset of the dmadisable bit | ||
28 | * @emufree_shift; Offset of the emufree bit | ||
29 | * | ||
30 | * Note that 0 is a valid shift, and for ti-sysc.c -ENODEV can be used if a | ||
31 | * feature is not available. | ||
32 | */ | ||
33 | struct sysc_regbits { | ||
34 | s8 midle_shift; | ||
35 | s8 clkact_shift; | ||
36 | s8 sidle_shift; | ||
37 | s8 enwkup_shift; | ||
38 | s8 srst_shift; | ||
39 | s8 autoidle_shift; | ||
40 | s8 dmadisable_shift; | ||
41 | s8 emufree_shift; | ||
42 | }; | ||
43 | |||
44 | #define SYSC_QUIRK_RESET_STATUS BIT(7) | ||
45 | #define SYSC_QUIRK_NO_IDLE_ON_INIT BIT(6) | ||
46 | #define SYSC_QUIRK_NO_RESET_ON_INIT BIT(5) | ||
47 | #define SYSC_QUIRK_OPT_CLKS_NEEDED BIT(4) | ||
48 | #define SYSC_QUIRK_OPT_CLKS_IN_RESET BIT(3) | ||
49 | #define SYSC_QUIRK_16BIT BIT(2) | ||
50 | #define SYSC_QUIRK_UNCACHED BIT(1) | ||
51 | #define SYSC_QUIRK_USE_CLOCKACT BIT(0) | ||
52 | |||
53 | #define SYSC_NR_IDLEMODES 4 | ||
54 | |||
55 | /** | ||
56 | * struct sysc_capabilities - capabilities for an interconnect target module | ||
57 | * | ||
58 | * @sysc_mask: bitmask of supported SYSCONFIG register bits | ||
59 | * @regbits: bitmask of SYSCONFIG register bits | ||
60 | * @mod_quirks: bitmask of module specific quirks | ||
61 | */ | ||
62 | struct sysc_capabilities { | ||
63 | const enum ti_sysc_module_type type; | ||
64 | const u32 sysc_mask; | ||
65 | const struct sysc_regbits *regbits; | ||
66 | const u32 mod_quirks; | ||
67 | }; | ||
68 | |||
69 | /** | ||
70 | * struct sysc_config - configuration for an interconnect target module | ||
71 | * @sysc_val: configured value for sysc register | ||
72 | * @midlemodes: bitmask of supported master idle modes | ||
73 | * @sidlemodes: bitmask of supported master idle modes | ||
74 | * @srst_udelay: optional delay needed after OCP soft reset | ||
75 | * @quirks: bitmask of enabled quirks | ||
76 | */ | ||
77 | struct sysc_config { | ||
78 | u32 sysc_val; | ||
79 | u32 syss_mask; | ||
80 | u8 midlemodes; | ||
81 | u8 sidlemodes; | ||
82 | u8 srst_udelay; | ||
83 | u32 quirks; | ||
84 | }; | ||
85 | |||
86 | #endif /* __TI_SYSC_DATA_H__ */ | ||
diff --git a/include/linux/poll.h b/include/linux/poll.h index d384f12abdd5..04781a753326 100644 --- a/include/linux/poll.h +++ b/include/linux/poll.h | |||
@@ -37,7 +37,7 @@ typedef void (*poll_queue_proc)(struct file *, wait_queue_head_t *, struct poll_ | |||
37 | */ | 37 | */ |
38 | typedef struct poll_table_struct { | 38 | typedef struct poll_table_struct { |
39 | poll_queue_proc _qproc; | 39 | poll_queue_proc _qproc; |
40 | unsigned long _key; | 40 | __poll_t _key; |
41 | } poll_table; | 41 | } poll_table; |
42 | 42 | ||
43 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) | 43 | static inline void poll_wait(struct file * filp, wait_queue_head_t * wait_address, poll_table *p) |
@@ -62,20 +62,20 @@ static inline bool poll_does_not_wait(const poll_table *p) | |||
62 | * to be started implicitly on poll(). You typically only want to do that | 62 | * to be started implicitly on poll(). You typically only want to do that |
63 | * if the application is actually polling for POLLIN and/or POLLOUT. | 63 | * if the application is actually polling for POLLIN and/or POLLOUT. |
64 | */ | 64 | */ |
65 | static inline unsigned long poll_requested_events(const poll_table *p) | 65 | static inline __poll_t poll_requested_events(const poll_table *p) |
66 | { | 66 | { |
67 | return p ? p->_key : ~0UL; | 67 | return p ? p->_key : ~(__poll_t)0; |
68 | } | 68 | } |
69 | 69 | ||
70 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) | 70 | static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc) |
71 | { | 71 | { |
72 | pt->_qproc = qproc; | 72 | pt->_qproc = qproc; |
73 | pt->_key = ~0UL; /* all events enabled */ | 73 | pt->_key = ~(__poll_t)0; /* all events enabled */ |
74 | } | 74 | } |
75 | 75 | ||
76 | struct poll_table_entry { | 76 | struct poll_table_entry { |
77 | struct file *filp; | 77 | struct file *filp; |
78 | unsigned long key; | 78 | __poll_t key; |
79 | wait_queue_entry_t wait; | 79 | wait_queue_entry_t wait; |
80 | wait_queue_head_t *wait_address; | 80 | wait_queue_head_t *wait_address; |
81 | }; | 81 | }; |
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 38d8225510f1..3a3bc71017d5 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h | |||
@@ -68,7 +68,7 @@ struct posix_clock_operations { | |||
68 | 68 | ||
69 | int (*open) (struct posix_clock *pc, fmode_t f_mode); | 69 | int (*open) (struct posix_clock *pc, fmode_t f_mode); |
70 | 70 | ||
71 | uint (*poll) (struct posix_clock *pc, | 71 | __poll_t (*poll) (struct posix_clock *pc, |
72 | struct file *file, poll_table *wait); | 72 | struct file *file, poll_table *wait); |
73 | 73 | ||
74 | int (*release) (struct posix_clock *pc); | 74 | int (*release) (struct posix_clock *pc); |
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index 672c4f32311e..c85704fcdbd2 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h | |||
@@ -42,13 +42,26 @@ struct cpu_timer_list { | |||
42 | #define CLOCKFD CPUCLOCK_MAX | 42 | #define CLOCKFD CPUCLOCK_MAX |
43 | #define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) | 43 | #define CLOCKFD_MASK (CPUCLOCK_PERTHREAD_MASK|CPUCLOCK_CLOCK_MASK) |
44 | 44 | ||
45 | #define MAKE_PROCESS_CPUCLOCK(pid, clock) \ | 45 | static inline clockid_t make_process_cpuclock(const unsigned int pid, |
46 | ((~(clockid_t) (pid) << 3) | (clockid_t) (clock)) | 46 | const clockid_t clock) |
47 | #define MAKE_THREAD_CPUCLOCK(tid, clock) \ | 47 | { |
48 | MAKE_PROCESS_CPUCLOCK((tid), (clock) | CPUCLOCK_PERTHREAD_MASK) | 48 | return ((~pid) << 3) | clock; |
49 | } | ||
50 | static inline clockid_t make_thread_cpuclock(const unsigned int tid, | ||
51 | const clockid_t clock) | ||
52 | { | ||
53 | return make_process_cpuclock(tid, clock | CPUCLOCK_PERTHREAD_MASK); | ||
54 | } | ||
49 | 55 | ||
50 | #define FD_TO_CLOCKID(fd) ((~(clockid_t) (fd) << 3) | CLOCKFD) | 56 | static inline clockid_t fd_to_clockid(const int fd) |
51 | #define CLOCKID_TO_FD(clk) ((unsigned int) ~((clk) >> 3)) | 57 | { |
58 | return make_process_cpuclock((unsigned int) fd, CLOCKFD); | ||
59 | } | ||
60 | |||
61 | static inline int clockid_to_fd(const clockid_t clk) | ||
62 | { | ||
63 | return ~(clk >> 3); | ||
64 | } | ||
52 | 65 | ||
53 | #define REQUEUE_PENDING 1 | 66 | #define REQUEUE_PENDING 1 |
54 | 67 | ||
diff --git a/include/linux/posix_acl.h b/include/linux/posix_acl.h index b2b7255ec7f5..540595a321a7 100644 --- a/include/linux/posix_acl.h +++ b/include/linux/posix_acl.h | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/bug.h> | 12 | #include <linux/bug.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | #include <linux/rcupdate.h> | 14 | #include <linux/rcupdate.h> |
15 | #include <linux/refcount.h> | ||
15 | #include <uapi/linux/posix_acl.h> | 16 | #include <uapi/linux/posix_acl.h> |
16 | 17 | ||
17 | struct posix_acl_entry { | 18 | struct posix_acl_entry { |
@@ -24,7 +25,7 @@ struct posix_acl_entry { | |||
24 | }; | 25 | }; |
25 | 26 | ||
26 | struct posix_acl { | 27 | struct posix_acl { |
27 | atomic_t a_refcount; | 28 | refcount_t a_refcount; |
28 | struct rcu_head a_rcu; | 29 | struct rcu_head a_rcu; |
29 | unsigned int a_count; | 30 | unsigned int a_count; |
30 | struct posix_acl_entry a_entries[0]; | 31 | struct posix_acl_entry a_entries[0]; |
@@ -41,7 +42,7 @@ static inline struct posix_acl * | |||
41 | posix_acl_dup(struct posix_acl *acl) | 42 | posix_acl_dup(struct posix_acl *acl) |
42 | { | 43 | { |
43 | if (acl) | 44 | if (acl) |
44 | atomic_inc(&acl->a_refcount); | 45 | refcount_inc(&acl->a_refcount); |
45 | return acl; | 46 | return acl; |
46 | } | 47 | } |
47 | 48 | ||
@@ -51,7 +52,7 @@ posix_acl_dup(struct posix_acl *acl) | |||
51 | static inline void | 52 | static inline void |
52 | posix_acl_release(struct posix_acl *acl) | 53 | posix_acl_release(struct posix_acl *acl) |
53 | { | 54 | { |
54 | if (acl && atomic_dec_and_test(&acl->a_refcount)) | 55 | if (acl && refcount_dec_and_test(&acl->a_refcount)) |
55 | kfree_rcu(acl, a_rcu); | 56 | kfree_rcu(acl, a_rcu); |
56 | } | 57 | } |
57 | 58 | ||
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h index e6187f524f2c..01fbf1b16258 100644 --- a/include/linux/power/bq27xxx_battery.h +++ b/include/linux/power/bq27xxx_battery.h | |||
@@ -16,6 +16,7 @@ enum bq27xxx_chip { | |||
16 | BQ27520G2, /* bq27520G2 */ | 16 | BQ27520G2, /* bq27520G2 */ |
17 | BQ27520G3, /* bq27520G3 */ | 17 | BQ27520G3, /* bq27520G3 */ |
18 | BQ27520G4, /* bq27520G4 */ | 18 | BQ27520G4, /* bq27520G4 */ |
19 | BQ27521, /* bq27521 */ | ||
19 | BQ27530, /* bq27530, bq27531 */ | 20 | BQ27530, /* bq27530, bq27531 */ |
20 | BQ27531, | 21 | BQ27531, |
21 | BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ | 22 | BQ27541, /* bq27541, bq27542, bq27546, bq27742 */ |
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h index 2ff18c9840a7..d31cb6215905 100644 --- a/include/linux/proc_ns.h +++ b/include/linux/proc_ns.h | |||
@@ -78,6 +78,9 @@ extern struct file *proc_ns_fget(int fd); | |||
78 | #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) | 78 | #define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private) |
79 | extern void *ns_get_path(struct path *path, struct task_struct *task, | 79 | extern void *ns_get_path(struct path *path, struct task_struct *task, |
80 | const struct proc_ns_operations *ns_ops); | 80 | const struct proc_ns_operations *ns_ops); |
81 | typedef struct ns_common *ns_get_path_helper_t(void *); | ||
82 | extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb, | ||
83 | void *private_data); | ||
81 | 84 | ||
82 | extern int ns_get_name(char *buf, size_t size, struct task_struct *task, | 85 | extern int ns_get_name(char *buf, size_t size, struct task_struct *task, |
83 | const struct proc_ns_operations *ns_ops); | 86 | const struct proc_ns_operations *ns_ops); |
diff --git a/include/linux/property.h b/include/linux/property.h index f6189a3ac63c..769d372c1edf 100644 --- a/include/linux/property.h +++ b/include/linux/property.h | |||
@@ -83,11 +83,17 @@ struct fwnode_handle *fwnode_get_next_parent( | |||
83 | struct fwnode_handle *fwnode); | 83 | struct fwnode_handle *fwnode); |
84 | struct fwnode_handle *fwnode_get_next_child_node( | 84 | struct fwnode_handle *fwnode_get_next_child_node( |
85 | const struct fwnode_handle *fwnode, struct fwnode_handle *child); | 85 | const struct fwnode_handle *fwnode, struct fwnode_handle *child); |
86 | struct fwnode_handle *fwnode_get_next_available_child_node( | ||
87 | const struct fwnode_handle *fwnode, struct fwnode_handle *child); | ||
86 | 88 | ||
87 | #define fwnode_for_each_child_node(fwnode, child) \ | 89 | #define fwnode_for_each_child_node(fwnode, child) \ |
88 | for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ | 90 | for (child = fwnode_get_next_child_node(fwnode, NULL); child; \ |
89 | child = fwnode_get_next_child_node(fwnode, child)) | 91 | child = fwnode_get_next_child_node(fwnode, child)) |
90 | 92 | ||
93 | #define fwnode_for_each_available_child_node(fwnode, child) \ | ||
94 | for (child = fwnode_get_next_available_child_node(fwnode, NULL); child;\ | ||
95 | child = fwnode_get_next_available_child_node(fwnode, child)) | ||
96 | |||
91 | struct fwnode_handle *device_get_next_child_node( | 97 | struct fwnode_handle *device_get_next_child_node( |
92 | struct device *dev, struct fwnode_handle *child); | 98 | struct device *dev, struct fwnode_handle *child); |
93 | 99 | ||
@@ -103,6 +109,8 @@ struct fwnode_handle *device_get_named_child_node(struct device *dev, | |||
103 | struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); | 109 | struct fwnode_handle *fwnode_handle_get(struct fwnode_handle *fwnode); |
104 | void fwnode_handle_put(struct fwnode_handle *fwnode); | 110 | void fwnode_handle_put(struct fwnode_handle *fwnode); |
105 | 111 | ||
112 | int fwnode_irq_get(struct fwnode_handle *fwnode, unsigned int index); | ||
113 | |||
106 | unsigned int device_get_child_node_count(struct device *dev); | 114 | unsigned int device_get_child_node_count(struct device *dev); |
107 | 115 | ||
108 | static inline bool device_property_read_bool(struct device *dev, | 116 | static inline bool device_property_read_bool(struct device *dev, |
@@ -206,7 +214,7 @@ struct property_entry { | |||
206 | */ | 214 | */ |
207 | 215 | ||
208 | #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ | 216 | #define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _val_) \ |
209 | { \ | 217 | (struct property_entry) { \ |
210 | .name = _name_, \ | 218 | .name = _name_, \ |
211 | .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ | 219 | .length = ARRAY_SIZE(_val_) * sizeof(_type_), \ |
212 | .is_array = true, \ | 220 | .is_array = true, \ |
@@ -224,7 +232,7 @@ struct property_entry { | |||
224 | PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) | 232 | PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, _val_) |
225 | 233 | ||
226 | #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ | 234 | #define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \ |
227 | { \ | 235 | (struct property_entry) { \ |
228 | .name = _name_, \ | 236 | .name = _name_, \ |
229 | .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ | 237 | .length = ARRAY_SIZE(_val_) * sizeof(const char *), \ |
230 | .is_array = true, \ | 238 | .is_array = true, \ |
@@ -233,7 +241,7 @@ struct property_entry { | |||
233 | } | 241 | } |
234 | 242 | ||
235 | #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ | 243 | #define PROPERTY_ENTRY_INTEGER(_name_, _type_, _val_) \ |
236 | { \ | 244 | (struct property_entry) { \ |
237 | .name = _name_, \ | 245 | .name = _name_, \ |
238 | .length = sizeof(_type_), \ | 246 | .length = sizeof(_type_), \ |
239 | .is_string = false, \ | 247 | .is_string = false, \ |
@@ -250,7 +258,7 @@ struct property_entry { | |||
250 | PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) | 258 | PROPERTY_ENTRY_INTEGER(_name_, u64, _val_) |
251 | 259 | ||
252 | #define PROPERTY_ENTRY_STRING(_name_, _val_) \ | 260 | #define PROPERTY_ENTRY_STRING(_name_, _val_) \ |
253 | { \ | 261 | (struct property_entry) { \ |
254 | .name = _name_, \ | 262 | .name = _name_, \ |
255 | .length = sizeof(_val_), \ | 263 | .length = sizeof(_val_), \ |
256 | .is_string = true, \ | 264 | .is_string = true, \ |
@@ -258,7 +266,7 @@ struct property_entry { | |||
258 | } | 266 | } |
259 | 267 | ||
260 | #define PROPERTY_ENTRY_BOOL(_name_) \ | 268 | #define PROPERTY_ENTRY_BOOL(_name_) \ |
261 | { \ | 269 | (struct property_entry) { \ |
262 | .name = _name_, \ | 270 | .name = _name_, \ |
263 | } | 271 | } |
264 | 272 | ||
@@ -275,10 +283,15 @@ bool device_dma_supported(struct device *dev); | |||
275 | 283 | ||
276 | enum dev_dma_attr device_get_dma_attr(struct device *dev); | 284 | enum dev_dma_attr device_get_dma_attr(struct device *dev); |
277 | 285 | ||
286 | void *device_get_match_data(struct device *dev); | ||
287 | |||
278 | int device_get_phy_mode(struct device *dev); | 288 | int device_get_phy_mode(struct device *dev); |
279 | 289 | ||
280 | void *device_get_mac_address(struct device *dev, char *addr, int alen); | 290 | void *device_get_mac_address(struct device *dev, char *addr, int alen); |
281 | 291 | ||
292 | int fwnode_get_phy_mode(struct fwnode_handle *fwnode); | ||
293 | void *fwnode_get_mac_address(struct fwnode_handle *fwnode, | ||
294 | char *addr, int alen); | ||
282 | struct fwnode_handle *fwnode_graph_get_next_endpoint( | 295 | struct fwnode_handle *fwnode_graph_get_next_endpoint( |
283 | const struct fwnode_handle *fwnode, struct fwnode_handle *prev); | 296 | const struct fwnode_handle *fwnode, struct fwnode_handle *prev); |
284 | struct fwnode_handle * | 297 | struct fwnode_handle * |
diff --git a/include/linux/psci.h b/include/linux/psci.h index bdea1cb5e1db..8b1b3b5935ab 100644 --- a/include/linux/psci.h +++ b/include/linux/psci.h | |||
@@ -25,7 +25,19 @@ bool psci_tos_resident_on(int cpu); | |||
25 | int psci_cpu_init_idle(unsigned int cpu); | 25 | int psci_cpu_init_idle(unsigned int cpu); |
26 | int psci_cpu_suspend_enter(unsigned long index); | 26 | int psci_cpu_suspend_enter(unsigned long index); |
27 | 27 | ||
28 | enum psci_conduit { | ||
29 | PSCI_CONDUIT_NONE, | ||
30 | PSCI_CONDUIT_SMC, | ||
31 | PSCI_CONDUIT_HVC, | ||
32 | }; | ||
33 | |||
34 | enum smccc_version { | ||
35 | SMCCC_VERSION_1_0, | ||
36 | SMCCC_VERSION_1_1, | ||
37 | }; | ||
38 | |||
28 | struct psci_operations { | 39 | struct psci_operations { |
40 | u32 (*get_version)(void); | ||
29 | int (*cpu_suspend)(u32 state, unsigned long entry_point); | 41 | int (*cpu_suspend)(u32 state, unsigned long entry_point); |
30 | int (*cpu_off)(u32 state); | 42 | int (*cpu_off)(u32 state); |
31 | int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); | 43 | int (*cpu_on)(unsigned long cpuid, unsigned long entry_point); |
@@ -33,6 +45,8 @@ struct psci_operations { | |||
33 | int (*affinity_info)(unsigned long target_affinity, | 45 | int (*affinity_info)(unsigned long target_affinity, |
34 | unsigned long lowest_affinity_level); | 46 | unsigned long lowest_affinity_level); |
35 | int (*migrate_info_type)(void); | 47 | int (*migrate_info_type)(void); |
48 | enum psci_conduit conduit; | ||
49 | enum smccc_version smccc_version; | ||
36 | }; | 50 | }; |
37 | 51 | ||
38 | extern struct psci_operations psci_ops; | 52 | extern struct psci_operations psci_ops; |
@@ -46,10 +60,11 @@ static inline int psci_dt_init(void) { return 0; } | |||
46 | #if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI) | 60 | #if defined(CONFIG_ARM_PSCI_FW) && defined(CONFIG_ACPI) |
47 | int __init psci_acpi_init(void); | 61 | int __init psci_acpi_init(void); |
48 | bool __init acpi_psci_present(void); | 62 | bool __init acpi_psci_present(void); |
49 | bool __init acpi_psci_use_hvc(void); | 63 | bool acpi_psci_use_hvc(void); |
50 | #else | 64 | #else |
51 | static inline int psci_acpi_init(void) { return 0; } | 65 | static inline int psci_acpi_init(void) { return 0; } |
52 | static inline bool acpi_psci_present(void) { return false; } | 66 | static inline bool acpi_psci_present(void) { return false; } |
67 | static inline bool acpi_psci_use_hvc(void) {return false; } | ||
53 | #endif | 68 | #endif |
54 | 69 | ||
55 | #endif /* __LINUX_PSCI_H */ | 70 | #endif /* __LINUX_PSCI_H */ |
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index d72b2e7dd500..1883d6137e9b 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h | |||
@@ -45,9 +45,10 @@ struct ptr_ring { | |||
45 | }; | 45 | }; |
46 | 46 | ||
47 | /* Note: callers invoking this in a loop must use a compiler barrier, | 47 | /* Note: callers invoking this in a loop must use a compiler barrier, |
48 | * for example cpu_relax(). If ring is ever resized, callers must hold | 48 | * for example cpu_relax(). |
49 | * producer_lock - see e.g. ptr_ring_full. Otherwise, if callers don't hold | 49 | * |
50 | * producer_lock, the next call to __ptr_ring_produce may fail. | 50 | * NB: this is unlike __ptr_ring_empty in that callers must hold producer_lock: |
51 | * see e.g. ptr_ring_full. | ||
51 | */ | 52 | */ |
52 | static inline bool __ptr_ring_full(struct ptr_ring *r) | 53 | static inline bool __ptr_ring_full(struct ptr_ring *r) |
53 | { | 54 | { |
@@ -113,7 +114,7 @@ static inline int __ptr_ring_produce(struct ptr_ring *r, void *ptr) | |||
113 | /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ | 114 | /* Pairs with smp_read_barrier_depends in __ptr_ring_consume. */ |
114 | smp_wmb(); | 115 | smp_wmb(); |
115 | 116 | ||
116 | r->queue[r->producer++] = ptr; | 117 | WRITE_ONCE(r->queue[r->producer++], ptr); |
117 | if (unlikely(r->producer >= r->size)) | 118 | if (unlikely(r->producer >= r->size)) |
118 | r->producer = 0; | 119 | r->producer = 0; |
119 | return 0; | 120 | return 0; |
@@ -169,32 +170,36 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr) | |||
169 | return ret; | 170 | return ret; |
170 | } | 171 | } |
171 | 172 | ||
172 | /* Note: callers invoking this in a loop must use a compiler barrier, | ||
173 | * for example cpu_relax(). Callers must take consumer_lock | ||
174 | * if they dereference the pointer - see e.g. PTR_RING_PEEK_CALL. | ||
175 | * If ring is never resized, and if the pointer is merely | ||
176 | * tested, there's no need to take the lock - see e.g. __ptr_ring_empty. | ||
177 | * However, if called outside the lock, and if some other CPU | ||
178 | * consumes ring entries at the same time, the value returned | ||
179 | * is not guaranteed to be correct. | ||
180 | * In this case - to avoid incorrectly detecting the ring | ||
181 | * as empty - the CPU consuming the ring entries is responsible | ||
182 | * for either consuming all ring entries until the ring is empty, | ||
183 | * or synchronizing with some other CPU and causing it to | ||
184 | * execute __ptr_ring_peek and/or consume the ring enteries | ||
185 | * after the synchronization point. | ||
186 | */ | ||
187 | static inline void *__ptr_ring_peek(struct ptr_ring *r) | 173 | static inline void *__ptr_ring_peek(struct ptr_ring *r) |
188 | { | 174 | { |
189 | if (likely(r->size)) | 175 | if (likely(r->size)) |
190 | return r->queue[r->consumer_head]; | 176 | return READ_ONCE(r->queue[r->consumer_head]); |
191 | return NULL; | 177 | return NULL; |
192 | } | 178 | } |
193 | 179 | ||
194 | /* See __ptr_ring_peek above for locking rules. */ | 180 | /* |
181 | * Test ring empty status without taking any locks. | ||
182 | * | ||
183 | * NB: This is only safe to call if ring is never resized. | ||
184 | * | ||
185 | * However, if some other CPU consumes ring entries at the same time, the value | ||
186 | * returned is not guaranteed to be correct. | ||
187 | * | ||
188 | * In this case - to avoid incorrectly detecting the ring | ||
189 | * as empty - the CPU consuming the ring entries is responsible | ||
190 | * for either consuming all ring entries until the ring is empty, | ||
191 | * or synchronizing with some other CPU and causing it to | ||
192 | * re-test __ptr_ring_empty and/or consume the ring enteries | ||
193 | * after the synchronization point. | ||
194 | * | ||
195 | * Note: callers invoking this in a loop must use a compiler barrier, | ||
196 | * for example cpu_relax(). | ||
197 | */ | ||
195 | static inline bool __ptr_ring_empty(struct ptr_ring *r) | 198 | static inline bool __ptr_ring_empty(struct ptr_ring *r) |
196 | { | 199 | { |
197 | return !__ptr_ring_peek(r); | 200 | if (likely(r->size)) |
201 | return !r->queue[READ_ONCE(r->consumer_head)]; | ||
202 | return true; | ||
198 | } | 203 | } |
199 | 204 | ||
200 | static inline bool ptr_ring_empty(struct ptr_ring *r) | 205 | static inline bool ptr_ring_empty(struct ptr_ring *r) |
@@ -248,22 +253,28 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) | |||
248 | /* Fundamentally, what we want to do is update consumer | 253 | /* Fundamentally, what we want to do is update consumer |
249 | * index and zero out the entry so producer can reuse it. | 254 | * index and zero out the entry so producer can reuse it. |
250 | * Doing it naively at each consume would be as simple as: | 255 | * Doing it naively at each consume would be as simple as: |
251 | * r->queue[r->consumer++] = NULL; | 256 | * consumer = r->consumer; |
252 | * if (unlikely(r->consumer >= r->size)) | 257 | * r->queue[consumer++] = NULL; |
253 | * r->consumer = 0; | 258 | * if (unlikely(consumer >= r->size)) |
259 | * consumer = 0; | ||
260 | * r->consumer = consumer; | ||
254 | * but that is suboptimal when the ring is full as producer is writing | 261 | * but that is suboptimal when the ring is full as producer is writing |
255 | * out new entries in the same cache line. Defer these updates until a | 262 | * out new entries in the same cache line. Defer these updates until a |
256 | * batch of entries has been consumed. | 263 | * batch of entries has been consumed. |
257 | */ | 264 | */ |
258 | int head = r->consumer_head++; | 265 | /* Note: we must keep consumer_head valid at all times for __ptr_ring_empty |
266 | * to work correctly. | ||
267 | */ | ||
268 | int consumer_head = r->consumer_head; | ||
269 | int head = consumer_head++; | ||
259 | 270 | ||
260 | /* Once we have processed enough entries invalidate them in | 271 | /* Once we have processed enough entries invalidate them in |
261 | * the ring all at once so producer can reuse their space in the ring. | 272 | * the ring all at once so producer can reuse their space in the ring. |
262 | * We also do this when we reach end of the ring - not mandatory | 273 | * We also do this when we reach end of the ring - not mandatory |
263 | * but helps keep the implementation simple. | 274 | * but helps keep the implementation simple. |
264 | */ | 275 | */ |
265 | if (unlikely(r->consumer_head - r->consumer_tail >= r->batch || | 276 | if (unlikely(consumer_head - r->consumer_tail >= r->batch || |
266 | r->consumer_head >= r->size)) { | 277 | consumer_head >= r->size)) { |
267 | /* Zero out entries in the reverse order: this way we touch the | 278 | /* Zero out entries in the reverse order: this way we touch the |
268 | * cache line that producer might currently be reading the last; | 279 | * cache line that producer might currently be reading the last; |
269 | * producer won't make progress and touch other cache lines | 280 | * producer won't make progress and touch other cache lines |
@@ -271,12 +282,14 @@ static inline void __ptr_ring_discard_one(struct ptr_ring *r) | |||
271 | */ | 282 | */ |
272 | while (likely(head >= r->consumer_tail)) | 283 | while (likely(head >= r->consumer_tail)) |
273 | r->queue[head--] = NULL; | 284 | r->queue[head--] = NULL; |
274 | r->consumer_tail = r->consumer_head; | 285 | r->consumer_tail = consumer_head; |
275 | } | 286 | } |
276 | if (unlikely(r->consumer_head >= r->size)) { | 287 | if (unlikely(consumer_head >= r->size)) { |
277 | r->consumer_head = 0; | 288 | consumer_head = 0; |
278 | r->consumer_tail = 0; | 289 | r->consumer_tail = 0; |
279 | } | 290 | } |
291 | /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ | ||
292 | WRITE_ONCE(r->consumer_head, consumer_head); | ||
280 | } | 293 | } |
281 | 294 | ||
282 | static inline void *__ptr_ring_consume(struct ptr_ring *r) | 295 | static inline void *__ptr_ring_consume(struct ptr_ring *r) |
@@ -527,7 +540,9 @@ static inline void ptr_ring_unconsume(struct ptr_ring *r, void **batch, int n, | |||
527 | goto done; | 540 | goto done; |
528 | } | 541 | } |
529 | r->queue[head] = batch[--n]; | 542 | r->queue[head] = batch[--n]; |
530 | r->consumer_tail = r->consumer_head = head; | 543 | r->consumer_tail = head; |
544 | /* matching READ_ONCE in __ptr_ring_empty for lockless tests */ | ||
545 | WRITE_ONCE(r->consumer_head, head); | ||
531 | } | 546 | } |
532 | 547 | ||
533 | done: | 548 | done: |
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 1fd27d68926b..b401b962afff 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h | |||
@@ -13,6 +13,9 @@ | |||
13 | #ifndef __QCOM_SCM_H | 13 | #ifndef __QCOM_SCM_H |
14 | #define __QCOM_SCM_H | 14 | #define __QCOM_SCM_H |
15 | 15 | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/cpumask.h> | ||
18 | |||
16 | #define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) | 19 | #define QCOM_SCM_VERSION(major, minor) (((major) << 16) | ((minor) & 0xFF)) |
17 | #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 | 20 | #define QCOM_SCM_CPU_PWR_DOWN_L2_ON 0x0 |
18 | #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 | 21 | #define QCOM_SCM_CPU_PWR_DOWN_L2_OFF 0x1 |
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h index 39e2a2ac2471..2b3b350e07b7 100644 --- a/include/linux/qed/common_hsi.h +++ b/include/linux/qed/common_hsi.h | |||
@@ -32,14 +32,15 @@ | |||
32 | 32 | ||
33 | #ifndef _COMMON_HSI_H | 33 | #ifndef _COMMON_HSI_H |
34 | #define _COMMON_HSI_H | 34 | #define _COMMON_HSI_H |
35 | |||
35 | #include <linux/types.h> | 36 | #include <linux/types.h> |
36 | #include <asm/byteorder.h> | 37 | #include <asm/byteorder.h> |
37 | #include <linux/bitops.h> | 38 | #include <linux/bitops.h> |
38 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
39 | 40 | ||
40 | /* dma_addr_t manip */ | 41 | /* dma_addr_t manip */ |
41 | #define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) | 42 | #define PTR_LO(x) ((u32)(((uintptr_t)(x)) & 0xffffffff)) |
42 | #define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) | 43 | #define PTR_HI(x) ((u32)((((uintptr_t)(x)) >> 16) >> 16)) |
43 | #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) | 44 | #define DMA_LO_LE(x) cpu_to_le32(lower_32_bits(x)) |
44 | #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) | 45 | #define DMA_HI_LE(x) cpu_to_le32(upper_32_bits(x)) |
45 | #define DMA_REGPAIR_LE(x, val) do { \ | 46 | #define DMA_REGPAIR_LE(x, val) do { \ |
@@ -47,39 +48,45 @@ | |||
47 | (x).lo = DMA_LO_LE((val)); \ | 48 | (x).lo = DMA_LO_LE((val)); \ |
48 | } while (0) | 49 | } while (0) |
49 | 50 | ||
50 | #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) | 51 | #define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) |
51 | #define HILO_64(hi, lo) HILO_GEN((le32_to_cpu(hi)), (le32_to_cpu(lo)), u64) | 52 | #define HILO_64(hi, lo) \ |
52 | #define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) | 53 | HILO_GEN(le32_to_cpu(hi), le32_to_cpu(lo), u64) |
54 | #define HILO_64_REGPAIR(regpair) ({ \ | ||
55 | typeof(regpair) __regpair = (regpair); \ | ||
56 | HILO_64(__regpair.hi, __regpair.lo); }) | ||
53 | #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) | 57 | #define HILO_DMA_REGPAIR(regpair) ((dma_addr_t)HILO_64_REGPAIR(regpair)) |
54 | 58 | ||
55 | #ifndef __COMMON_HSI__ | 59 | #ifndef __COMMON_HSI__ |
56 | #define __COMMON_HSI__ | 60 | #define __COMMON_HSI__ |
57 | 61 | ||
62 | /********************************/ | ||
63 | /* PROTOCOL COMMON FW CONSTANTS */ | ||
64 | /********************************/ | ||
58 | 65 | ||
59 | #define X_FINAL_CLEANUP_AGG_INT 1 | 66 | #define X_FINAL_CLEANUP_AGG_INT 1 |
60 | 67 | ||
61 | #define EVENT_RING_PAGE_SIZE_BYTES 4096 | 68 | #define EVENT_RING_PAGE_SIZE_BYTES 4096 |
62 | 69 | ||
63 | #define NUM_OF_GLOBAL_QUEUES 128 | 70 | #define NUM_OF_GLOBAL_QUEUES 128 |
64 | #define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 | 71 | #define COMMON_QUEUE_ENTRY_MAX_BYTE_SIZE 64 |
65 | 72 | ||
66 | #define ISCSI_CDU_TASK_SEG_TYPE 0 | 73 | #define ISCSI_CDU_TASK_SEG_TYPE 0 |
67 | #define FCOE_CDU_TASK_SEG_TYPE 0 | 74 | #define FCOE_CDU_TASK_SEG_TYPE 0 |
68 | #define RDMA_CDU_TASK_SEG_TYPE 1 | 75 | #define RDMA_CDU_TASK_SEG_TYPE 1 |
69 | 76 | ||
70 | #define FW_ASSERT_GENERAL_ATTN_IDX 32 | 77 | #define FW_ASSERT_GENERAL_ATTN_IDX 32 |
71 | 78 | ||
72 | #define MAX_PINNED_CCFC 32 | 79 | #define MAX_PINNED_CCFC 32 |
73 | 80 | ||
74 | /* Queue Zone sizes in bytes */ | 81 | /* Queue Zone sizes in bytes */ |
75 | #define TSTORM_QZONE_SIZE 8 | 82 | #define TSTORM_QZONE_SIZE 8 |
76 | #define MSTORM_QZONE_SIZE 16 | 83 | #define MSTORM_QZONE_SIZE 16 |
77 | #define USTORM_QZONE_SIZE 8 | 84 | #define USTORM_QZONE_SIZE 8 |
78 | #define XSTORM_QZONE_SIZE 8 | 85 | #define XSTORM_QZONE_SIZE 8 |
79 | #define YSTORM_QZONE_SIZE 0 | 86 | #define YSTORM_QZONE_SIZE 0 |
80 | #define PSTORM_QZONE_SIZE 0 | 87 | #define PSTORM_QZONE_SIZE 0 |
81 | 88 | ||
82 | #define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 | 89 | #define MSTORM_VF_ZONE_DEFAULT_SIZE_LOG 7 |
83 | #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 | 90 | #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DEFAULT 16 |
84 | #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 | 91 | #define ETH_MAX_NUM_RX_QUEUES_PER_VF_DOUBLE 48 |
85 | #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 | 92 | #define ETH_MAX_NUM_RX_QUEUES_PER_VF_QUAD 112 |
@@ -102,8 +109,8 @@ | |||
102 | #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 | 109 | #define MAX_NUM_LL2_TX_STATS_COUNTERS 48 |
103 | 110 | ||
104 | #define FW_MAJOR_VERSION 8 | 111 | #define FW_MAJOR_VERSION 8 |
105 | #define FW_MINOR_VERSION 20 | 112 | #define FW_MINOR_VERSION 33 |
106 | #define FW_REVISION_VERSION 0 | 113 | #define FW_REVISION_VERSION 1 |
107 | #define FW_ENGINEERING_VERSION 0 | 114 | #define FW_ENGINEERING_VERSION 0 |
108 | 115 | ||
109 | /***********************/ | 116 | /***********************/ |
@@ -115,10 +122,10 @@ | |||
115 | #define MAX_NUM_PORTS_BB (2) | 122 | #define MAX_NUM_PORTS_BB (2) |
116 | #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) | 123 | #define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) |
117 | 124 | ||
118 | #define MAX_NUM_PFS_K2 (16) | 125 | #define MAX_NUM_PFS_K2 (16) |
119 | #define MAX_NUM_PFS_BB (8) | 126 | #define MAX_NUM_PFS_BB (8) |
120 | #define MAX_NUM_PFS (MAX_NUM_PFS_K2) | 127 | #define MAX_NUM_PFS (MAX_NUM_PFS_K2) |
121 | #define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ | 128 | #define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ |
122 | 129 | ||
123 | #define MAX_NUM_VFS_K2 (192) | 130 | #define MAX_NUM_VFS_K2 (192) |
124 | #define MAX_NUM_VFS_BB (120) | 131 | #define MAX_NUM_VFS_BB (120) |
@@ -141,29 +148,14 @@ | |||
141 | /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ | 148 | /* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ |
142 | #define NUM_PHYS_TCS_4PORT_K2 (4) | 149 | #define NUM_PHYS_TCS_4PORT_K2 (4) |
143 | #define NUM_OF_PHYS_TCS (8) | 150 | #define NUM_OF_PHYS_TCS (8) |
144 | 151 | #define PURE_LB_TC NUM_OF_PHYS_TCS | |
145 | #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) | 152 | #define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) |
146 | #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) | 153 | #define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) |
147 | 154 | ||
148 | #define LB_TC (NUM_OF_PHYS_TCS) | ||
149 | |||
150 | /* Num of possible traffic priority values */ | ||
151 | #define NUM_OF_PRIO (8) | ||
152 | |||
153 | #define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) | ||
154 | #define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) | ||
155 | #define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) | ||
156 | #define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) | ||
157 | |||
158 | /* CIDs */ | 155 | /* CIDs */ |
159 | #define NUM_OF_CONNECTION_TYPES (8) | 156 | #define NUM_OF_CONNECTION_TYPES_E4 (8) |
160 | #define NUM_OF_LCIDS (320) | 157 | #define NUM_OF_LCIDS (320) |
161 | #define NUM_OF_LTIDS (320) | 158 | #define NUM_OF_LTIDS (320) |
162 | |||
163 | /* Clock values */ | ||
164 | #define MASTER_CLK_FREQ_E4 (375e6) | ||
165 | #define STORM_CLK_FREQ_E4 (1000e6) | ||
166 | #define CLK25M_CLK_FREQ_E4 (25e6) | ||
167 | 159 | ||
168 | /* Global PXP windows (GTT) */ | 160 | /* Global PXP windows (GTT) */ |
169 | #define NUM_OF_GTT 19 | 161 | #define NUM_OF_GTT 19 |
@@ -172,17 +164,17 @@ | |||
172 | #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) | 164 | #define GTT_DWORD_SIZE BIT(GTT_DWORD_SIZE_BITS) |
173 | 165 | ||
174 | /* Tools Version */ | 166 | /* Tools Version */ |
175 | #define TOOLS_VERSION 10 | 167 | #define TOOLS_VERSION 10 |
176 | 168 | ||
177 | /*****************/ | 169 | /*****************/ |
178 | /* CDU CONSTANTS */ | 170 | /* CDU CONSTANTS */ |
179 | /*****************/ | 171 | /*****************/ |
180 | 172 | ||
181 | #define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) | 173 | #define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) |
182 | #define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) | 174 | #define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) |
183 | 175 | ||
184 | #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) | 176 | #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (12) |
185 | #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) | 177 | #define CDU_VF_FL_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0xfff) |
186 | 178 | ||
187 | #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) | 179 | #define CDU_CONTEXT_VALIDATION_CFG_ENABLE_SHIFT (0) |
188 | #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) | 180 | #define CDU_CONTEXT_VALIDATION_CFG_VALIDATION_TYPE_SHIFT (1) |
@@ -201,45 +193,45 @@ | |||
201 | #define DQ_DEMS_TOE_LOCAL_ADV_WND 4 | 193 | #define DQ_DEMS_TOE_LOCAL_ADV_WND 4 |
202 | #define DQ_DEMS_ROCE_CQ_CONS 7 | 194 | #define DQ_DEMS_ROCE_CQ_CONS 7 |
203 | 195 | ||
204 | /* XCM agg val selection */ | 196 | /* XCM agg val selection (HW) */ |
205 | #define DQ_XCM_AGG_VAL_SEL_WORD2 0 | 197 | #define DQ_XCM_AGG_VAL_SEL_WORD2 0 |
206 | #define DQ_XCM_AGG_VAL_SEL_WORD3 1 | 198 | #define DQ_XCM_AGG_VAL_SEL_WORD3 1 |
207 | #define DQ_XCM_AGG_VAL_SEL_WORD4 2 | 199 | #define DQ_XCM_AGG_VAL_SEL_WORD4 2 |
208 | #define DQ_XCM_AGG_VAL_SEL_WORD5 3 | 200 | #define DQ_XCM_AGG_VAL_SEL_WORD5 3 |
209 | #define DQ_XCM_AGG_VAL_SEL_REG3 4 | 201 | #define DQ_XCM_AGG_VAL_SEL_REG3 4 |
210 | #define DQ_XCM_AGG_VAL_SEL_REG4 5 | 202 | #define DQ_XCM_AGG_VAL_SEL_REG4 5 |
211 | #define DQ_XCM_AGG_VAL_SEL_REG5 6 | 203 | #define DQ_XCM_AGG_VAL_SEL_REG5 6 |
212 | #define DQ_XCM_AGG_VAL_SEL_REG6 7 | 204 | #define DQ_XCM_AGG_VAL_SEL_REG6 7 |
213 | 205 | ||
214 | /* XCM agg val selection */ | 206 | /* XCM agg val selection (FW) */ |
215 | #define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 | 207 | #define DQ_XCM_CORE_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 |
216 | #define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 | 208 | #define DQ_XCM_CORE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 |
217 | #define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 | 209 | #define DQ_XCM_CORE_SPQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 |
218 | #define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 | 210 | #define DQ_XCM_ETH_EDPM_NUM_BDS_CMD DQ_XCM_AGG_VAL_SEL_WORD2 |
219 | #define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 | 211 | #define DQ_XCM_ETH_TX_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 |
220 | #define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 | 212 | #define DQ_XCM_ETH_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 |
221 | #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 | 213 | #define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 |
222 | #define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 | 214 | #define DQ_XCM_FCOE_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 |
223 | #define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 | 215 | #define DQ_XCM_FCOE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 |
224 | #define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 | 216 | #define DQ_XCM_FCOE_X_FERQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD5 |
225 | #define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 | 217 | #define DQ_XCM_ISCSI_SQ_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD3 |
226 | #define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 | 218 | #define DQ_XCM_ISCSI_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 |
227 | #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 | 219 | #define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 |
228 | #define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 | 220 | #define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6 |
229 | #define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 | 221 | #define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 |
230 | #define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 | 222 | #define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4 |
231 | #define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 | 223 | #define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3 |
232 | #define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 | 224 | #define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4 |
233 | 225 | ||
234 | /* UCM agg val selection (HW) */ | 226 | /* UCM agg val selection (HW) */ |
235 | #define DQ_UCM_AGG_VAL_SEL_WORD0 0 | 227 | #define DQ_UCM_AGG_VAL_SEL_WORD0 0 |
236 | #define DQ_UCM_AGG_VAL_SEL_WORD1 1 | 228 | #define DQ_UCM_AGG_VAL_SEL_WORD1 1 |
237 | #define DQ_UCM_AGG_VAL_SEL_WORD2 2 | 229 | #define DQ_UCM_AGG_VAL_SEL_WORD2 2 |
238 | #define DQ_UCM_AGG_VAL_SEL_WORD3 3 | 230 | #define DQ_UCM_AGG_VAL_SEL_WORD3 3 |
239 | #define DQ_UCM_AGG_VAL_SEL_REG0 4 | 231 | #define DQ_UCM_AGG_VAL_SEL_REG0 4 |
240 | #define DQ_UCM_AGG_VAL_SEL_REG1 5 | 232 | #define DQ_UCM_AGG_VAL_SEL_REG1 5 |
241 | #define DQ_UCM_AGG_VAL_SEL_REG2 6 | 233 | #define DQ_UCM_AGG_VAL_SEL_REG2 6 |
242 | #define DQ_UCM_AGG_VAL_SEL_REG3 7 | 234 | #define DQ_UCM_AGG_VAL_SEL_REG3 7 |
243 | 235 | ||
244 | /* UCM agg val selection (FW) */ | 236 | /* UCM agg val selection (FW) */ |
245 | #define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 | 237 | #define DQ_UCM_ETH_PMD_TX_CONS_CMD DQ_UCM_AGG_VAL_SEL_WORD2 |
@@ -263,7 +255,7 @@ | |||
263 | #define DQ_TCM_ROCE_RQ_PROD_CMD \ | 255 | #define DQ_TCM_ROCE_RQ_PROD_CMD \ |
264 | DQ_TCM_AGG_VAL_SEL_WORD0 | 256 | DQ_TCM_AGG_VAL_SEL_WORD0 |
265 | 257 | ||
266 | /* XCM agg counter flag selection */ | 258 | /* XCM agg counter flag selection (HW) */ |
267 | #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 | 259 | #define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 |
268 | #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 | 260 | #define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 |
269 | #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 | 261 | #define DQ_XCM_AGG_FLG_SHIFT_CF12 2 |
@@ -273,20 +265,20 @@ | |||
273 | #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 | 265 | #define DQ_XCM_AGG_FLG_SHIFT_CF22 6 |
274 | #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 | 266 | #define DQ_XCM_AGG_FLG_SHIFT_CF23 7 |
275 | 267 | ||
276 | /* XCM agg counter flag selection */ | 268 | /* XCM agg counter flag selection (FW) */ |
277 | #define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) | 269 | #define DQ_XCM_CORE_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) |
278 | #define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) | 270 | #define DQ_XCM_CORE_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) |
279 | #define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) | 271 | #define DQ_XCM_CORE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) |
280 | #define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) | 272 | #define DQ_XCM_ETH_DQ_CF_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF18) |
281 | #define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) | 273 | #define DQ_XCM_ETH_TERMINATE_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) |
282 | #define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) | 274 | #define DQ_XCM_ETH_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) |
283 | #define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) | 275 | #define DQ_XCM_ETH_TPH_EN_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) |
284 | #define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) | 276 | #define DQ_XCM_FCOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) |
285 | #define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) | 277 | #define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) |
286 | #define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) | 278 | #define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) |
287 | #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) | 279 | #define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23) |
288 | #define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) | 280 | #define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19) |
289 | #define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) | 281 | #define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22) |
290 | 282 | ||
291 | /* UCM agg counter flag selection (HW) */ | 283 | /* UCM agg counter flag selection (HW) */ |
292 | #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 | 284 | #define DQ_UCM_AGG_FLG_SHIFT_CF0 0 |
@@ -317,9 +309,9 @@ | |||
317 | #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 | 309 | #define DQ_TCM_AGG_FLG_SHIFT_CF6 6 |
318 | #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 | 310 | #define DQ_TCM_AGG_FLG_SHIFT_CF7 7 |
319 | /* TCM agg counter flag selection (FW) */ | 311 | /* TCM agg counter flag selection (FW) */ |
320 | #define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) | 312 | #define DQ_TCM_FCOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) |
321 | #define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) | 313 | #define DQ_TCM_FCOE_DUMMY_TIMER_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF2) |
322 | #define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) | 314 | #define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) |
323 | #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) | 315 | #define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) |
324 | #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) | 316 | #define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3) |
325 | #define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) | 317 | #define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) |
@@ -327,18 +319,18 @@ | |||
327 | #define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) | 319 | #define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1) |
328 | 320 | ||
329 | /* PWM address mapping */ | 321 | /* PWM address mapping */ |
330 | #define DQ_PWM_OFFSET_DPM_BASE 0x0 | 322 | #define DQ_PWM_OFFSET_DPM_BASE 0x0 |
331 | #define DQ_PWM_OFFSET_DPM_END 0x27 | 323 | #define DQ_PWM_OFFSET_DPM_END 0x27 |
332 | #define DQ_PWM_OFFSET_XCM16_BASE 0x40 | 324 | #define DQ_PWM_OFFSET_XCM16_BASE 0x40 |
333 | #define DQ_PWM_OFFSET_XCM32_BASE 0x44 | 325 | #define DQ_PWM_OFFSET_XCM32_BASE 0x44 |
334 | #define DQ_PWM_OFFSET_UCM16_BASE 0x48 | 326 | #define DQ_PWM_OFFSET_UCM16_BASE 0x48 |
335 | #define DQ_PWM_OFFSET_UCM32_BASE 0x4C | 327 | #define DQ_PWM_OFFSET_UCM32_BASE 0x4C |
336 | #define DQ_PWM_OFFSET_UCM16_4 0x50 | 328 | #define DQ_PWM_OFFSET_UCM16_4 0x50 |
337 | #define DQ_PWM_OFFSET_TCM16_BASE 0x58 | 329 | #define DQ_PWM_OFFSET_TCM16_BASE 0x58 |
338 | #define DQ_PWM_OFFSET_TCM32_BASE 0x5C | 330 | #define DQ_PWM_OFFSET_TCM32_BASE 0x5C |
339 | #define DQ_PWM_OFFSET_XCM_FLAGS 0x68 | 331 | #define DQ_PWM_OFFSET_XCM_FLAGS 0x68 |
340 | #define DQ_PWM_OFFSET_UCM_FLAGS 0x69 | 332 | #define DQ_PWM_OFFSET_UCM_FLAGS 0x69 |
341 | #define DQ_PWM_OFFSET_TCM_FLAGS 0x6B | 333 | #define DQ_PWM_OFFSET_TCM_FLAGS 0x6B |
342 | 334 | ||
343 | #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) | 335 | #define DQ_PWM_OFFSET_XCM_RDMA_SQ_PROD (DQ_PWM_OFFSET_XCM16_BASE + 2) |
344 | #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) | 336 | #define DQ_PWM_OFFSET_UCM_RDMA_CQ_CONS_32BIT (DQ_PWM_OFFSET_UCM32_BASE) |
@@ -347,10 +339,11 @@ | |||
347 | #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) | 339 | #define DQ_PWM_OFFSET_UCM_RDMA_ARM_FLAGS (DQ_PWM_OFFSET_UCM_FLAGS) |
348 | #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) | 340 | #define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1) |
349 | #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) | 341 | #define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3) |
350 | #define DQ_REGION_SHIFT (12) | 342 | |
343 | #define DQ_REGION_SHIFT (12) | ||
351 | 344 | ||
352 | /* DPM */ | 345 | /* DPM */ |
353 | #define DQ_DPM_WQE_BUFF_SIZE (320) | 346 | #define DQ_DPM_WQE_BUFF_SIZE (320) |
354 | 347 | ||
355 | /* Conn type ranges */ | 348 | /* Conn type ranges */ |
356 | #define DQ_CONN_TYPE_RANGE_SHIFT (4) | 349 | #define DQ_CONN_TYPE_RANGE_SHIFT (4) |
@@ -359,29 +352,30 @@ | |||
359 | /* QM CONSTANTS */ | 352 | /* QM CONSTANTS */ |
360 | /*****************/ | 353 | /*****************/ |
361 | 354 | ||
362 | /* number of TX queues in the QM */ | 355 | /* Number of TX queues in the QM */ |
363 | #define MAX_QM_TX_QUEUES_K2 512 | 356 | #define MAX_QM_TX_QUEUES_K2 512 |
364 | #define MAX_QM_TX_QUEUES_BB 448 | 357 | #define MAX_QM_TX_QUEUES_BB 448 |
365 | #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 | 358 | #define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 |
366 | 359 | ||
367 | /* number of Other queues in the QM */ | 360 | /* Number of Other queues in the QM */ |
368 | #define MAX_QM_OTHER_QUEUES_BB 64 | 361 | #define MAX_QM_OTHER_QUEUES_BB 64 |
369 | #define MAX_QM_OTHER_QUEUES_K2 128 | 362 | #define MAX_QM_OTHER_QUEUES_K2 128 |
370 | #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 | 363 | #define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 |
371 | 364 | ||
372 | /* number of queues in a PF queue group */ | 365 | /* Number of queues in a PF queue group */ |
373 | #define QM_PF_QUEUE_GROUP_SIZE 8 | 366 | #define QM_PF_QUEUE_GROUP_SIZE 8 |
374 | 367 | ||
375 | /* the size of a single queue element in bytes */ | 368 | /* The size of a single queue element in bytes */ |
376 | #define QM_PQ_ELEMENT_SIZE 4 | 369 | #define QM_PQ_ELEMENT_SIZE 4 |
377 | 370 | ||
378 | /* base number of Tx PQs in the CM PQ representation. | 371 | /* Base number of Tx PQs in the CM PQ representation. |
379 | * should be used when storing PQ IDs in CM PQ registers and context | 372 | * Should be used when storing PQ IDs in CM PQ registers and context. |
380 | */ | 373 | */ |
381 | #define CM_TX_PQ_BASE 0x200 | 374 | #define CM_TX_PQ_BASE 0x200 |
382 | 375 | ||
383 | /* number of global Vport/QCN rate limiters */ | 376 | /* Number of global Vport/QCN rate limiters */ |
384 | #define MAX_QM_GLOBAL_RLS 256 | 377 | #define MAX_QM_GLOBAL_RLS 256 |
378 | |||
385 | /* QM registers data */ | 379 | /* QM registers data */ |
386 | #define QM_LINE_CRD_REG_WIDTH 16 | 380 | #define QM_LINE_CRD_REG_WIDTH 16 |
387 | #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) | 381 | #define QM_LINE_CRD_REG_SIGN_BIT BIT((QM_LINE_CRD_REG_WIDTH - 1)) |
@@ -400,7 +394,7 @@ | |||
400 | #define CAU_FSM_ETH_TX 1 | 394 | #define CAU_FSM_ETH_TX 1 |
401 | 395 | ||
402 | /* Number of Protocol Indices per Status Block */ | 396 | /* Number of Protocol Indices per Status Block */ |
403 | #define PIS_PER_SB 12 | 397 | #define PIS_PER_SB_E4 12 |
404 | 398 | ||
405 | #define CAU_HC_STOPPED_STATE 3 | 399 | #define CAU_HC_STOPPED_STATE 3 |
406 | #define CAU_HC_DISABLE_STATE 4 | 400 | #define CAU_HC_DISABLE_STATE 4 |
@@ -432,8 +426,7 @@ | |||
432 | 426 | ||
433 | #define IGU_CMD_INT_ACK_BASE 0x0400 | 427 | #define IGU_CMD_INT_ACK_BASE 0x0400 |
434 | #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ | 428 | #define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ |
435 | MAX_TOT_SB_PER_PATH - \ | 429 | MAX_TOT_SB_PER_PATH - 1) |
436 | 1) | ||
437 | #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff | 430 | #define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff |
438 | 431 | ||
439 | #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 | 432 | #define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 |
@@ -447,8 +440,7 @@ | |||
447 | 440 | ||
448 | #define IGU_CMD_PROD_UPD_BASE 0x0600 | 441 | #define IGU_CMD_PROD_UPD_BASE 0x0600 |
449 | #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ | 442 | #define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ |
450 | MAX_TOT_SB_PER_PATH - \ | 443 | MAX_TOT_SB_PER_PATH - 1) |
451 | 1) | ||
452 | #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff | 444 | #define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff |
453 | 445 | ||
454 | /*****************/ | 446 | /*****************/ |
@@ -514,129 +506,126 @@ | |||
514 | PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) | 506 | PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) |
515 | 507 | ||
516 | /* PF BAR */ | 508 | /* PF BAR */ |
517 | #define PXP_BAR0_START_GRC 0x0000 | 509 | #define PXP_BAR0_START_GRC 0x0000 |
518 | #define PXP_BAR0_GRC_LENGTH 0x1C00000 | 510 | #define PXP_BAR0_GRC_LENGTH 0x1C00000 |
519 | #define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ | 511 | #define PXP_BAR0_END_GRC (PXP_BAR0_START_GRC + \ |
520 | PXP_BAR0_GRC_LENGTH - 1) | 512 | PXP_BAR0_GRC_LENGTH - 1) |
521 | 513 | ||
522 | #define PXP_BAR0_START_IGU 0x1C00000 | 514 | #define PXP_BAR0_START_IGU 0x1C00000 |
523 | #define PXP_BAR0_IGU_LENGTH 0x10000 | 515 | #define PXP_BAR0_IGU_LENGTH 0x10000 |
524 | #define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ | 516 | #define PXP_BAR0_END_IGU (PXP_BAR0_START_IGU + \ |
525 | PXP_BAR0_IGU_LENGTH - 1) | 517 | PXP_BAR0_IGU_LENGTH - 1) |
526 | 518 | ||
527 | #define PXP_BAR0_START_TSDM 0x1C80000 | 519 | #define PXP_BAR0_START_TSDM 0x1C80000 |
528 | #define PXP_BAR0_SDM_LENGTH 0x40000 | 520 | #define PXP_BAR0_SDM_LENGTH 0x40000 |
529 | #define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 | 521 | #define PXP_BAR0_SDM_RESERVED_LENGTH 0x40000 |
530 | #define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ | 522 | #define PXP_BAR0_END_TSDM (PXP_BAR0_START_TSDM + \ |
531 | PXP_BAR0_SDM_LENGTH - 1) | 523 | PXP_BAR0_SDM_LENGTH - 1) |
532 | 524 | ||
533 | #define PXP_BAR0_START_MSDM 0x1D00000 | 525 | #define PXP_BAR0_START_MSDM 0x1D00000 |
534 | #define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ | 526 | #define PXP_BAR0_END_MSDM (PXP_BAR0_START_MSDM + \ |
535 | PXP_BAR0_SDM_LENGTH - 1) | 527 | PXP_BAR0_SDM_LENGTH - 1) |
536 | 528 | ||
537 | #define PXP_BAR0_START_USDM 0x1D80000 | 529 | #define PXP_BAR0_START_USDM 0x1D80000 |
538 | #define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ | 530 | #define PXP_BAR0_END_USDM (PXP_BAR0_START_USDM + \ |
539 | PXP_BAR0_SDM_LENGTH - 1) | 531 | PXP_BAR0_SDM_LENGTH - 1) |
540 | 532 | ||
541 | #define PXP_BAR0_START_XSDM 0x1E00000 | 533 | #define PXP_BAR0_START_XSDM 0x1E00000 |
542 | #define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ | 534 | #define PXP_BAR0_END_XSDM (PXP_BAR0_START_XSDM + \ |
543 | PXP_BAR0_SDM_LENGTH - 1) | 535 | PXP_BAR0_SDM_LENGTH - 1) |
544 | 536 | ||
545 | #define PXP_BAR0_START_YSDM 0x1E80000 | 537 | #define PXP_BAR0_START_YSDM 0x1E80000 |
546 | #define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ | 538 | #define PXP_BAR0_END_YSDM (PXP_BAR0_START_YSDM + \ |
547 | PXP_BAR0_SDM_LENGTH - 1) | 539 | PXP_BAR0_SDM_LENGTH - 1) |
548 | 540 | ||
549 | #define PXP_BAR0_START_PSDM 0x1F00000 | 541 | #define PXP_BAR0_START_PSDM 0x1F00000 |
550 | #define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ | 542 | #define PXP_BAR0_END_PSDM (PXP_BAR0_START_PSDM + \ |
551 | PXP_BAR0_SDM_LENGTH - 1) | 543 | PXP_BAR0_SDM_LENGTH - 1) |
552 | 544 | ||
553 | #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) | 545 | #define PXP_BAR0_FIRST_INVALID_ADDRESS (PXP_BAR0_END_PSDM + 1) |
554 | 546 | ||
555 | /* VF BAR */ | 547 | /* VF BAR */ |
556 | #define PXP_VF_BAR0 0 | 548 | #define PXP_VF_BAR0 0 |
557 | 549 | ||
558 | #define PXP_VF_BAR0_START_GRC 0x3E00 | 550 | #define PXP_VF_BAR0_START_IGU 0 |
559 | #define PXP_VF_BAR0_GRC_LENGTH 0x200 | 551 | #define PXP_VF_BAR0_IGU_LENGTH 0x3000 |
560 | #define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ | 552 | #define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ |
561 | PXP_VF_BAR0_GRC_LENGTH - 1) | 553 | PXP_VF_BAR0_IGU_LENGTH - 1) |
562 | 554 | ||
563 | #define PXP_VF_BAR0_START_IGU 0 | 555 | #define PXP_VF_BAR0_START_DQ 0x3000 |
564 | #define PXP_VF_BAR0_IGU_LENGTH 0x3000 | 556 | #define PXP_VF_BAR0_DQ_LENGTH 0x200 |
565 | #define PXP_VF_BAR0_END_IGU (PXP_VF_BAR0_START_IGU + \ | 557 | #define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 |
566 | PXP_VF_BAR0_IGU_LENGTH - 1) | 558 | #define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ |
567 | 559 | PXP_VF_BAR0_DQ_OPAQUE_OFFSET) | |
568 | #define PXP_VF_BAR0_START_DQ 0x3000 | 560 | #define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ |
569 | #define PXP_VF_BAR0_DQ_LENGTH 0x200 | 561 | + 4) |
570 | #define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 | 562 | #define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ |
571 | #define PXP_VF_BAR0_ME_OPAQUE_ADDRESS (PXP_VF_BAR0_START_DQ + \ | 563 | PXP_VF_BAR0_DQ_LENGTH - 1) |
572 | PXP_VF_BAR0_DQ_OPAQUE_OFFSET) | 564 | |
573 | #define PXP_VF_BAR0_ME_CONCRETE_ADDRESS (PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ | 565 | #define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 |
574 | + 4) | 566 | #define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 |
575 | #define PXP_VF_BAR0_END_DQ (PXP_VF_BAR0_START_DQ + \ | 567 | #define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B + \ |
576 | PXP_VF_BAR0_DQ_LENGTH - 1) | 568 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) |
577 | 569 | ||
578 | #define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 | 570 | #define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 |
579 | #define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 | 571 | #define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B + \ |
580 | #define PXP_VF_BAR0_END_TSDM_ZONE_B (PXP_VF_BAR0_START_TSDM_ZONE_B \ | 572 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) |
581 | + \ | 573 | |
582 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ | 574 | #define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 |
583 | - 1) | 575 | #define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B + \ |
584 | 576 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) | |
585 | #define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 | 577 | |
586 | #define PXP_VF_BAR0_END_MSDM_ZONE_B (PXP_VF_BAR0_START_MSDM_ZONE_B \ | 578 | #define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 |
587 | + \ | 579 | #define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B + \ |
588 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ | 580 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) |
589 | - 1) | 581 | |
590 | 582 | #define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 | |
591 | #define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 | 583 | #define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B + \ |
592 | #define PXP_VF_BAR0_END_USDM_ZONE_B (PXP_VF_BAR0_START_USDM_ZONE_B \ | 584 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) |
593 | + \ | 585 | |
594 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ | 586 | #define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 |
595 | - 1) | 587 | #define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B + \ |
596 | 588 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) | |
597 | #define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 | 589 | |
598 | #define PXP_VF_BAR0_END_XSDM_ZONE_B (PXP_VF_BAR0_START_XSDM_ZONE_B \ | 590 | #define PXP_VF_BAR0_START_GRC 0x3E00 |
599 | + \ | 591 | #define PXP_VF_BAR0_GRC_LENGTH 0x200 |
600 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ | 592 | #define PXP_VF_BAR0_END_GRC (PXP_VF_BAR0_START_GRC + \ |
601 | - 1) | 593 | PXP_VF_BAR0_GRC_LENGTH - 1) |
602 | 594 | ||
603 | #define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 | 595 | #define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 |
604 | #define PXP_VF_BAR0_END_YSDM_ZONE_B (PXP_VF_BAR0_START_YSDM_ZONE_B \ | 596 | #define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 |
605 | + \ | 597 | |
606 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ | 598 | #define PXP_VF_BAR0_START_IGU2 0x10000 |
607 | - 1) | 599 | #define PXP_VF_BAR0_IGU2_LENGTH 0xD000 |
608 | 600 | #define PXP_VF_BAR0_END_IGU2 (PXP_VF_BAR0_START_IGU2 + \ | |
609 | #define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 | 601 | PXP_VF_BAR0_IGU2_LENGTH - 1) |
610 | #define PXP_VF_BAR0_END_PSDM_ZONE_B (PXP_VF_BAR0_START_PSDM_ZONE_B \ | 602 | |
611 | + \ | 603 | #define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 |
612 | PXP_VF_BAR0_SDM_LENGTH_ZONE_B \ | 604 | |
613 | - 1) | 605 | #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 |
614 | 606 | #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 | |
615 | #define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 | ||
616 | #define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 | ||
617 | |||
618 | #define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 | ||
619 | |||
620 | #define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 | ||
621 | #define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 | ||
622 | 607 | ||
623 | /* ILT Records */ | 608 | /* ILT Records */ |
624 | #define PXP_NUM_ILT_RECORDS_BB 7600 | 609 | #define PXP_NUM_ILT_RECORDS_BB 7600 |
625 | #define PXP_NUM_ILT_RECORDS_K2 11000 | 610 | #define PXP_NUM_ILT_RECORDS_K2 11000 |
626 | #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) | 611 | #define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) |
627 | #define PXP_QUEUES_ZONE_MAX_NUM 320 | 612 | |
613 | /* Host Interface */ | ||
614 | #define PXP_QUEUES_ZONE_MAX_NUM 320 | ||
615 | |||
628 | /*****************/ | 616 | /*****************/ |
629 | /* PRM CONSTANTS */ | 617 | /* PRM CONSTANTS */ |
630 | /*****************/ | 618 | /*****************/ |
631 | #define PRM_DMA_PAD_BYTES_NUM 2 | 619 | #define PRM_DMA_PAD_BYTES_NUM 2 |
620 | |||
632 | /*****************/ | 621 | /*****************/ |
633 | /* SDMs CONSTANTS */ | 622 | /* SDMs CONSTANTS */ |
634 | /*****************/ | 623 | /*****************/ |
635 | 624 | ||
636 | #define SDM_OP_GEN_TRIG_NONE 0 | 625 | #define SDM_OP_GEN_TRIG_NONE 0 |
637 | #define SDM_OP_GEN_TRIG_WAKE_THREAD 1 | 626 | #define SDM_OP_GEN_TRIG_WAKE_THREAD 1 |
638 | #define SDM_OP_GEN_TRIG_AGG_INT 2 | 627 | #define SDM_OP_GEN_TRIG_AGG_INT 2 |
639 | #define SDM_OP_GEN_TRIG_LOADER 4 | 628 | #define SDM_OP_GEN_TRIG_LOADER 4 |
640 | #define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 | 629 | #define SDM_OP_GEN_TRIG_INDICATE_ERROR 6 |
641 | #define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 | 630 | #define SDM_OP_GEN_TRIG_INC_ORDER_CNT 9 |
642 | 631 | ||
@@ -644,26 +633,26 @@ | |||
644 | /* Completion types */ | 633 | /* Completion types */ |
645 | /********************/ | 634 | /********************/ |
646 | 635 | ||
647 | #define SDM_COMP_TYPE_NONE 0 | 636 | #define SDM_COMP_TYPE_NONE 0 |
648 | #define SDM_COMP_TYPE_WAKE_THREAD 1 | 637 | #define SDM_COMP_TYPE_WAKE_THREAD 1 |
649 | #define SDM_COMP_TYPE_AGG_INT 2 | 638 | #define SDM_COMP_TYPE_AGG_INT 2 |
650 | #define SDM_COMP_TYPE_CM 3 | 639 | #define SDM_COMP_TYPE_CM 3 |
651 | #define SDM_COMP_TYPE_LOADER 4 | 640 | #define SDM_COMP_TYPE_LOADER 4 |
652 | #define SDM_COMP_TYPE_PXP 5 | 641 | #define SDM_COMP_TYPE_PXP 5 |
653 | #define SDM_COMP_TYPE_INDICATE_ERROR 6 | 642 | #define SDM_COMP_TYPE_INDICATE_ERROR 6 |
654 | #define SDM_COMP_TYPE_RELEASE_THREAD 7 | 643 | #define SDM_COMP_TYPE_RELEASE_THREAD 7 |
655 | #define SDM_COMP_TYPE_RAM 8 | 644 | #define SDM_COMP_TYPE_RAM 8 |
656 | #define SDM_COMP_TYPE_INC_ORDER_CNT 9 | 645 | #define SDM_COMP_TYPE_INC_ORDER_CNT 9 |
657 | 646 | ||
658 | /*****************/ | 647 | /*****************/ |
659 | /* PBF Constants */ | 648 | /* PBF CONSTANTS */ |
660 | /*****************/ | 649 | /*****************/ |
661 | 650 | ||
662 | /* Number of PBF command queue lines. Each line is 32B. */ | 651 | /* Number of PBF command queue lines. Each line is 32B. */ |
663 | #define PBF_MAX_CMD_LINES 3328 | 652 | #define PBF_MAX_CMD_LINES 3328 |
664 | 653 | ||
665 | /* Number of BTB blocks. Each block is 256B. */ | 654 | /* Number of BTB blocks. Each block is 256B. */ |
666 | #define BTB_MAX_BLOCKS 1440 | 655 | #define BTB_MAX_BLOCKS 1440 |
667 | 656 | ||
668 | /*****************/ | 657 | /*****************/ |
669 | /* PRS CONSTANTS */ | 658 | /* PRS CONSTANTS */ |
@@ -671,14 +660,7 @@ | |||
671 | 660 | ||
672 | #define PRS_GFT_CAM_LINES_NO_MATCH 31 | 661 | #define PRS_GFT_CAM_LINES_NO_MATCH 31 |
673 | 662 | ||
674 | /* Async data KCQ CQE */ | 663 | /* Interrupt coalescing TimeSet */ |
675 | struct async_data { | ||
676 | __le32 cid; | ||
677 | __le16 itid; | ||
678 | u8 error_code; | ||
679 | u8 fw_debug_param; | ||
680 | }; | ||
681 | |||
682 | struct coalescing_timeset { | 664 | struct coalescing_timeset { |
683 | u8 value; | 665 | u8 value; |
684 | #define COALESCING_TIMESET_TIMESET_MASK 0x7F | 666 | #define COALESCING_TIMESET_TIMESET_MASK 0x7F |
@@ -692,23 +674,32 @@ struct common_queue_zone { | |||
692 | __le16 reserved; | 674 | __le16 reserved; |
693 | }; | 675 | }; |
694 | 676 | ||
677 | /* ETH Rx producers data */ | ||
695 | struct eth_rx_prod_data { | 678 | struct eth_rx_prod_data { |
696 | __le16 bd_prod; | 679 | __le16 bd_prod; |
697 | __le16 cqe_prod; | 680 | __le16 cqe_prod; |
698 | }; | 681 | }; |
699 | 682 | ||
700 | struct regpair { | 683 | struct tcp_ulp_connect_done_params { |
701 | __le32 lo; | 684 | __le16 mss; |
702 | __le32 hi; | 685 | u8 snd_wnd_scale; |
686 | u8 flags; | ||
687 | #define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_MASK 0x1 | ||
688 | #define TCP_ULP_CONNECT_DONE_PARAMS_TS_EN_SHIFT 0 | ||
689 | #define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_MASK 0x7F | ||
690 | #define TCP_ULP_CONNECT_DONE_PARAMS_RESERVED_SHIFT 1 | ||
703 | }; | 691 | }; |
704 | 692 | ||
705 | struct vf_pf_channel_eqe_data { | 693 | struct iscsi_connect_done_results { |
706 | struct regpair msg_addr; | 694 | __le16 icid; |
695 | __le16 conn_id; | ||
696 | struct tcp_ulp_connect_done_params params; | ||
707 | }; | 697 | }; |
708 | 698 | ||
709 | struct iscsi_eqe_data { | 699 | struct iscsi_eqe_data { |
710 | __le32 cid; | 700 | __le16 icid; |
711 | __le16 conn_id; | 701 | __le16 conn_id; |
702 | __le16 reserved; | ||
712 | u8 error_code; | 703 | u8 error_code; |
713 | u8 error_pdu_opcode_reserved; | 704 | u8 error_pdu_opcode_reserved; |
714 | #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F | 705 | #define ISCSI_EQE_DATA_ERROR_PDU_OPCODE_MASK 0x3F |
@@ -719,52 +710,6 @@ struct iscsi_eqe_data { | |||
719 | #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 | 710 | #define ISCSI_EQE_DATA_RESERVED0_SHIFT 7 |
720 | }; | 711 | }; |
721 | 712 | ||
722 | struct rdma_eqe_destroy_qp { | ||
723 | __le32 cid; | ||
724 | u8 reserved[4]; | ||
725 | }; | ||
726 | |||
727 | union rdma_eqe_data { | ||
728 | struct regpair async_handle; | ||
729 | struct rdma_eqe_destroy_qp rdma_destroy_qp_data; | ||
730 | }; | ||
731 | |||
732 | struct malicious_vf_eqe_data { | ||
733 | u8 vf_id; | ||
734 | u8 err_id; | ||
735 | __le16 reserved[3]; | ||
736 | }; | ||
737 | |||
738 | struct initial_cleanup_eqe_data { | ||
739 | u8 vf_id; | ||
740 | u8 reserved[7]; | ||
741 | }; | ||
742 | |||
743 | /* Event Data Union */ | ||
744 | union event_ring_data { | ||
745 | u8 bytes[8]; | ||
746 | struct vf_pf_channel_eqe_data vf_pf_channel; | ||
747 | struct iscsi_eqe_data iscsi_info; | ||
748 | union rdma_eqe_data rdma_data; | ||
749 | struct malicious_vf_eqe_data malicious_vf; | ||
750 | struct initial_cleanup_eqe_data vf_init_cleanup; | ||
751 | }; | ||
752 | |||
753 | /* Event Ring Entry */ | ||
754 | struct event_ring_entry { | ||
755 | u8 protocol_id; | ||
756 | u8 opcode; | ||
757 | __le16 reserved0; | ||
758 | __le16 echo; | ||
759 | u8 fw_return_code; | ||
760 | u8 flags; | ||
761 | #define EVENT_RING_ENTRY_ASYNC_MASK 0x1 | ||
762 | #define EVENT_RING_ENTRY_ASYNC_SHIFT 0 | ||
763 | #define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F | ||
764 | #define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 | ||
765 | union event_ring_data data; | ||
766 | }; | ||
767 | |||
768 | /* Multi function mode */ | 713 | /* Multi function mode */ |
769 | enum mf_mode { | 714 | enum mf_mode { |
770 | ERROR_MODE /* Unsupported mode */, | 715 | ERROR_MODE /* Unsupported mode */, |
@@ -781,13 +726,31 @@ enum protocol_type { | |||
781 | PROTOCOLID_CORE, | 726 | PROTOCOLID_CORE, |
782 | PROTOCOLID_ETH, | 727 | PROTOCOLID_ETH, |
783 | PROTOCOLID_IWARP, | 728 | PROTOCOLID_IWARP, |
784 | PROTOCOLID_RESERVED5, | 729 | PROTOCOLID_RESERVED0, |
785 | PROTOCOLID_PREROCE, | 730 | PROTOCOLID_PREROCE, |
786 | PROTOCOLID_COMMON, | 731 | PROTOCOLID_COMMON, |
787 | PROTOCOLID_RESERVED6, | 732 | PROTOCOLID_RESERVED1, |
788 | MAX_PROTOCOL_TYPE | 733 | MAX_PROTOCOL_TYPE |
789 | }; | 734 | }; |
790 | 735 | ||
736 | struct regpair { | ||
737 | __le32 lo; | ||
738 | __le32 hi; | ||
739 | }; | ||
740 | |||
741 | /* RoCE Destroy Event Data */ | ||
742 | struct rdma_eqe_destroy_qp { | ||
743 | __le32 cid; | ||
744 | u8 reserved[4]; | ||
745 | }; | ||
746 | |||
747 | /* RDMA Event Data Union */ | ||
748 | union rdma_eqe_data { | ||
749 | struct regpair async_handle; | ||
750 | struct rdma_eqe_destroy_qp rdma_destroy_qp_data; | ||
751 | }; | ||
752 | |||
753 | /* Ustorm Queue Zone */ | ||
791 | struct ustorm_eth_queue_zone { | 754 | struct ustorm_eth_queue_zone { |
792 | struct coalescing_timeset int_coalescing_timeset; | 755 | struct coalescing_timeset int_coalescing_timeset; |
793 | u8 reserved[3]; | 756 | u8 reserved[3]; |
@@ -798,62 +761,71 @@ struct ustorm_queue_zone { | |||
798 | struct common_queue_zone common; | 761 | struct common_queue_zone common; |
799 | }; | 762 | }; |
800 | 763 | ||
801 | /* status block structure */ | 764 | /* Status block structure */ |
802 | struct cau_pi_entry { | 765 | struct cau_pi_entry { |
803 | u32 prod; | 766 | __le32 prod; |
804 | #define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF | 767 | #define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF |
805 | #define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 | 768 | #define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 |
806 | #define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F | 769 | #define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F |
807 | #define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 | 770 | #define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 |
808 | #define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 | 771 | #define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 |
809 | #define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 | 772 | #define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 |
810 | #define CAU_PI_ENTRY_RESERVED_MASK 0xFF | 773 | #define CAU_PI_ENTRY_RESERVED_MASK 0xFF |
811 | #define CAU_PI_ENTRY_RESERVED_SHIFT 24 | 774 | #define CAU_PI_ENTRY_RESERVED_SHIFT 24 |
812 | }; | 775 | }; |
813 | 776 | ||
814 | /* status block structure */ | 777 | /* Status block structure */ |
815 | struct cau_sb_entry { | 778 | struct cau_sb_entry { |
816 | u32 data; | 779 | __le32 data; |
817 | #define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF | 780 | #define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF |
818 | #define CAU_SB_ENTRY_SB_PROD_SHIFT 0 | 781 | #define CAU_SB_ENTRY_SB_PROD_SHIFT 0 |
819 | #define CAU_SB_ENTRY_STATE0_MASK 0xF | 782 | #define CAU_SB_ENTRY_STATE0_MASK 0xF |
820 | #define CAU_SB_ENTRY_STATE0_SHIFT 24 | 783 | #define CAU_SB_ENTRY_STATE0_SHIFT 24 |
821 | #define CAU_SB_ENTRY_STATE1_MASK 0xF | 784 | #define CAU_SB_ENTRY_STATE1_MASK 0xF |
822 | #define CAU_SB_ENTRY_STATE1_SHIFT 28 | 785 | #define CAU_SB_ENTRY_STATE1_SHIFT 28 |
823 | u32 params; | 786 | __le32 params; |
824 | #define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F | 787 | #define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F |
825 | #define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 | 788 | #define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 |
826 | #define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F | 789 | #define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F |
827 | #define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 | 790 | #define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 |
828 | #define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 | 791 | #define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 |
829 | #define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 | 792 | #define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 |
830 | #define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 | 793 | #define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 |
831 | #define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 | 794 | #define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 |
832 | #define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF | 795 | #define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF |
833 | #define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 | 796 | #define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 |
834 | #define CAU_SB_ENTRY_VF_VALID_MASK 0x1 | 797 | #define CAU_SB_ENTRY_VF_VALID_MASK 0x1 |
835 | #define CAU_SB_ENTRY_VF_VALID_SHIFT 26 | 798 | #define CAU_SB_ENTRY_VF_VALID_SHIFT 26 |
836 | #define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF | 799 | #define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF |
837 | #define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 | 800 | #define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 |
838 | #define CAU_SB_ENTRY_TPH_MASK 0x1 | 801 | #define CAU_SB_ENTRY_TPH_MASK 0x1 |
839 | #define CAU_SB_ENTRY_TPH_SHIFT 31 | 802 | #define CAU_SB_ENTRY_TPH_SHIFT 31 |
840 | }; | 803 | }; |
841 | 804 | ||
842 | /* core doorbell data */ | 805 | /* Igu cleanup bit values to distinguish between clean or producer consumer |
806 | * update. | ||
807 | */ | ||
808 | enum command_type_bit { | ||
809 | IGU_COMMAND_TYPE_NOP = 0, | ||
810 | IGU_COMMAND_TYPE_SET = 1, | ||
811 | MAX_COMMAND_TYPE_BIT | ||
812 | }; | ||
813 | |||
814 | /* Core doorbell data */ | ||
843 | struct core_db_data { | 815 | struct core_db_data { |
844 | u8 params; | 816 | u8 params; |
845 | #define CORE_DB_DATA_DEST_MASK 0x3 | 817 | #define CORE_DB_DATA_DEST_MASK 0x3 |
846 | #define CORE_DB_DATA_DEST_SHIFT 0 | 818 | #define CORE_DB_DATA_DEST_SHIFT 0 |
847 | #define CORE_DB_DATA_AGG_CMD_MASK 0x3 | 819 | #define CORE_DB_DATA_AGG_CMD_MASK 0x3 |
848 | #define CORE_DB_DATA_AGG_CMD_SHIFT 2 | 820 | #define CORE_DB_DATA_AGG_CMD_SHIFT 2 |
849 | #define CORE_DB_DATA_BYPASS_EN_MASK 0x1 | 821 | #define CORE_DB_DATA_BYPASS_EN_MASK 0x1 |
850 | #define CORE_DB_DATA_BYPASS_EN_SHIFT 4 | 822 | #define CORE_DB_DATA_BYPASS_EN_SHIFT 4 |
851 | #define CORE_DB_DATA_RESERVED_MASK 0x1 | 823 | #define CORE_DB_DATA_RESERVED_MASK 0x1 |
852 | #define CORE_DB_DATA_RESERVED_SHIFT 5 | 824 | #define CORE_DB_DATA_RESERVED_SHIFT 5 |
853 | #define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 | 825 | #define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 |
854 | #define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 | 826 | #define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 |
855 | u8 agg_flags; | 827 | u8 agg_flags; |
856 | __le16 spq_prod; | 828 | __le16 spq_prod; |
857 | }; | 829 | }; |
858 | 830 | ||
859 | /* Enum of doorbell aggregative command selection */ | 831 | /* Enum of doorbell aggregative command selection */ |
@@ -909,67 +881,69 @@ struct db_l2_dpm_sge { | |||
909 | struct regpair addr; | 881 | struct regpair addr; |
910 | __le16 nbytes; | 882 | __le16 nbytes; |
911 | __le16 bitfields; | 883 | __le16 bitfields; |
912 | #define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF | 884 | #define DB_L2_DPM_SGE_TPH_ST_INDEX_MASK 0x1FF |
913 | #define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 | 885 | #define DB_L2_DPM_SGE_TPH_ST_INDEX_SHIFT 0 |
914 | #define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 | 886 | #define DB_L2_DPM_SGE_RESERVED0_MASK 0x3 |
915 | #define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 | 887 | #define DB_L2_DPM_SGE_RESERVED0_SHIFT 9 |
916 | #define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 | 888 | #define DB_L2_DPM_SGE_ST_VALID_MASK 0x1 |
917 | #define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 | 889 | #define DB_L2_DPM_SGE_ST_VALID_SHIFT 11 |
918 | #define DB_L2_DPM_SGE_RESERVED1_MASK 0xF | 890 | #define DB_L2_DPM_SGE_RESERVED1_MASK 0xF |
919 | #define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 | 891 | #define DB_L2_DPM_SGE_RESERVED1_SHIFT 12 |
920 | __le32 reserved2; | 892 | __le32 reserved2; |
921 | }; | 893 | }; |
922 | 894 | ||
923 | /* Structure for doorbell address, in legacy mode */ | 895 | /* Structure for doorbell address, in legacy mode */ |
924 | struct db_legacy_addr { | 896 | struct db_legacy_addr { |
925 | __le32 addr; | 897 | __le32 addr; |
926 | #define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 | 898 | #define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 |
927 | #define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 | 899 | #define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 |
928 | #define DB_LEGACY_ADDR_DEMS_MASK 0x7 | 900 | #define DB_LEGACY_ADDR_DEMS_MASK 0x7 |
929 | #define DB_LEGACY_ADDR_DEMS_SHIFT 2 | 901 | #define DB_LEGACY_ADDR_DEMS_SHIFT 2 |
930 | #define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF | 902 | #define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF |
931 | #define DB_LEGACY_ADDR_ICID_SHIFT 5 | 903 | #define DB_LEGACY_ADDR_ICID_SHIFT 5 |
932 | }; | 904 | }; |
933 | 905 | ||
934 | /* Structure for doorbell address, in PWM mode */ | 906 | /* Structure for doorbell address, in PWM mode */ |
935 | struct db_pwm_addr { | 907 | struct db_pwm_addr { |
936 | __le32 addr; | 908 | __le32 addr; |
937 | #define DB_PWM_ADDR_RESERVED0_MASK 0x7 | 909 | #define DB_PWM_ADDR_RESERVED0_MASK 0x7 |
938 | #define DB_PWM_ADDR_RESERVED0_SHIFT 0 | 910 | #define DB_PWM_ADDR_RESERVED0_SHIFT 0 |
939 | #define DB_PWM_ADDR_OFFSET_MASK 0x7F | 911 | #define DB_PWM_ADDR_OFFSET_MASK 0x7F |
940 | #define DB_PWM_ADDR_OFFSET_SHIFT 3 | 912 | #define DB_PWM_ADDR_OFFSET_SHIFT 3 |
941 | #define DB_PWM_ADDR_WID_MASK 0x3 | 913 | #define DB_PWM_ADDR_WID_MASK 0x3 |
942 | #define DB_PWM_ADDR_WID_SHIFT 10 | 914 | #define DB_PWM_ADDR_WID_SHIFT 10 |
943 | #define DB_PWM_ADDR_DPI_MASK 0xFFFF | 915 | #define DB_PWM_ADDR_DPI_MASK 0xFFFF |
944 | #define DB_PWM_ADDR_DPI_SHIFT 12 | 916 | #define DB_PWM_ADDR_DPI_SHIFT 12 |
945 | #define DB_PWM_ADDR_RESERVED1_MASK 0xF | 917 | #define DB_PWM_ADDR_RESERVED1_MASK 0xF |
946 | #define DB_PWM_ADDR_RESERVED1_SHIFT 28 | 918 | #define DB_PWM_ADDR_RESERVED1_SHIFT 28 |
947 | }; | 919 | }; |
948 | 920 | ||
949 | /* Parameters to RoCE firmware, passed in EDPM doorbell */ | 921 | /* Parameters to RDMA firmware, passed in EDPM doorbell */ |
950 | struct db_rdma_dpm_params { | 922 | struct db_rdma_dpm_params { |
951 | __le32 params; | 923 | __le32 params; |
952 | #define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F | 924 | #define DB_RDMA_DPM_PARAMS_SIZE_MASK 0x3F |
953 | #define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 | 925 | #define DB_RDMA_DPM_PARAMS_SIZE_SHIFT 0 |
954 | #define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 | 926 | #define DB_RDMA_DPM_PARAMS_DPM_TYPE_MASK 0x3 |
955 | #define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 | 927 | #define DB_RDMA_DPM_PARAMS_DPM_TYPE_SHIFT 6 |
956 | #define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF | 928 | #define DB_RDMA_DPM_PARAMS_OPCODE_MASK 0xFF |
957 | #define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 | 929 | #define DB_RDMA_DPM_PARAMS_OPCODE_SHIFT 8 |
958 | #define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF | 930 | #define DB_RDMA_DPM_PARAMS_WQE_SIZE_MASK 0x7FF |
959 | #define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 | 931 | #define DB_RDMA_DPM_PARAMS_WQE_SIZE_SHIFT 16 |
960 | #define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 | 932 | #define DB_RDMA_DPM_PARAMS_RESERVED0_MASK 0x1 |
961 | #define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 | 933 | #define DB_RDMA_DPM_PARAMS_RESERVED0_SHIFT 27 |
962 | #define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 | 934 | #define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_MASK 0x1 |
963 | #define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 | 935 | #define DB_RDMA_DPM_PARAMS_COMPLETION_FLG_SHIFT 28 |
964 | #define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 | 936 | #define DB_RDMA_DPM_PARAMS_S_FLG_MASK 0x1 |
965 | #define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 | 937 | #define DB_RDMA_DPM_PARAMS_S_FLG_SHIFT 29 |
966 | #define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 | 938 | #define DB_RDMA_DPM_PARAMS_RESERVED1_MASK 0x1 |
967 | #define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 | 939 | #define DB_RDMA_DPM_PARAMS_RESERVED1_SHIFT 30 |
968 | #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 | 940 | #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_MASK 0x1 |
969 | #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 | 941 | #define DB_RDMA_DPM_PARAMS_CONN_TYPE_IS_IWARP_SHIFT 31 |
970 | }; | 942 | }; |
971 | 943 | ||
972 | /* Structure for doorbell data, in ROCE DPM mode, for 1st db in a DPM burst */ | 944 | /* Structure for doorbell data, in RDMA DPM mode, for the first doorbell in a |
945 | * DPM burst. | ||
946 | */ | ||
973 | struct db_rdma_dpm_data { | 947 | struct db_rdma_dpm_data { |
974 | __le16 icid; | 948 | __le16 icid; |
975 | __le16 prod_val; | 949 | __le16 prod_val; |
@@ -987,22 +961,22 @@ enum igu_int_cmd { | |||
987 | 961 | ||
988 | /* IGU producer or consumer update command */ | 962 | /* IGU producer or consumer update command */ |
989 | struct igu_prod_cons_update { | 963 | struct igu_prod_cons_update { |
990 | u32 sb_id_and_flags; | 964 | __le32 sb_id_and_flags; |
991 | #define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF | 965 | #define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF |
992 | #define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 | 966 | #define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 |
993 | #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 | 967 | #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 |
994 | #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 | 968 | #define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 |
995 | #define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 | 969 | #define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 |
996 | #define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 | 970 | #define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 |
997 | #define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 | 971 | #define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 |
998 | #define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 | 972 | #define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 |
999 | #define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 | 973 | #define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 |
1000 | #define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 | 974 | #define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 |
1001 | #define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 | 975 | #define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 |
1002 | #define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 | 976 | #define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 |
1003 | #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 | 977 | #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 |
1004 | #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 | 978 | #define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 |
1005 | u32 reserved1; | 979 | __le32 reserved1; |
1006 | }; | 980 | }; |
1007 | 981 | ||
1008 | /* Igu segments access for default status block only */ | 982 | /* Igu segments access for default status block only */ |
@@ -1012,38 +986,63 @@ enum igu_seg_access { | |||
1012 | MAX_IGU_SEG_ACCESS | 986 | MAX_IGU_SEG_ACCESS |
1013 | }; | 987 | }; |
1014 | 988 | ||
989 | /* Enumeration for L3 type field of parsing_and_err_flags. | ||
990 | * L3Type: 0 - unknown (not ip), 1 - Ipv4, 2 - Ipv6 | ||
991 | * (This field can be filled according to the last-ethertype) | ||
992 | */ | ||
993 | enum l3_type { | ||
994 | e_l3_type_unknown, | ||
995 | e_l3_type_ipv4, | ||
996 | e_l3_type_ipv6, | ||
997 | MAX_L3_TYPE | ||
998 | }; | ||
999 | |||
1000 | /* Enumeration for l4Protocol field of parsing_and_err_flags. | ||
1001 | * L4-protocol: 0 - none, 1 - TCP, 2 - UDP. | ||
1002 | * If the packet is IPv4 fragment, and its not the first fragment, the | ||
1003 | * protocol-type should be set to none. | ||
1004 | */ | ||
1005 | enum l4_protocol { | ||
1006 | e_l4_protocol_none, | ||
1007 | e_l4_protocol_tcp, | ||
1008 | e_l4_protocol_udp, | ||
1009 | MAX_L4_PROTOCOL | ||
1010 | }; | ||
1011 | |||
1012 | /* Parsing and error flags field */ | ||
1015 | struct parsing_and_err_flags { | 1013 | struct parsing_and_err_flags { |
1016 | __le16 flags; | 1014 | __le16 flags; |
1017 | #define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 | 1015 | #define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 |
1018 | #define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 | 1016 | #define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 |
1019 | #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 | 1017 | #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 |
1020 | #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 | 1018 | #define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 |
1021 | #define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 | 1019 | #define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 |
1022 | #define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 | 1020 | #define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 |
1023 | #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 | 1021 | #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 |
1024 | #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 | 1022 | #define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 |
1025 | #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 | 1023 | #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 |
1026 | #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 | 1024 | #define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 |
1027 | #define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 | 1025 | #define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 |
1028 | #define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 | 1026 | #define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 |
1029 | #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 | 1027 | #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 |
1030 | #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 | 1028 | #define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 |
1031 | #define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 | 1029 | #define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 |
1032 | #define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 | 1030 | #define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 |
1033 | #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 | 1031 | #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 |
1034 | #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 | 1032 | #define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 |
1035 | #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 | 1033 | #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 |
1036 | #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 | 1034 | #define PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 |
1037 | #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 | 1035 | #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 |
1038 | #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 | 1036 | #define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 |
1039 | #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 | 1037 | #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 |
1040 | #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 | 1038 | #define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 |
1041 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 | 1039 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 |
1042 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 | 1040 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 |
1043 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 | 1041 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 |
1044 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 | 1042 | #define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 |
1045 | }; | 1043 | }; |
1046 | 1044 | ||
1045 | /* Parsing error flags bitmap */ | ||
1047 | struct parsing_err_flags { | 1046 | struct parsing_err_flags { |
1048 | __le16 flags; | 1047 | __le16 flags; |
1049 | #define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 | 1048 | #define PARSING_ERR_FLAGS_MAC_ERROR_MASK 0x1 |
@@ -1080,266 +1079,260 @@ struct parsing_err_flags { | |||
1080 | #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 | 1079 | #define PARSING_ERR_FLAGS_TUNNEL_L4_CHKSM_ERROR_SHIFT 15 |
1081 | }; | 1080 | }; |
1082 | 1081 | ||
1082 | /* Pb context */ | ||
1083 | struct pb_context { | 1083 | struct pb_context { |
1084 | __le32 crc[4]; | 1084 | __le32 crc[4]; |
1085 | }; | 1085 | }; |
1086 | 1086 | ||
1087 | /* Concrete Function ID */ | ||
1087 | struct pxp_concrete_fid { | 1088 | struct pxp_concrete_fid { |
1088 | __le16 fid; | 1089 | __le16 fid; |
1089 | #define PXP_CONCRETE_FID_PFID_MASK 0xF | 1090 | #define PXP_CONCRETE_FID_PFID_MASK 0xF |
1090 | #define PXP_CONCRETE_FID_PFID_SHIFT 0 | 1091 | #define PXP_CONCRETE_FID_PFID_SHIFT 0 |
1091 | #define PXP_CONCRETE_FID_PORT_MASK 0x3 | 1092 | #define PXP_CONCRETE_FID_PORT_MASK 0x3 |
1092 | #define PXP_CONCRETE_FID_PORT_SHIFT 4 | 1093 | #define PXP_CONCRETE_FID_PORT_SHIFT 4 |
1093 | #define PXP_CONCRETE_FID_PATH_MASK 0x1 | 1094 | #define PXP_CONCRETE_FID_PATH_MASK 0x1 |
1094 | #define PXP_CONCRETE_FID_PATH_SHIFT 6 | 1095 | #define PXP_CONCRETE_FID_PATH_SHIFT 6 |
1095 | #define PXP_CONCRETE_FID_VFVALID_MASK 0x1 | 1096 | #define PXP_CONCRETE_FID_VFVALID_MASK 0x1 |
1096 | #define PXP_CONCRETE_FID_VFVALID_SHIFT 7 | 1097 | #define PXP_CONCRETE_FID_VFVALID_SHIFT 7 |
1097 | #define PXP_CONCRETE_FID_VFID_MASK 0xFF | 1098 | #define PXP_CONCRETE_FID_VFID_MASK 0xFF |
1098 | #define PXP_CONCRETE_FID_VFID_SHIFT 8 | 1099 | #define PXP_CONCRETE_FID_VFID_SHIFT 8 |
1099 | }; | 1100 | }; |
1100 | 1101 | ||
1102 | /* Concrete Function ID */ | ||
1101 | struct pxp_pretend_concrete_fid { | 1103 | struct pxp_pretend_concrete_fid { |
1102 | __le16 fid; | 1104 | __le16 fid; |
1103 | #define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF | 1105 | #define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF |
1104 | #define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 | 1106 | #define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 |
1105 | #define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 | 1107 | #define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 |
1106 | #define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 | 1108 | #define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 |
1107 | #define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 | 1109 | #define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 |
1108 | #define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 | 1110 | #define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 |
1109 | #define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF | 1111 | #define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF |
1110 | #define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 | 1112 | #define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 |
1111 | }; | 1113 | }; |
1112 | 1114 | ||
1115 | /* Function ID */ | ||
1113 | union pxp_pretend_fid { | 1116 | union pxp_pretend_fid { |
1114 | struct pxp_pretend_concrete_fid concrete_fid; | 1117 | struct pxp_pretend_concrete_fid concrete_fid; |
1115 | __le16 opaque_fid; | 1118 | __le16 opaque_fid; |
1116 | }; | 1119 | }; |
1117 | 1120 | ||
1118 | /* Pxp Pretend Command Register. */ | 1121 | /* Pxp Pretend Command Register */ |
1119 | struct pxp_pretend_cmd { | 1122 | struct pxp_pretend_cmd { |
1120 | union pxp_pretend_fid fid; | 1123 | union pxp_pretend_fid fid; |
1121 | __le16 control; | 1124 | __le16 control; |
1122 | #define PXP_PRETEND_CMD_PATH_MASK 0x1 | 1125 | #define PXP_PRETEND_CMD_PATH_MASK 0x1 |
1123 | #define PXP_PRETEND_CMD_PATH_SHIFT 0 | 1126 | #define PXP_PRETEND_CMD_PATH_SHIFT 0 |
1124 | #define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 | 1127 | #define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 |
1125 | #define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 | 1128 | #define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 |
1126 | #define PXP_PRETEND_CMD_PORT_MASK 0x3 | 1129 | #define PXP_PRETEND_CMD_PORT_MASK 0x3 |
1127 | #define PXP_PRETEND_CMD_PORT_SHIFT 2 | 1130 | #define PXP_PRETEND_CMD_PORT_SHIFT 2 |
1128 | #define PXP_PRETEND_CMD_RESERVED0_MASK 0xF | 1131 | #define PXP_PRETEND_CMD_RESERVED0_MASK 0xF |
1129 | #define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 | 1132 | #define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 |
1130 | #define PXP_PRETEND_CMD_RESERVED1_MASK 0xF | 1133 | #define PXP_PRETEND_CMD_RESERVED1_MASK 0xF |
1131 | #define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 | 1134 | #define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 |
1132 | #define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 | 1135 | #define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 |
1133 | #define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 | 1136 | #define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 |
1134 | #define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 | 1137 | #define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 |
1135 | #define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 | 1138 | #define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 |
1136 | #define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 | 1139 | #define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 |
1137 | #define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 | 1140 | #define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 |
1138 | #define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 | 1141 | #define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 |
1139 | #define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 | 1142 | #define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 |
1140 | }; | 1143 | }; |
1141 | 1144 | ||
1142 | /* PTT Record in PXP Admin Window. */ | 1145 | /* PTT Record in PXP Admin Window */ |
1143 | struct pxp_ptt_entry { | 1146 | struct pxp_ptt_entry { |
1144 | __le32 offset; | 1147 | __le32 offset; |
1145 | #define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF | 1148 | #define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF |
1146 | #define PXP_PTT_ENTRY_OFFSET_SHIFT 0 | 1149 | #define PXP_PTT_ENTRY_OFFSET_SHIFT 0 |
1147 | #define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF | 1150 | #define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF |
1148 | #define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 | 1151 | #define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 |
1149 | struct pxp_pretend_cmd pretend; | 1152 | struct pxp_pretend_cmd pretend; |
1150 | }; | 1153 | }; |
1151 | 1154 | ||
1152 | /* VF Zone A Permission Register. */ | 1155 | /* VF Zone A Permission Register */ |
1153 | struct pxp_vf_zone_a_permission { | 1156 | struct pxp_vf_zone_a_permission { |
1154 | __le32 control; | 1157 | __le32 control; |
1155 | #define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF | 1158 | #define PXP_VF_ZONE_A_PERMISSION_VFID_MASK 0xFF |
1156 | #define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 | 1159 | #define PXP_VF_ZONE_A_PERMISSION_VFID_SHIFT 0 |
1157 | #define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 | 1160 | #define PXP_VF_ZONE_A_PERMISSION_VALID_MASK 0x1 |
1158 | #define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 | 1161 | #define PXP_VF_ZONE_A_PERMISSION_VALID_SHIFT 8 |
1159 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F | 1162 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED0_MASK 0x7F |
1160 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 | 1163 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED0_SHIFT 9 |
1161 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF | 1164 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED1_MASK 0xFFFF |
1162 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 | 1165 | #define PXP_VF_ZONE_A_PERMISSION_RESERVED1_SHIFT 16 |
1163 | }; | 1166 | }; |
1164 | 1167 | ||
1165 | /* RSS hash type */ | 1168 | /* Rdif context */ |
1166 | struct rdif_task_context { | 1169 | struct rdif_task_context { |
1167 | __le32 initial_ref_tag; | 1170 | __le32 initial_ref_tag; |
1168 | __le16 app_tag_value; | 1171 | __le16 app_tag_value; |
1169 | __le16 app_tag_mask; | 1172 | __le16 app_tag_mask; |
1170 | u8 flags0; | 1173 | u8 flags0; |
1171 | #define RDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 | 1174 | #define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 |
1172 | #define RDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 | 1175 | #define RDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 |
1173 | #define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 | 1176 | #define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 |
1174 | #define RDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 | 1177 | #define RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 |
1175 | #define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 | 1178 | #define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 |
1176 | #define RDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 | 1179 | #define RDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 |
1177 | #define RDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 | 1180 | #define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 |
1178 | #define RDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 | 1181 | #define RDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 |
1179 | #define RDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 | 1182 | #define RDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 |
1180 | #define RDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 | 1183 | #define RDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 |
1181 | #define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 | 1184 | #define RDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 |
1182 | #define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 | 1185 | #define RDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 |
1183 | #define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 | 1186 | #define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 |
1184 | #define RDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 7 | 1187 | #define RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 7 |
1185 | u8 partial_dif_data[7]; | 1188 | u8 partial_dif_data[7]; |
1186 | __le16 partial_crc_value; | 1189 | __le16 partial_crc_value; |
1187 | __le16 partial_checksum_value; | 1190 | __le16 partial_checksum_value; |
1188 | __le32 offset_in_io; | 1191 | __le32 offset_in_io; |
1189 | __le16 flags1; | 1192 | __le16 flags1; |
1190 | #define RDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 | 1193 | #define RDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 |
1191 | #define RDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 | 1194 | #define RDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 |
1192 | #define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 | 1195 | #define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 |
1193 | #define RDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 | 1196 | #define RDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 |
1194 | #define RDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 | 1197 | #define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 |
1195 | #define RDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 | 1198 | #define RDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 |
1196 | #define RDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 | 1199 | #define RDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 |
1197 | #define RDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 | 1200 | #define RDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 |
1198 | #define RDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 | 1201 | #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 |
1199 | #define RDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 | 1202 | #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 |
1200 | #define RDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 | 1203 | #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 |
1201 | #define RDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 | 1204 | #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 |
1202 | #define RDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 | 1205 | #define RDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 |
1203 | #define RDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 | 1206 | #define RDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 |
1204 | #define RDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 | 1207 | #define RDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 |
1205 | #define RDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 | 1208 | #define RDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 |
1206 | #define RDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 | 1209 | #define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 |
1207 | #define RDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 | 1210 | #define RDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 |
1208 | #define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 | 1211 | #define RDIF_TASK_CONTEXT_RESERVED0_MASK 0x1 |
1209 | #define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 | 1212 | #define RDIF_TASK_CONTEXT_RESERVED0_SHIFT 12 |
1210 | #define RDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 | 1213 | #define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 |
1211 | #define RDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 | 1214 | #define RDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 |
1212 | #define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 | 1215 | #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 |
1213 | #define RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 14 | 1216 | #define RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 |
1214 | #define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 | 1217 | #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 |
1215 | #define RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 15 | 1218 | #define RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 |
1216 | __le16 state; | 1219 | __le16 state; |
1217 | #define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_MASK 0xF | 1220 | #define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_MASK 0xF |
1218 | #define RDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFT_SHIFT 0 | 1221 | #define RDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_SHIFT 0 |
1219 | #define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_MASK 0xF | 1222 | #define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_MASK 0xF |
1220 | #define RDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFT_SHIFT 4 | 1223 | #define RDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_SHIFT 4 |
1221 | #define RDIF_TASK_CONTEXT_ERRORINIO_MASK 0x1 | 1224 | #define RDIF_TASK_CONTEXT_ERROR_IN_IO_MASK 0x1 |
1222 | #define RDIF_TASK_CONTEXT_ERRORINIO_SHIFT 8 | 1225 | #define RDIF_TASK_CONTEXT_ERROR_IN_IO_SHIFT 8 |
1223 | #define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 | 1226 | #define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_MASK 0x1 |
1224 | #define RDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 | 1227 | #define RDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_SHIFT 9 |
1225 | #define RDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF | 1228 | #define RDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF |
1226 | #define RDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 10 | 1229 | #define RDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 10 |
1227 | #define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 | 1230 | #define RDIF_TASK_CONTEXT_RESERVED1_MASK 0x3 |
1228 | #define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 | 1231 | #define RDIF_TASK_CONTEXT_RESERVED1_SHIFT 14 |
1229 | __le32 reserved2; | 1232 | __le32 reserved2; |
1230 | }; | 1233 | }; |
1231 | 1234 | ||
1232 | /* RSS hash type */ | 1235 | /* Status block structure */ |
1233 | enum rss_hash_type { | 1236 | struct status_block_e4 { |
1234 | RSS_HASH_TYPE_DEFAULT = 0, | 1237 | __le16 pi_array[PIS_PER_SB_E4]; |
1235 | RSS_HASH_TYPE_IPV4 = 1, | ||
1236 | RSS_HASH_TYPE_TCP_IPV4 = 2, | ||
1237 | RSS_HASH_TYPE_IPV6 = 3, | ||
1238 | RSS_HASH_TYPE_TCP_IPV6 = 4, | ||
1239 | RSS_HASH_TYPE_UDP_IPV4 = 5, | ||
1240 | RSS_HASH_TYPE_UDP_IPV6 = 6, | ||
1241 | MAX_RSS_HASH_TYPE | ||
1242 | }; | ||
1243 | |||
1244 | /* status block structure */ | ||
1245 | struct status_block { | ||
1246 | __le16 pi_array[PIS_PER_SB]; | ||
1247 | __le32 sb_num; | 1238 | __le32 sb_num; |
1248 | #define STATUS_BLOCK_SB_NUM_MASK 0x1FF | 1239 | #define STATUS_BLOCK_E4_SB_NUM_MASK 0x1FF |
1249 | #define STATUS_BLOCK_SB_NUM_SHIFT 0 | 1240 | #define STATUS_BLOCK_E4_SB_NUM_SHIFT 0 |
1250 | #define STATUS_BLOCK_ZERO_PAD_MASK 0x7F | 1241 | #define STATUS_BLOCK_E4_ZERO_PAD_MASK 0x7F |
1251 | #define STATUS_BLOCK_ZERO_PAD_SHIFT 9 | 1242 | #define STATUS_BLOCK_E4_ZERO_PAD_SHIFT 9 |
1252 | #define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF | 1243 | #define STATUS_BLOCK_E4_ZERO_PAD2_MASK 0xFFFF |
1253 | #define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 | 1244 | #define STATUS_BLOCK_E4_ZERO_PAD2_SHIFT 16 |
1254 | __le32 prod_index; | 1245 | __le32 prod_index; |
1255 | #define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF | 1246 | #define STATUS_BLOCK_E4_PROD_INDEX_MASK 0xFFFFFF |
1256 | #define STATUS_BLOCK_PROD_INDEX_SHIFT 0 | 1247 | #define STATUS_BLOCK_E4_PROD_INDEX_SHIFT 0 |
1257 | #define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF | 1248 | #define STATUS_BLOCK_E4_ZERO_PAD3_MASK 0xFF |
1258 | #define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 | 1249 | #define STATUS_BLOCK_E4_ZERO_PAD3_SHIFT 24 |
1259 | }; | 1250 | }; |
1260 | 1251 | ||
1252 | /* Tdif context */ | ||
1261 | struct tdif_task_context { | 1253 | struct tdif_task_context { |
1262 | __le32 initial_ref_tag; | 1254 | __le32 initial_ref_tag; |
1263 | __le16 app_tag_value; | 1255 | __le16 app_tag_value; |
1264 | __le16 app_tag_mask; | 1256 | __le16 app_tag_mask; |
1265 | __le16 partial_crc_valueB; | 1257 | __le16 partial_crc_value_b; |
1266 | __le16 partial_checksum_valueB; | 1258 | __le16 partial_checksum_value_b; |
1267 | __le16 stateB; | 1259 | __le16 stateB; |
1268 | #define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_MASK 0xF | 1260 | #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_MASK 0xF |
1269 | #define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTB_SHIFT 0 | 1261 | #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_B_SHIFT 0 |
1270 | #define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_MASK 0xF | 1262 | #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_MASK 0xF |
1271 | #define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTB_SHIFT 4 | 1263 | #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_B_SHIFT 4 |
1272 | #define TDIF_TASK_CONTEXT_ERRORINIOB_MASK 0x1 | 1264 | #define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_MASK 0x1 |
1273 | #define TDIF_TASK_CONTEXT_ERRORINIOB_SHIFT 8 | 1265 | #define TDIF_TASK_CONTEXT_ERROR_IN_IO_B_SHIFT 8 |
1274 | #define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_MASK 0x1 | 1266 | #define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_MASK 0x1 |
1275 | #define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOW_SHIFT 9 | 1267 | #define TDIF_TASK_CONTEXT_CHECKSUM_VERFLOW_SHIFT 9 |
1276 | #define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F | 1268 | #define TDIF_TASK_CONTEXT_RESERVED0_MASK 0x3F |
1277 | #define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 | 1269 | #define TDIF_TASK_CONTEXT_RESERVED0_SHIFT 10 |
1278 | u8 reserved1; | 1270 | u8 reserved1; |
1279 | u8 flags0; | 1271 | u8 flags0; |
1280 | #define TDIF_TASK_CONTEXT_IGNOREAPPTAG_MASK 0x1 | 1272 | #define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_MASK 0x1 |
1281 | #define TDIF_TASK_CONTEXT_IGNOREAPPTAG_SHIFT 0 | 1273 | #define TDIF_TASK_CONTEXT_IGNORE_APP_TAG_SHIFT 0 |
1282 | #define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_MASK 0x1 | 1274 | #define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_MASK 0x1 |
1283 | #define TDIF_TASK_CONTEXT_INITIALREFTAGVALID_SHIFT 1 | 1275 | #define TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID_SHIFT 1 |
1284 | #define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_MASK 0x1 | 1276 | #define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_MASK 0x1 |
1285 | #define TDIF_TASK_CONTEXT_HOSTGUARDTYPE_SHIFT 2 | 1277 | #define TDIF_TASK_CONTEXT_HOST_GUARD_TYPE_SHIFT 2 |
1286 | #define TDIF_TASK_CONTEXT_SETERRORWITHEOP_MASK 0x1 | 1278 | #define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_MASK 0x1 |
1287 | #define TDIF_TASK_CONTEXT_SETERRORWITHEOP_SHIFT 3 | 1279 | #define TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP_SHIFT 3 |
1288 | #define TDIF_TASK_CONTEXT_PROTECTIONTYPE_MASK 0x3 | 1280 | #define TDIF_TASK_CONTEXT_PROTECTION_TYPE_MASK 0x3 |
1289 | #define TDIF_TASK_CONTEXT_PROTECTIONTYPE_SHIFT 4 | 1281 | #define TDIF_TASK_CONTEXT_PROTECTION_TYPE_SHIFT 4 |
1290 | #define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 | 1282 | #define TDIF_TASK_CONTEXT_CRC_SEED_MASK 0x1 |
1291 | #define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 | 1283 | #define TDIF_TASK_CONTEXT_CRC_SEED_SHIFT 6 |
1292 | #define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 | 1284 | #define TDIF_TASK_CONTEXT_RESERVED2_MASK 0x1 |
1293 | #define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 | 1285 | #define TDIF_TASK_CONTEXT_RESERVED2_SHIFT 7 |
1294 | __le32 flags1; | 1286 | __le32 flags1; |
1295 | #define TDIF_TASK_CONTEXT_VALIDATEGUARD_MASK 0x1 | 1287 | #define TDIF_TASK_CONTEXT_VALIDATE_GUARD_MASK 0x1 |
1296 | #define TDIF_TASK_CONTEXT_VALIDATEGUARD_SHIFT 0 | 1288 | #define TDIF_TASK_CONTEXT_VALIDATE_GUARD_SHIFT 0 |
1297 | #define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_MASK 0x1 | 1289 | #define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_MASK 0x1 |
1298 | #define TDIF_TASK_CONTEXT_VALIDATEAPPTAG_SHIFT 1 | 1290 | #define TDIF_TASK_CONTEXT_VALIDATE_APP_TAG_SHIFT 1 |
1299 | #define TDIF_TASK_CONTEXT_VALIDATEREFTAG_MASK 0x1 | 1291 | #define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_MASK 0x1 |
1300 | #define TDIF_TASK_CONTEXT_VALIDATEREFTAG_SHIFT 2 | 1292 | #define TDIF_TASK_CONTEXT_VALIDATE_REF_TAG_SHIFT 2 |
1301 | #define TDIF_TASK_CONTEXT_FORWARDGUARD_MASK 0x1 | 1293 | #define TDIF_TASK_CONTEXT_FORWARD_GUARD_MASK 0x1 |
1302 | #define TDIF_TASK_CONTEXT_FORWARDGUARD_SHIFT 3 | 1294 | #define TDIF_TASK_CONTEXT_FORWARD_GUARD_SHIFT 3 |
1303 | #define TDIF_TASK_CONTEXT_FORWARDAPPTAG_MASK 0x1 | 1295 | #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_MASK 0x1 |
1304 | #define TDIF_TASK_CONTEXT_FORWARDAPPTAG_SHIFT 4 | 1296 | #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_SHIFT 4 |
1305 | #define TDIF_TASK_CONTEXT_FORWARDREFTAG_MASK 0x1 | 1297 | #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_MASK 0x1 |
1306 | #define TDIF_TASK_CONTEXT_FORWARDREFTAG_SHIFT 5 | 1298 | #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_SHIFT 5 |
1307 | #define TDIF_TASK_CONTEXT_INTERVALSIZE_MASK 0x7 | 1299 | #define TDIF_TASK_CONTEXT_INTERVAL_SIZE_MASK 0x7 |
1308 | #define TDIF_TASK_CONTEXT_INTERVALSIZE_SHIFT 6 | 1300 | #define TDIF_TASK_CONTEXT_INTERVAL_SIZE_SHIFT 6 |
1309 | #define TDIF_TASK_CONTEXT_HOSTINTERFACE_MASK 0x3 | 1301 | #define TDIF_TASK_CONTEXT_HOST_INTERFACE_MASK 0x3 |
1310 | #define TDIF_TASK_CONTEXT_HOSTINTERFACE_SHIFT 9 | 1302 | #define TDIF_TASK_CONTEXT_HOST_INTERFACE_SHIFT 9 |
1311 | #define TDIF_TASK_CONTEXT_DIFBEFOREDATA_MASK 0x1 | 1303 | #define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_MASK 0x1 |
1312 | #define TDIF_TASK_CONTEXT_DIFBEFOREDATA_SHIFT 11 | 1304 | #define TDIF_TASK_CONTEXT_DIF_BEFORE_DATA_SHIFT 11 |
1313 | #define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 | 1305 | #define TDIF_TASK_CONTEXT_RESERVED3_MASK 0x1 |
1314 | #define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 | 1306 | #define TDIF_TASK_CONTEXT_RESERVED3_SHIFT 12 |
1315 | #define TDIF_TASK_CONTEXT_NETWORKINTERFACE_MASK 0x1 | 1307 | #define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_MASK 0x1 |
1316 | #define TDIF_TASK_CONTEXT_NETWORKINTERFACE_SHIFT 13 | 1308 | #define TDIF_TASK_CONTEXT_NETWORK_INTERFACE_SHIFT 13 |
1317 | #define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_MASK 0xF | 1309 | #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_MASK 0xF |
1318 | #define TDIF_TASK_CONTEXT_RECEIVEDDIFBYTESLEFTA_SHIFT 14 | 1310 | #define TDIF_TASK_CONTEXT_RECEIVED_DIF_BYTES_LEFT_A_SHIFT 14 |
1319 | #define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_MASK 0xF | 1311 | #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_MASK 0xF |
1320 | #define TDIF_TASK_CONTEXT_TRANSMITEDDIFBYTESLEFTA_SHIFT 18 | 1312 | #define TDIF_TASK_CONTEXT_TRANSMITED_DIF_BYTES_LEFT_A_SHIFT 18 |
1321 | #define TDIF_TASK_CONTEXT_ERRORINIOA_MASK 0x1 | 1313 | #define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_MASK 0x1 |
1322 | #define TDIF_TASK_CONTEXT_ERRORINIOA_SHIFT 22 | 1314 | #define TDIF_TASK_CONTEXT_ERROR_IN_IO_A_SHIFT 22 |
1323 | #define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_MASK 0x1 | 1315 | #define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_MASK 0x1 |
1324 | #define TDIF_TASK_CONTEXT_CHECKSUMOVERFLOWA_SHIFT 23 | 1316 | #define TDIF_TASK_CONTEXT_CHECKSUM_OVERFLOW_A_SHIFT 23 |
1325 | #define TDIF_TASK_CONTEXT_REFTAGMASK_MASK 0xF | 1317 | #define TDIF_TASK_CONTEXT_REF_TAG_MASK_MASK 0xF |
1326 | #define TDIF_TASK_CONTEXT_REFTAGMASK_SHIFT 24 | 1318 | #define TDIF_TASK_CONTEXT_REF_TAG_MASK_SHIFT 24 |
1327 | #define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_MASK 0x1 | 1319 | #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 |
1328 | #define TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK_SHIFT 28 | 1320 | #define TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK_SHIFT 28 |
1329 | #define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_MASK 0x1 | 1321 | #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 |
1330 | #define TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK_SHIFT 29 | 1322 | #define TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK_SHIFT 29 |
1331 | #define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_MASK 0x1 | 1323 | #define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_MASK 0x1 |
1332 | #define TDIF_TASK_CONTEXT_KEEPREFTAGCONST_SHIFT 30 | 1324 | #define TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST_SHIFT 30 |
1333 | #define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 | 1325 | #define TDIF_TASK_CONTEXT_RESERVED4_MASK 0x1 |
1334 | #define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 | 1326 | #define TDIF_TASK_CONTEXT_RESERVED4_SHIFT 31 |
1335 | __le32 offset_in_iob; | 1327 | __le32 offset_in_io_b; |
1336 | __le16 partial_crc_value_a; | 1328 | __le16 partial_crc_value_a; |
1337 | __le16 partial_checksum_valuea_; | 1329 | __le16 partial_checksum_value_a; |
1338 | __le32 offset_in_ioa; | 1330 | __le32 offset_in_io_a; |
1339 | u8 partial_dif_data_a[8]; | 1331 | u8 partial_dif_data_a[8]; |
1340 | u8 partial_dif_data_b[8]; | 1332 | u8 partial_dif_data_b[8]; |
1341 | }; | 1333 | }; |
1342 | 1334 | ||
1335 | /* Timers context */ | ||
1343 | struct timers_context { | 1336 | struct timers_context { |
1344 | __le32 logical_client_0; | 1337 | __le32 logical_client_0; |
1345 | #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF | 1338 | #define TIMERS_CONTEXT_EXPIRATIONTIMELC0_MASK 0x7FFFFFF |
@@ -1385,6 +1378,7 @@ struct timers_context { | |||
1385 | #define TIMERS_CONTEXT_RESERVED7_SHIFT 29 | 1378 | #define TIMERS_CONTEXT_RESERVED7_SHIFT 29 |
1386 | }; | 1379 | }; |
1387 | 1380 | ||
1381 | /* Enum for next_protocol field of tunnel_parsing_flags / tunnelTypeDesc */ | ||
1388 | enum tunnel_next_protocol { | 1382 | enum tunnel_next_protocol { |
1389 | e_unknown = 0, | 1383 | e_unknown = 0, |
1390 | e_l2 = 1, | 1384 | e_l2 = 1, |
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h index cb06e6e368e1..9db02856623b 100644 --- a/include/linux/qed/eth_common.h +++ b/include/linux/qed/eth_common.h | |||
@@ -36,150 +36,168 @@ | |||
36 | /********************/ | 36 | /********************/ |
37 | /* ETH FW CONSTANTS */ | 37 | /* ETH FW CONSTANTS */ |
38 | /********************/ | 38 | /********************/ |
39 | #define ETH_HSI_VER_MAJOR 3 | 39 | |
40 | #define ETH_HSI_VER_MINOR 10 | 40 | #define ETH_HSI_VER_MAJOR 3 |
41 | #define ETH_HSI_VER_MINOR 10 | ||
41 | 42 | ||
42 | #define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 | 43 | #define ETH_HSI_VER_NO_PKT_LEN_TUNN 5 |
43 | 44 | ||
44 | #define ETH_CACHE_LINE_SIZE 64 | 45 | #define ETH_CACHE_LINE_SIZE 64 |
45 | #define ETH_RX_CQE_GAP 32 | 46 | #define ETH_RX_CQE_GAP 32 |
46 | #define ETH_MAX_RAMROD_PER_CON 8 | 47 | #define ETH_MAX_RAMROD_PER_CON 8 |
47 | #define ETH_TX_BD_PAGE_SIZE_BYTES 4096 | 48 | #define ETH_TX_BD_PAGE_SIZE_BYTES 4096 |
48 | #define ETH_RX_BD_PAGE_SIZE_BYTES 4096 | 49 | #define ETH_RX_BD_PAGE_SIZE_BYTES 4096 |
49 | #define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 | 50 | #define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 |
50 | #define ETH_RX_NUM_NEXT_PAGE_BDS 2 | 51 | #define ETH_RX_NUM_NEXT_PAGE_BDS 2 |
51 | 52 | ||
52 | #define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 | 53 | #define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253 |
53 | #define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 | 54 | #define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251 |
54 | 55 | ||
55 | #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 | 56 | #define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 |
56 | #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 | 57 | #define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 |
57 | #define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 | 58 | #define ETH_TX_MAX_BDS_PER_LSO_PACKET 255 |
58 | #define ETH_TX_MAX_LSO_HDR_NBD 4 | 59 | #define ETH_TX_MAX_LSO_HDR_NBD 4 |
59 | #define ETH_TX_MIN_BDS_PER_LSO_PKT 3 | 60 | #define ETH_TX_MIN_BDS_PER_LSO_PKT 3 |
60 | #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 | 61 | #define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 |
61 | #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 | 62 | #define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 |
62 | #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 | 63 | #define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 |
63 | #define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) | 64 | #define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8)) |
64 | #define ETH_TX_MAX_LSO_HDR_BYTES 510 | 65 | #define ETH_TX_MAX_LSO_HDR_BYTES 510 |
65 | #define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) | 66 | #define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1) |
66 | #define ETH_TX_LSO_WINDOW_MIN_LEN 9700 | 67 | #define ETH_TX_LSO_WINDOW_MIN_LEN 9700 |
67 | #define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 | 68 | #define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFE000 |
68 | #define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 | 69 | #define ETH_TX_NUM_SAME_AS_LAST_ENTRIES 320 |
69 | #define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF | 70 | #define ETH_TX_INACTIVE_SAME_AS_LAST 0xFFFF |
70 | 71 | ||
71 | #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS | 72 | #define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS |
72 | #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ | 73 | #define ETH_NUM_STATISTIC_COUNTERS_DOUBLE_VF_ZONE \ |
73 | (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) | 74 | (ETH_NUM_STATISTIC_COUNTERS - MAX_NUM_VFS / 2) |
74 | #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ | 75 | #define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \ |
75 | (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) | 76 | (ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4) |
76 | 77 | ||
77 | /* Maximum number of buffers, used for RX packet placement */ | 78 | /* Maximum number of buffers, used for RX packet placement */ |
78 | #define ETH_RX_MAX_BUFF_PER_PKT 5 | 79 | #define ETH_RX_MAX_BUFF_PER_PKT 5 |
79 | #define ETH_RX_BD_THRESHOLD 12 | 80 | #define ETH_RX_BD_THRESHOLD 12 |
80 | 81 | ||
81 | /* num of MAC/VLAN filters */ | 82 | /* Num of MAC/VLAN filters */ |
82 | #define ETH_NUM_MAC_FILTERS 512 | 83 | #define ETH_NUM_MAC_FILTERS 512 |
83 | #define ETH_NUM_VLAN_FILTERS 512 | 84 | #define ETH_NUM_VLAN_FILTERS 512 |
84 | 85 | ||
85 | /* approx. multicast constants */ | 86 | /* Approx. multicast constants */ |
86 | #define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 | 87 | #define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 |
87 | #define ETH_MULTICAST_MAC_BINS 256 | 88 | #define ETH_MULTICAST_MAC_BINS 256 |
88 | #define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) | 89 | #define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) |
89 | 90 | ||
90 | /* ethernet vport update constants */ | 91 | /* Ethernet vport update constants */ |
91 | #define ETH_FILTER_RULES_COUNT 10 | 92 | #define ETH_FILTER_RULES_COUNT 10 |
92 | #define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 | 93 | #define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 |
93 | #define ETH_RSS_KEY_SIZE_REGS 10 | 94 | #define ETH_RSS_KEY_SIZE_REGS 10 |
94 | #define ETH_RSS_ENGINE_NUM_K2 207 | 95 | #define ETH_RSS_ENGINE_NUM_K2 207 |
95 | #define ETH_RSS_ENGINE_NUM_BB 127 | 96 | #define ETH_RSS_ENGINE_NUM_BB 127 |
96 | 97 | ||
97 | /* TPA constants */ | 98 | /* TPA constants */ |
98 | #define ETH_TPA_MAX_AGGS_NUM 64 | 99 | #define ETH_TPA_MAX_AGGS_NUM 64 |
99 | #define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT | 100 | #define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT |
100 | #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 | 101 | #define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 |
101 | #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 | 102 | #define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 |
102 | 103 | ||
103 | /* Control frame check constants */ | 104 | /* Control frame check constants */ |
104 | #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 | 105 | #define ETH_CTL_FRAME_ETH_TYPE_NUM 4 |
105 | 106 | ||
107 | /* GFS constants */ | ||
108 | #define ETH_GFT_TRASH_CAN_VPORT 0x1FF | ||
109 | |||
110 | /* Destination port mode */ | ||
111 | enum dest_port_mode { | ||
112 | DEST_PORT_PHY, | ||
113 | DEST_PORT_LOOPBACK, | ||
114 | DEST_PORT_PHY_LOOPBACK, | ||
115 | DEST_PORT_DROP, | ||
116 | MAX_DEST_PORT_MODE | ||
117 | }; | ||
118 | |||
119 | /* Ethernet address type */ | ||
120 | enum eth_addr_type { | ||
121 | BROADCAST_ADDRESS, | ||
122 | MULTICAST_ADDRESS, | ||
123 | UNICAST_ADDRESS, | ||
124 | UNKNOWN_ADDRESS, | ||
125 | MAX_ETH_ADDR_TYPE | ||
126 | }; | ||
127 | |||
106 | struct eth_tx_1st_bd_flags { | 128 | struct eth_tx_1st_bd_flags { |
107 | u8 bitfields; | 129 | u8 bitfields; |
108 | #define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 | 130 | #define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 |
109 | #define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 | 131 | #define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 |
110 | #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 | 132 | #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 |
111 | #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 | 133 | #define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 |
112 | #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 | 134 | #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 |
113 | #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 | 135 | #define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 |
114 | #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 | 136 | #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 |
115 | #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 | 137 | #define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 |
116 | #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 | 138 | #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 |
117 | #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 | 139 | #define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 |
118 | #define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 | 140 | #define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 |
119 | #define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 | 141 | #define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 |
120 | #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 | 142 | #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 |
121 | #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 | 143 | #define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 |
122 | #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 | 144 | #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 |
123 | #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 | 145 | #define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 |
124 | }; | 146 | }; |
125 | 147 | ||
126 | /* The parsing information data fo rthe first tx bd of a given packet. */ | 148 | /* The parsing information data fo rthe first tx bd of a given packet */ |
127 | struct eth_tx_data_1st_bd { | 149 | struct eth_tx_data_1st_bd { |
128 | __le16 vlan; | 150 | __le16 vlan; |
129 | u8 nbds; | 151 | u8 nbds; |
130 | struct eth_tx_1st_bd_flags bd_flags; | 152 | struct eth_tx_1st_bd_flags bd_flags; |
131 | __le16 bitfields; | 153 | __le16 bitfields; |
132 | #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 | 154 | #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_MASK 0x1 |
133 | #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 | 155 | #define ETH_TX_DATA_1ST_BD_TUNN_FLAG_SHIFT 0 |
134 | #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 | 156 | #define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 |
135 | #define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 | 157 | #define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 |
136 | #define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF | 158 | #define ETH_TX_DATA_1ST_BD_PKT_LEN_MASK 0x3FFF |
137 | #define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 | 159 | #define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2 |
138 | }; | 160 | }; |
139 | 161 | ||
140 | /* The parsing information data for the second tx bd of a given packet. */ | 162 | /* The parsing information data for the second tx bd of a given packet */ |
141 | struct eth_tx_data_2nd_bd { | 163 | struct eth_tx_data_2nd_bd { |
142 | __le16 tunn_ip_size; | 164 | __le16 tunn_ip_size; |
143 | __le16 bitfields1; | 165 | __le16 bitfields1; |
144 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF | 166 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF |
145 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 | 167 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 |
146 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 | 168 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 |
147 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 | 169 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 |
148 | #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 | 170 | #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 |
149 | #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 | 171 | #define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 |
150 | #define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 | 172 | #define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 |
151 | #define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 | 173 | #define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 |
152 | #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 | 174 | #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 |
153 | #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 | 175 | #define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 |
154 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 | 176 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 |
155 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 | 177 | #define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 |
156 | #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 | 178 | #define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 |
157 | #define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 | 179 | #define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 |
158 | #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 | 180 | #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 |
159 | #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 | 181 | #define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 |
160 | #define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 | 182 | #define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 |
161 | #define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 | 183 | #define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 |
162 | #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 | 184 | #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 |
163 | #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 | 185 | #define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 |
164 | __le16 bitfields2; | 186 | __le16 bitfields2; |
165 | #define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF | 187 | #define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF |
166 | #define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 | 188 | #define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 |
167 | #define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 | 189 | #define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 |
168 | #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 | 190 | #define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 |
169 | }; | 191 | }; |
170 | 192 | ||
171 | /* Firmware data for L2-EDPM packet. */ | 193 | /* Firmware data for L2-EDPM packet */ |
172 | struct eth_edpm_fw_data { | 194 | struct eth_edpm_fw_data { |
173 | struct eth_tx_data_1st_bd data_1st_bd; | 195 | struct eth_tx_data_1st_bd data_1st_bd; |
174 | struct eth_tx_data_2nd_bd data_2nd_bd; | 196 | struct eth_tx_data_2nd_bd data_2nd_bd; |
175 | __le32 reserved; | 197 | __le32 reserved; |
176 | }; | 198 | }; |
177 | 199 | ||
178 | struct eth_fast_path_cqe_fw_debug { | 200 | /* Tunneling parsing flags */ |
179 | __le16 reserved2; | ||
180 | }; | ||
181 | |||
182 | /* tunneling parsing flags */ | ||
183 | struct eth_tunnel_parsing_flags { | 201 | struct eth_tunnel_parsing_flags { |
184 | u8 flags; | 202 | u8 flags; |
185 | #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 | 203 | #define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 |
@@ -199,24 +217,24 @@ struct eth_tunnel_parsing_flags { | |||
199 | /* PMD flow control bits */ | 217 | /* PMD flow control bits */ |
200 | struct eth_pmd_flow_flags { | 218 | struct eth_pmd_flow_flags { |
201 | u8 flags; | 219 | u8 flags; |
202 | #define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 | 220 | #define ETH_PMD_FLOW_FLAGS_VALID_MASK 0x1 |
203 | #define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 | 221 | #define ETH_PMD_FLOW_FLAGS_VALID_SHIFT 0 |
204 | #define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 | 222 | #define ETH_PMD_FLOW_FLAGS_TOGGLE_MASK 0x1 |
205 | #define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 | 223 | #define ETH_PMD_FLOW_FLAGS_TOGGLE_SHIFT 1 |
206 | #define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F | 224 | #define ETH_PMD_FLOW_FLAGS_RESERVED_MASK 0x3F |
207 | #define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 | 225 | #define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2 |
208 | }; | 226 | }; |
209 | 227 | ||
210 | /* Regular ETH Rx FP CQE. */ | 228 | /* Regular ETH Rx FP CQE */ |
211 | struct eth_fast_path_rx_reg_cqe { | 229 | struct eth_fast_path_rx_reg_cqe { |
212 | u8 type; | 230 | u8 type; |
213 | u8 bitfields; | 231 | u8 bitfields; |
214 | #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 | 232 | #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 |
215 | #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 | 233 | #define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 |
216 | #define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF | 234 | #define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF |
217 | #define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 | 235 | #define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 |
218 | #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 | 236 | #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 |
219 | #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 | 237 | #define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 |
220 | __le16 pkt_len; | 238 | __le16 pkt_len; |
221 | struct parsing_and_err_flags pars_flags; | 239 | struct parsing_and_err_flags pars_flags; |
222 | __le16 vlan_tag; | 240 | __le16 vlan_tag; |
@@ -225,13 +243,13 @@ struct eth_fast_path_rx_reg_cqe { | |||
225 | u8 placement_offset; | 243 | u8 placement_offset; |
226 | struct eth_tunnel_parsing_flags tunnel_pars_flags; | 244 | struct eth_tunnel_parsing_flags tunnel_pars_flags; |
227 | u8 bd_num; | 245 | u8 bd_num; |
228 | u8 reserved[9]; | 246 | u8 reserved; |
229 | struct eth_fast_path_cqe_fw_debug fw_debug; | 247 | __le16 flow_id; |
230 | u8 reserved1[3]; | 248 | u8 reserved1[11]; |
231 | struct eth_pmd_flow_flags pmd_flags; | 249 | struct eth_pmd_flow_flags pmd_flags; |
232 | }; | 250 | }; |
233 | 251 | ||
234 | /* TPA-continue ETH Rx FP CQE. */ | 252 | /* TPA-continue ETH Rx FP CQE */ |
235 | struct eth_fast_path_rx_tpa_cont_cqe { | 253 | struct eth_fast_path_rx_tpa_cont_cqe { |
236 | u8 type; | 254 | u8 type; |
237 | u8 tpa_agg_index; | 255 | u8 tpa_agg_index; |
@@ -243,7 +261,7 @@ struct eth_fast_path_rx_tpa_cont_cqe { | |||
243 | struct eth_pmd_flow_flags pmd_flags; | 261 | struct eth_pmd_flow_flags pmd_flags; |
244 | }; | 262 | }; |
245 | 263 | ||
246 | /* TPA-end ETH Rx FP CQE. */ | 264 | /* TPA-end ETH Rx FP CQE */ |
247 | struct eth_fast_path_rx_tpa_end_cqe { | 265 | struct eth_fast_path_rx_tpa_end_cqe { |
248 | u8 type; | 266 | u8 type; |
249 | u8 tpa_agg_index; | 267 | u8 tpa_agg_index; |
@@ -259,16 +277,16 @@ struct eth_fast_path_rx_tpa_end_cqe { | |||
259 | struct eth_pmd_flow_flags pmd_flags; | 277 | struct eth_pmd_flow_flags pmd_flags; |
260 | }; | 278 | }; |
261 | 279 | ||
262 | /* TPA-start ETH Rx FP CQE. */ | 280 | /* TPA-start ETH Rx FP CQE */ |
263 | struct eth_fast_path_rx_tpa_start_cqe { | 281 | struct eth_fast_path_rx_tpa_start_cqe { |
264 | u8 type; | 282 | u8 type; |
265 | u8 bitfields; | 283 | u8 bitfields; |
266 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 | 284 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 |
267 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 | 285 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 |
268 | #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF | 286 | #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF |
269 | #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 | 287 | #define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 |
270 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 | 288 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 |
271 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 | 289 | #define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 |
272 | __le16 seg_len; | 290 | __le16 seg_len; |
273 | struct parsing_and_err_flags pars_flags; | 291 | struct parsing_and_err_flags pars_flags; |
274 | __le16 vlan_tag; | 292 | __le16 vlan_tag; |
@@ -279,7 +297,7 @@ struct eth_fast_path_rx_tpa_start_cqe { | |||
279 | u8 tpa_agg_index; | 297 | u8 tpa_agg_index; |
280 | u8 header_len; | 298 | u8 header_len; |
281 | __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; | 299 | __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE]; |
282 | struct eth_fast_path_cqe_fw_debug fw_debug; | 300 | __le16 flow_id; |
283 | u8 reserved; | 301 | u8 reserved; |
284 | struct eth_pmd_flow_flags pmd_flags; | 302 | struct eth_pmd_flow_flags pmd_flags; |
285 | }; | 303 | }; |
@@ -295,24 +313,24 @@ struct eth_rx_bd { | |||
295 | struct regpair addr; | 313 | struct regpair addr; |
296 | }; | 314 | }; |
297 | 315 | ||
298 | /* regular ETH Rx SP CQE */ | 316 | /* Regular ETH Rx SP CQE */ |
299 | struct eth_slow_path_rx_cqe { | 317 | struct eth_slow_path_rx_cqe { |
300 | u8 type; | 318 | u8 type; |
301 | u8 ramrod_cmd_id; | 319 | u8 ramrod_cmd_id; |
302 | u8 error_flag; | 320 | u8 error_flag; |
303 | u8 reserved[25]; | 321 | u8 reserved[25]; |
304 | __le16 echo; | 322 | __le16 echo; |
305 | u8 reserved1; | 323 | u8 reserved1; |
306 | struct eth_pmd_flow_flags pmd_flags; | 324 | struct eth_pmd_flow_flags pmd_flags; |
307 | }; | 325 | }; |
308 | 326 | ||
309 | /* union for all ETH Rx CQE types */ | 327 | /* Union for all ETH Rx CQE types */ |
310 | union eth_rx_cqe { | 328 | union eth_rx_cqe { |
311 | struct eth_fast_path_rx_reg_cqe fast_path_regular; | 329 | struct eth_fast_path_rx_reg_cqe fast_path_regular; |
312 | struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; | 330 | struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start; |
313 | struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; | 331 | struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont; |
314 | struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; | 332 | struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end; |
315 | struct eth_slow_path_rx_cqe slow_path; | 333 | struct eth_slow_path_rx_cqe slow_path; |
316 | }; | 334 | }; |
317 | 335 | ||
318 | /* ETH Rx CQE type */ | 336 | /* ETH Rx CQE type */ |
@@ -339,7 +357,7 @@ enum eth_rx_tunn_type { | |||
339 | MAX_ETH_RX_TUNN_TYPE | 357 | MAX_ETH_RX_TUNN_TYPE |
340 | }; | 358 | }; |
341 | 359 | ||
342 | /* Aggregation end reason. */ | 360 | /* Aggregation end reason. */ |
343 | enum eth_tpa_end_reason { | 361 | enum eth_tpa_end_reason { |
344 | ETH_AGG_END_UNUSED, | 362 | ETH_AGG_END_UNUSED, |
345 | ETH_AGG_END_SP_UPDATE, | 363 | ETH_AGG_END_SP_UPDATE, |
@@ -354,59 +372,59 @@ enum eth_tpa_end_reason { | |||
354 | 372 | ||
355 | /* The first tx bd of a given packet */ | 373 | /* The first tx bd of a given packet */ |
356 | struct eth_tx_1st_bd { | 374 | struct eth_tx_1st_bd { |
357 | struct regpair addr; | 375 | struct regpair addr; |
358 | __le16 nbytes; | 376 | __le16 nbytes; |
359 | struct eth_tx_data_1st_bd data; | 377 | struct eth_tx_data_1st_bd data; |
360 | }; | 378 | }; |
361 | 379 | ||
362 | /* The second tx bd of a given packet */ | 380 | /* The second tx bd of a given packet */ |
363 | struct eth_tx_2nd_bd { | 381 | struct eth_tx_2nd_bd { |
364 | struct regpair addr; | 382 | struct regpair addr; |
365 | __le16 nbytes; | 383 | __le16 nbytes; |
366 | struct eth_tx_data_2nd_bd data; | 384 | struct eth_tx_data_2nd_bd data; |
367 | }; | 385 | }; |
368 | 386 | ||
369 | /* The parsing information data for the third tx bd of a given packet. */ | 387 | /* The parsing information data for the third tx bd of a given packet */ |
370 | struct eth_tx_data_3rd_bd { | 388 | struct eth_tx_data_3rd_bd { |
371 | __le16 lso_mss; | 389 | __le16 lso_mss; |
372 | __le16 bitfields; | 390 | __le16 bitfields; |
373 | #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF | 391 | #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF |
374 | #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 | 392 | #define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 |
375 | #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF | 393 | #define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF |
376 | #define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 | 394 | #define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 |
377 | #define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 | 395 | #define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 |
378 | #define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 | 396 | #define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 |
379 | #define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F | 397 | #define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F |
380 | #define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 | 398 | #define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 |
381 | u8 tunn_l4_hdr_start_offset_w; | 399 | u8 tunn_l4_hdr_start_offset_w; |
382 | u8 tunn_hdr_size_w; | 400 | u8 tunn_hdr_size_w; |
383 | }; | 401 | }; |
384 | 402 | ||
385 | /* The third tx bd of a given packet */ | 403 | /* The third tx bd of a given packet */ |
386 | struct eth_tx_3rd_bd { | 404 | struct eth_tx_3rd_bd { |
387 | struct regpair addr; | 405 | struct regpair addr; |
388 | __le16 nbytes; | 406 | __le16 nbytes; |
389 | struct eth_tx_data_3rd_bd data; | 407 | struct eth_tx_data_3rd_bd data; |
390 | }; | 408 | }; |
391 | 409 | ||
392 | /* Complementary information for the regular tx bd of a given packet. */ | 410 | /* Complementary information for the regular tx bd of a given packet */ |
393 | struct eth_tx_data_bd { | 411 | struct eth_tx_data_bd { |
394 | __le16 reserved0; | 412 | __le16 reserved0; |
395 | __le16 bitfields; | 413 | __le16 bitfields; |
396 | #define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF | 414 | #define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF |
397 | #define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 | 415 | #define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 |
398 | #define ETH_TX_DATA_BD_START_BD_MASK 0x1 | 416 | #define ETH_TX_DATA_BD_START_BD_MASK 0x1 |
399 | #define ETH_TX_DATA_BD_START_BD_SHIFT 8 | 417 | #define ETH_TX_DATA_BD_START_BD_SHIFT 8 |
400 | #define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F | 418 | #define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F |
401 | #define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 | 419 | #define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 |
402 | __le16 reserved3; | 420 | __le16 reserved3; |
403 | }; | 421 | }; |
404 | 422 | ||
405 | /* The common non-special TX BD ring element */ | 423 | /* The common non-special TX BD ring element */ |
406 | struct eth_tx_bd { | 424 | struct eth_tx_bd { |
407 | struct regpair addr; | 425 | struct regpair addr; |
408 | __le16 nbytes; | 426 | __le16 nbytes; |
409 | struct eth_tx_data_bd data; | 427 | struct eth_tx_data_bd data; |
410 | }; | 428 | }; |
411 | 429 | ||
412 | union eth_tx_bd_types { | 430 | union eth_tx_bd_types { |
@@ -434,18 +452,30 @@ struct xstorm_eth_queue_zone { | |||
434 | /* ETH doorbell data */ | 452 | /* ETH doorbell data */ |
435 | struct eth_db_data { | 453 | struct eth_db_data { |
436 | u8 params; | 454 | u8 params; |
437 | #define ETH_DB_DATA_DEST_MASK 0x3 | 455 | #define ETH_DB_DATA_DEST_MASK 0x3 |
438 | #define ETH_DB_DATA_DEST_SHIFT 0 | 456 | #define ETH_DB_DATA_DEST_SHIFT 0 |
439 | #define ETH_DB_DATA_AGG_CMD_MASK 0x3 | 457 | #define ETH_DB_DATA_AGG_CMD_MASK 0x3 |
440 | #define ETH_DB_DATA_AGG_CMD_SHIFT 2 | 458 | #define ETH_DB_DATA_AGG_CMD_SHIFT 2 |
441 | #define ETH_DB_DATA_BYPASS_EN_MASK 0x1 | 459 | #define ETH_DB_DATA_BYPASS_EN_MASK 0x1 |
442 | #define ETH_DB_DATA_BYPASS_EN_SHIFT 4 | 460 | #define ETH_DB_DATA_BYPASS_EN_SHIFT 4 |
443 | #define ETH_DB_DATA_RESERVED_MASK 0x1 | 461 | #define ETH_DB_DATA_RESERVED_MASK 0x1 |
444 | #define ETH_DB_DATA_RESERVED_SHIFT 5 | 462 | #define ETH_DB_DATA_RESERVED_SHIFT 5 |
445 | #define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 | 463 | #define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 |
446 | #define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 | 464 | #define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 |
447 | u8 agg_flags; | 465 | u8 agg_flags; |
448 | __le16 bd_prod; | 466 | __le16 bd_prod; |
449 | }; | 467 | }; |
450 | 468 | ||
469 | /* RSS hash type */ | ||
470 | enum rss_hash_type { | ||
471 | RSS_HASH_TYPE_DEFAULT = 0, | ||
472 | RSS_HASH_TYPE_IPV4 = 1, | ||
473 | RSS_HASH_TYPE_TCP_IPV4 = 2, | ||
474 | RSS_HASH_TYPE_IPV6 = 3, | ||
475 | RSS_HASH_TYPE_TCP_IPV6 = 4, | ||
476 | RSS_HASH_TYPE_UDP_IPV4 = 5, | ||
477 | RSS_HASH_TYPE_UDP_IPV6 = 6, | ||
478 | MAX_RSS_HASH_TYPE | ||
479 | }; | ||
480 | |||
451 | #endif /* __ETH_COMMON__ */ | 481 | #endif /* __ETH_COMMON__ */ |
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h index 12fc9e788eea..22077c586853 100644 --- a/include/linux/qed/fcoe_common.h +++ b/include/linux/qed/fcoe_common.h | |||
@@ -8,217 +8,78 @@ | |||
8 | 8 | ||
9 | #ifndef __FCOE_COMMON__ | 9 | #ifndef __FCOE_COMMON__ |
10 | #define __FCOE_COMMON__ | 10 | #define __FCOE_COMMON__ |
11 | |||
11 | /*********************/ | 12 | /*********************/ |
12 | /* FCOE FW CONSTANTS */ | 13 | /* FCOE FW CONSTANTS */ |
13 | /*********************/ | 14 | /*********************/ |
14 | 15 | ||
15 | #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 | 16 | #define FC_ABTS_REPLY_MAX_PAYLOAD_LEN 12 |
16 | 17 | ||
17 | struct fcoe_abts_pkt { | 18 | /* The fcoe storm task context protection-information of Ystorm */ |
18 | __le32 abts_rsp_fc_payload_lo; | 19 | struct protection_info_ctx { |
19 | __le16 abts_rsp_rx_id; | 20 | __le16 flags; |
20 | u8 abts_rsp_rctl; | 21 | #define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 |
21 | u8 reserved2; | 22 | #define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 |
22 | }; | 23 | #define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 |
23 | 24 | #define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 | |
24 | /* FCoE additional WQE (Sq/XferQ) information */ | 25 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 |
25 | union fcoe_additional_info_union { | 26 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 |
26 | __le32 previous_tid; | 27 | #define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF |
27 | __le32 parent_tid; | 28 | #define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 |
28 | __le32 burst_length; | 29 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 |
29 | __le32 seq_rec_updated_offset; | 30 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 |
30 | }; | 31 | #define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F |
31 | 32 | #define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 | |
32 | struct fcoe_exp_ro { | 33 | u8 dix_block_size; |
33 | __le32 data_offset; | 34 | u8 dst_size; |
34 | __le32 reserved; | ||
35 | }; | ||
36 | |||
37 | union fcoe_cleanup_addr_exp_ro_union { | ||
38 | struct regpair abts_rsp_fc_payload_hi; | ||
39 | struct fcoe_exp_ro exp_ro; | ||
40 | }; | ||
41 | |||
42 | /* FCoE Ramrod Command IDs */ | ||
43 | enum fcoe_completion_status { | ||
44 | FCOE_COMPLETION_STATUS_SUCCESS, | ||
45 | FCOE_COMPLETION_STATUS_FCOE_VER_ERR, | ||
46 | FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, | ||
47 | MAX_FCOE_COMPLETION_STATUS | ||
48 | }; | ||
49 | |||
50 | struct fc_addr_nw { | ||
51 | u8 addr_lo; | ||
52 | u8 addr_mid; | ||
53 | u8 addr_hi; | ||
54 | }; | ||
55 | |||
56 | /* FCoE connection offload */ | ||
57 | struct fcoe_conn_offload_ramrod_data { | ||
58 | struct regpair sq_pbl_addr; | ||
59 | struct regpair sq_curr_page_addr; | ||
60 | struct regpair sq_next_page_addr; | ||
61 | struct regpair xferq_pbl_addr; | ||
62 | struct regpair xferq_curr_page_addr; | ||
63 | struct regpair xferq_next_page_addr; | ||
64 | struct regpair respq_pbl_addr; | ||
65 | struct regpair respq_curr_page_addr; | ||
66 | struct regpair respq_next_page_addr; | ||
67 | __le16 dst_mac_addr_lo; | ||
68 | __le16 dst_mac_addr_mid; | ||
69 | __le16 dst_mac_addr_hi; | ||
70 | __le16 src_mac_addr_lo; | ||
71 | __le16 src_mac_addr_mid; | ||
72 | __le16 src_mac_addr_hi; | ||
73 | __le16 tx_max_fc_pay_len; | ||
74 | __le16 e_d_tov_timer_val; | ||
75 | __le16 rx_max_fc_pay_len; | ||
76 | __le16 vlan_tag; | ||
77 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF | ||
78 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 | ||
79 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 | ||
80 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 | ||
81 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 | ||
82 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 | ||
83 | __le16 physical_q0; | ||
84 | __le16 rec_rr_tov_timer_val; | ||
85 | struct fc_addr_nw s_id; | ||
86 | u8 max_conc_seqs_c3; | ||
87 | struct fc_addr_nw d_id; | ||
88 | u8 flags; | ||
89 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 | ||
90 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 | ||
91 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 | ||
92 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 | ||
93 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 | ||
94 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 | ||
95 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 | ||
96 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 | ||
97 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 | ||
98 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 4 | ||
99 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x3 | ||
100 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 6 | ||
101 | __le16 conn_id; | ||
102 | u8 def_q_idx; | ||
103 | u8 reserved[5]; | ||
104 | }; | ||
105 | |||
106 | /* FCoE terminate connection request */ | ||
107 | struct fcoe_conn_terminate_ramrod_data { | ||
108 | struct regpair terminate_params_addr; | ||
109 | }; | ||
110 | |||
111 | struct fcoe_slow_sgl_ctx { | ||
112 | struct regpair base_sgl_addr; | ||
113 | __le16 curr_sge_off; | ||
114 | __le16 remainder_num_sges; | ||
115 | __le16 curr_sgl_index; | ||
116 | __le16 reserved; | ||
117 | }; | ||
118 | |||
119 | union fcoe_dix_desc_ctx { | ||
120 | struct fcoe_slow_sgl_ctx dix_sgl; | ||
121 | struct scsi_sge cached_dix_sge; | ||
122 | }; | 35 | }; |
123 | 36 | ||
124 | struct fcoe_fast_sgl_ctx { | 37 | /* The fcoe storm task context protection-information of Ystorm */ |
125 | struct regpair sgl_start_addr; | 38 | union protection_info_union_ctx { |
126 | __le32 sgl_byte_offset; | 39 | struct protection_info_ctx info; |
127 | __le16 task_reuse_cnt; | 40 | __le32 value; |
128 | __le16 init_offset_in_first_sge; | ||
129 | }; | 41 | }; |
130 | 42 | ||
43 | /* FCP CMD payload */ | ||
131 | struct fcoe_fcp_cmd_payload { | 44 | struct fcoe_fcp_cmd_payload { |
132 | __le32 opaque[8]; | 45 | __le32 opaque[8]; |
133 | }; | 46 | }; |
134 | 47 | ||
48 | /* FCP RSP payload */ | ||
135 | struct fcoe_fcp_rsp_payload { | 49 | struct fcoe_fcp_rsp_payload { |
136 | __le32 opaque[6]; | 50 | __le32 opaque[6]; |
137 | }; | 51 | }; |
138 | 52 | ||
139 | struct fcoe_fcp_xfer_payload { | 53 | /* FCP RSP payload */ |
140 | __le32 opaque[3]; | ||
141 | }; | ||
142 | |||
143 | /* FCoE firmware function init */ | ||
144 | struct fcoe_init_func_ramrod_data { | ||
145 | struct scsi_init_func_params func_params; | ||
146 | struct scsi_init_func_queues q_params; | ||
147 | __le16 mtu; | ||
148 | __le16 sq_num_pages_in_pbl; | ||
149 | __le32 reserved; | ||
150 | }; | ||
151 | |||
152 | /* FCoE: Mode of the connection: Target or Initiator or both */ | ||
153 | enum fcoe_mode_type { | ||
154 | FCOE_INITIATOR_MODE = 0x0, | ||
155 | FCOE_TARGET_MODE = 0x1, | ||
156 | FCOE_BOTH_OR_NOT_CHOSEN = 0x3, | ||
157 | MAX_FCOE_MODE_TYPE | ||
158 | }; | ||
159 | |||
160 | struct fcoe_rx_stat { | ||
161 | struct regpair fcoe_rx_byte_cnt; | ||
162 | struct regpair fcoe_rx_data_pkt_cnt; | ||
163 | struct regpair fcoe_rx_xfer_pkt_cnt; | ||
164 | struct regpair fcoe_rx_other_pkt_cnt; | ||
165 | __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; | ||
166 | __le32 fcoe_silent_drop_pkt_rq_full_cnt; | ||
167 | __le32 fcoe_silent_drop_pkt_crc_error_cnt; | ||
168 | __le32 fcoe_silent_drop_pkt_task_invalid_cnt; | ||
169 | __le32 fcoe_silent_drop_total_pkt_cnt; | ||
170 | __le32 rsrv; | ||
171 | }; | ||
172 | |||
173 | struct fcoe_stat_ramrod_data { | ||
174 | struct regpair stat_params_addr; | ||
175 | }; | ||
176 | |||
177 | struct protection_info_ctx { | ||
178 | __le16 flags; | ||
179 | #define PROTECTION_INFO_CTX_HOST_INTERFACE_MASK 0x3 | ||
180 | #define PROTECTION_INFO_CTX_HOST_INTERFACE_SHIFT 0 | ||
181 | #define PROTECTION_INFO_CTX_DIF_TO_PEER_MASK 0x1 | ||
182 | #define PROTECTION_INFO_CTX_DIF_TO_PEER_SHIFT 2 | ||
183 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_MASK 0x1 | ||
184 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_APP_TAG_SHIFT 3 | ||
185 | #define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_MASK 0xF | ||
186 | #define PROTECTION_INFO_CTX_INTERVAL_SIZE_LOG_SHIFT 4 | ||
187 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 | ||
188 | #define PROTECTION_INFO_CTX_VALIDATE_DIX_REF_TAG_SHIFT 8 | ||
189 | #define PROTECTION_INFO_CTX_RESERVED0_MASK 0x7F | ||
190 | #define PROTECTION_INFO_CTX_RESERVED0_SHIFT 9 | ||
191 | u8 dix_block_size; | ||
192 | u8 dst_size; | ||
193 | }; | ||
194 | |||
195 | union protection_info_union_ctx { | ||
196 | struct protection_info_ctx info; | ||
197 | __le32 value; | ||
198 | }; | ||
199 | |||
200 | struct fcp_rsp_payload_padded { | 54 | struct fcp_rsp_payload_padded { |
201 | struct fcoe_fcp_rsp_payload rsp_payload; | 55 | struct fcoe_fcp_rsp_payload rsp_payload; |
202 | __le32 reserved[2]; | 56 | __le32 reserved[2]; |
203 | }; | 57 | }; |
204 | 58 | ||
59 | /* FCP RSP payload */ | ||
60 | struct fcoe_fcp_xfer_payload { | ||
61 | __le32 opaque[3]; | ||
62 | }; | ||
63 | |||
64 | /* FCP RSP payload */ | ||
205 | struct fcp_xfer_payload_padded { | 65 | struct fcp_xfer_payload_padded { |
206 | struct fcoe_fcp_xfer_payload xfer_payload; | 66 | struct fcoe_fcp_xfer_payload xfer_payload; |
207 | __le32 reserved[5]; | 67 | __le32 reserved[5]; |
208 | }; | 68 | }; |
209 | 69 | ||
70 | /* Task params */ | ||
210 | struct fcoe_tx_data_params { | 71 | struct fcoe_tx_data_params { |
211 | __le32 data_offset; | 72 | __le32 data_offset; |
212 | __le32 offset_in_io; | 73 | __le32 offset_in_io; |
213 | u8 flags; | 74 | u8 flags; |
214 | #define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 | 75 | #define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_MASK 0x1 |
215 | #define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 | 76 | #define FCOE_TX_DATA_PARAMS_OFFSET_IN_IO_VALID_SHIFT 0 |
216 | #define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 | 77 | #define FCOE_TX_DATA_PARAMS_DROP_DATA_MASK 0x1 |
217 | #define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 | 78 | #define FCOE_TX_DATA_PARAMS_DROP_DATA_SHIFT 1 |
218 | #define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 | 79 | #define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_MASK 0x1 |
219 | #define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 | 80 | #define FCOE_TX_DATA_PARAMS_AFTER_SEQ_REC_SHIFT 2 |
220 | #define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F | 81 | #define FCOE_TX_DATA_PARAMS_RESERVED0_MASK 0x1F |
221 | #define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 | 82 | #define FCOE_TX_DATA_PARAMS_RESERVED0_SHIFT 3 |
222 | u8 dif_residual; | 83 | u8 dif_residual; |
223 | __le16 seq_cnt; | 84 | __le16 seq_cnt; |
224 | __le16 single_sge_saved_offset; | 85 | __le16 single_sge_saved_offset; |
@@ -227,6 +88,7 @@ struct fcoe_tx_data_params { | |||
227 | __le16 reserved3; | 88 | __le16 reserved3; |
228 | }; | 89 | }; |
229 | 90 | ||
91 | /* Middle path parameters: FC header fields provided by the driver */ | ||
230 | struct fcoe_tx_mid_path_params { | 92 | struct fcoe_tx_mid_path_params { |
231 | __le32 parameter; | 93 | __le32 parameter; |
232 | u8 r_ctl; | 94 | u8 r_ctl; |
@@ -237,11 +99,13 @@ struct fcoe_tx_mid_path_params { | |||
237 | __le16 ox_id; | 99 | __le16 ox_id; |
238 | }; | 100 | }; |
239 | 101 | ||
102 | /* Task params */ | ||
240 | struct fcoe_tx_params { | 103 | struct fcoe_tx_params { |
241 | struct fcoe_tx_data_params data; | 104 | struct fcoe_tx_data_params data; |
242 | struct fcoe_tx_mid_path_params mid_path; | 105 | struct fcoe_tx_mid_path_params mid_path; |
243 | }; | 106 | }; |
244 | 107 | ||
108 | /* Union of FCP CMD payload \ TX params \ ABTS \ Cleanup */ | ||
245 | union fcoe_tx_info_union_ctx { | 109 | union fcoe_tx_info_union_ctx { |
246 | struct fcoe_fcp_cmd_payload fcp_cmd_payload; | 110 | struct fcoe_fcp_cmd_payload fcp_cmd_payload; |
247 | struct fcp_rsp_payload_padded fcp_rsp_payload; | 111 | struct fcp_rsp_payload_padded fcp_rsp_payload; |
@@ -249,13 +113,29 @@ union fcoe_tx_info_union_ctx { | |||
249 | struct fcoe_tx_params tx_params; | 113 | struct fcoe_tx_params tx_params; |
250 | }; | 114 | }; |
251 | 115 | ||
116 | /* Data sgl */ | ||
117 | struct fcoe_slow_sgl_ctx { | ||
118 | struct regpair base_sgl_addr; | ||
119 | __le16 curr_sge_off; | ||
120 | __le16 remainder_num_sges; | ||
121 | __le16 curr_sgl_index; | ||
122 | __le16 reserved; | ||
123 | }; | ||
124 | |||
125 | /* Union of DIX SGL \ cached DIX sges */ | ||
126 | union fcoe_dix_desc_ctx { | ||
127 | struct fcoe_slow_sgl_ctx dix_sgl; | ||
128 | struct scsi_sge cached_dix_sge; | ||
129 | }; | ||
130 | |||
131 | /* The fcoe storm task context of Ystorm */ | ||
252 | struct ystorm_fcoe_task_st_ctx { | 132 | struct ystorm_fcoe_task_st_ctx { |
253 | u8 task_type; | 133 | u8 task_type; |
254 | u8 sgl_mode; | 134 | u8 sgl_mode; |
255 | #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 | 135 | #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 |
256 | #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 | 136 | #define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0 |
257 | #define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F | 137 | #define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F |
258 | #define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 | 138 | #define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1 |
259 | u8 cached_dix_sge; | 139 | u8 cached_dix_sge; |
260 | u8 expect_first_xfer; | 140 | u8 expect_first_xfer; |
261 | __le32 num_pbf_zero_write; | 141 | __le32 num_pbf_zero_write; |
@@ -272,49 +152,49 @@ struct ystorm_fcoe_task_st_ctx { | |||
272 | u8 reserved2[8]; | 152 | u8 reserved2[8]; |
273 | }; | 153 | }; |
274 | 154 | ||
275 | struct ystorm_fcoe_task_ag_ctx { | 155 | struct e4_ystorm_fcoe_task_ag_ctx { |
276 | u8 byte0; | 156 | u8 byte0; |
277 | u8 byte1; | 157 | u8 byte1; |
278 | __le16 word0; | 158 | __le16 word0; |
279 | u8 flags0; | 159 | u8 flags0; |
280 | #define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF | 160 | #define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_MASK 0xF |
281 | #define YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 | 161 | #define E4_YSTORM_FCOE_TASK_AG_CTX_NIBBLE0_SHIFT 0 |
282 | #define YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 | 162 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_MASK 0x1 |
283 | #define YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 | 163 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT0_SHIFT 4 |
284 | #define YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 | 164 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 |
285 | #define YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 | 165 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 |
286 | #define YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 | 166 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 |
287 | #define YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 | 167 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 |
288 | #define YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 | 168 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 |
289 | #define YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 | 169 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 |
290 | u8 flags1; | 170 | u8 flags1; |
291 | #define YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 | 171 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 |
292 | #define YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 | 172 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 0 |
293 | #define YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 | 173 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 |
294 | #define YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 | 174 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 |
295 | #define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 | 175 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 |
296 | #define YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 | 176 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 |
297 | #define YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 | 177 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 |
298 | #define YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 | 178 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 6 |
299 | #define YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 | 179 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 |
300 | #define YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 | 180 | #define E4_YSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 |
301 | u8 flags2; | 181 | u8 flags2; |
302 | #define YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 | 182 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_MASK 0x1 |
303 | #define YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 | 183 | #define E4_YSTORM_FCOE_TASK_AG_CTX_BIT4_SHIFT 0 |
304 | #define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 | 184 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 |
305 | #define YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 | 185 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 |
306 | #define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 | 186 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 |
307 | #define YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 | 187 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 |
308 | #define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 | 188 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 |
309 | #define YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 | 189 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 |
310 | #define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 | 190 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 |
311 | #define YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 | 191 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 |
312 | #define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 | 192 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 |
313 | #define YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 | 193 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 |
314 | #define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 | 194 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 |
315 | #define YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 | 195 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 6 |
316 | #define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 | 196 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 |
317 | #define YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 | 197 | #define E4_YSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 |
318 | u8 byte2; | 198 | u8 byte2; |
319 | __le32 reg0; | 199 | __le32 reg0; |
320 | u8 byte3; | 200 | u8 byte3; |
@@ -328,73 +208,73 @@ struct ystorm_fcoe_task_ag_ctx { | |||
328 | __le32 reg2; | 208 | __le32 reg2; |
329 | }; | 209 | }; |
330 | 210 | ||
331 | struct tstorm_fcoe_task_ag_ctx { | 211 | struct e4_tstorm_fcoe_task_ag_ctx { |
332 | u8 reserved; | 212 | u8 reserved; |
333 | u8 byte1; | 213 | u8 byte1; |
334 | __le16 icid; | 214 | __le16 icid; |
335 | u8 flags0; | 215 | u8 flags0; |
336 | #define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF | 216 | #define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF |
337 | #define TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 | 217 | #define E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 |
338 | #define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 | 218 | #define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 |
339 | #define TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 | 219 | #define E4_TSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 |
340 | #define TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 | 220 | #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 |
341 | #define TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 | 221 | #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 |
342 | #define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 | 222 | #define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_MASK 0x1 |
343 | #define TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 | 223 | #define E4_TSTORM_FCOE_TASK_AG_CTX_WAIT_ABTS_RSP_F_SHIFT 6 |
344 | #define TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 | 224 | #define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_MASK 0x1 |
345 | #define TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 | 225 | #define E4_TSTORM_FCOE_TASK_AG_CTX_VALID_SHIFT 7 |
346 | u8 flags1; | 226 | u8 flags1; |
347 | #define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 | 227 | #define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_MASK 0x1 |
348 | #define TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 | 228 | #define E4_TSTORM_FCOE_TASK_AG_CTX_FALSE_RR_TOV_SHIFT 0 |
349 | #define TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 | 229 | #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_MASK 0x1 |
350 | #define TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 | 230 | #define E4_TSTORM_FCOE_TASK_AG_CTX_BIT5_SHIFT 1 |
351 | #define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 | 231 | #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_MASK 0x3 |
352 | #define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 | 232 | #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_SHIFT 2 |
353 | #define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 | 233 | #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_MASK 0x3 |
354 | #define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 | 234 | #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_SHIFT 4 |
355 | #define TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 | 235 | #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 |
356 | #define TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 | 236 | #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 6 |
357 | u8 flags2; | 237 | u8 flags2; |
358 | #define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 | 238 | #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_MASK 0x3 |
359 | #define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 | 239 | #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_SHIFT 0 |
360 | #define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 | 240 | #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 |
361 | #define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 | 241 | #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 2 |
362 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 | 242 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_MASK 0x3 |
363 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 | 243 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_SHIFT 4 |
364 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 | 244 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_MASK 0x3 |
365 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 | 245 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_SHIFT 6 |
366 | u8 flags3; | 246 | u8 flags3; |
367 | #define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 | 247 | #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_MASK 0x3 |
368 | #define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 | 248 | #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_SHIFT 0 |
369 | #define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 | 249 | #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_MASK 0x1 |
370 | #define TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 | 250 | #define E4_TSTORM_FCOE_TASK_AG_CTX_REC_RR_TOV_CF_EN_SHIFT 2 |
371 | #define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 | 251 | #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_MASK 0x1 |
372 | #define TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 | 252 | #define E4_TSTORM_FCOE_TASK_AG_CTX_ED_TOV_CF_EN_SHIFT 3 |
373 | #define TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 | 253 | #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 |
374 | #define TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 | 254 | #define E4_TSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 4 |
375 | #define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 | 255 | #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_MASK 0x1 |
376 | #define TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 | 256 | #define E4_TSTORM_FCOE_TASK_AG_CTX_TIMER_STOP_ALL_EN_SHIFT 5 |
377 | #define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 | 257 | #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 |
378 | #define TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 | 258 | #define E4_TSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 |
379 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 | 259 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_MASK 0x1 |
380 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 | 260 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_INIT_CF_EN_SHIFT 7 |
381 | u8 flags4; | 261 | u8 flags4; |
382 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 | 262 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_MASK 0x1 |
383 | #define TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 | 263 | #define E4_TSTORM_FCOE_TASK_AG_CTX_SEQ_RECOVERY_CF_EN_SHIFT 0 |
384 | #define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 | 264 | #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_MASK 0x1 |
385 | #define TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 | 265 | #define E4_TSTORM_FCOE_TASK_AG_CTX_UNSOL_COMP_CF_EN_SHIFT 1 |
386 | #define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 | 266 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 |
387 | #define TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 | 267 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 2 |
388 | #define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 | 268 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 |
389 | #define TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 | 269 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 3 |
390 | #define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 | 270 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 |
391 | #define TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 | 271 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 4 |
392 | #define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 | 272 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 |
393 | #define TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 | 273 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 5 |
394 | #define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 | 274 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 |
395 | #define TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 | 275 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 6 |
396 | #define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 | 276 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 |
397 | #define TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 | 277 | #define E4_TSTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 7 |
398 | u8 cleanup_state; | 278 | u8 cleanup_state; |
399 | __le16 last_sent_tid; | 279 | __le16 last_sent_tid; |
400 | __le32 rec_rr_tov_exp_timeout; | 280 | __le32 rec_rr_tov_exp_timeout; |
@@ -407,25 +287,46 @@ struct tstorm_fcoe_task_ag_ctx { | |||
407 | __le32 data_offset_next; | 287 | __le32 data_offset_next; |
408 | }; | 288 | }; |
409 | 289 | ||
290 | /* Cached data sges */ | ||
291 | struct fcoe_exp_ro { | ||
292 | __le32 data_offset; | ||
293 | __le32 reserved; | ||
294 | }; | ||
295 | |||
296 | /* Union of Cleanup address \ expected relative offsets */ | ||
297 | union fcoe_cleanup_addr_exp_ro_union { | ||
298 | struct regpair abts_rsp_fc_payload_hi; | ||
299 | struct fcoe_exp_ro exp_ro; | ||
300 | }; | ||
301 | |||
302 | /* Fields coppied from ABTSrsp pckt */ | ||
303 | struct fcoe_abts_pkt { | ||
304 | __le32 abts_rsp_fc_payload_lo; | ||
305 | __le16 abts_rsp_rx_id; | ||
306 | u8 abts_rsp_rctl; | ||
307 | u8 reserved2; | ||
308 | }; | ||
309 | |||
310 | /* FW read- write (modifyable) part The fcoe task storm context of Tstorm */ | ||
410 | struct fcoe_tstorm_fcoe_task_st_ctx_read_write { | 311 | struct fcoe_tstorm_fcoe_task_st_ctx_read_write { |
411 | union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; | 312 | union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union; |
412 | __le16 flags; | 313 | __le16 flags; |
413 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 | 314 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1 |
414 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 | 315 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0 |
415 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 | 316 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1 |
416 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 | 317 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1 |
417 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 | 318 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1 |
418 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 | 319 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2 |
419 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 | 320 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1 |
420 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 | 321 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3 |
421 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 | 322 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1 |
422 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 | 323 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4 |
423 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 | 324 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1 |
424 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 | 325 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5 |
425 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 | 326 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3 |
426 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 | 327 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6 |
427 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF | 328 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF |
428 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 | 329 | #define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8 |
429 | __le16 seq_cnt; | 330 | __le16 seq_cnt; |
430 | u8 seq_id; | 331 | u8 seq_id; |
431 | u8 ooo_rx_seq_id; | 332 | u8 ooo_rx_seq_id; |
@@ -436,6 +337,7 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_write { | |||
436 | __le16 reserved1; | 337 | __le16 reserved1; |
437 | }; | 338 | }; |
438 | 339 | ||
340 | /* FW read only part The fcoe task storm context of Tstorm */ | ||
439 | struct fcoe_tstorm_fcoe_task_st_ctx_read_only { | 341 | struct fcoe_tstorm_fcoe_task_st_ctx_read_only { |
440 | u8 task_type; | 342 | u8 task_type; |
441 | u8 dev_type; | 343 | u8 dev_type; |
@@ -446,54 +348,55 @@ struct fcoe_tstorm_fcoe_task_st_ctx_read_only { | |||
446 | __le32 rsrv; | 348 | __le32 rsrv; |
447 | }; | 349 | }; |
448 | 350 | ||
351 | /** The fcoe task storm context of Tstorm */ | ||
449 | struct tstorm_fcoe_task_st_ctx { | 352 | struct tstorm_fcoe_task_st_ctx { |
450 | struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; | 353 | struct fcoe_tstorm_fcoe_task_st_ctx_read_write read_write; |
451 | struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; | 354 | struct fcoe_tstorm_fcoe_task_st_ctx_read_only read_only; |
452 | }; | 355 | }; |
453 | 356 | ||
454 | struct mstorm_fcoe_task_ag_ctx { | 357 | struct e4_mstorm_fcoe_task_ag_ctx { |
455 | u8 byte0; | 358 | u8 byte0; |
456 | u8 byte1; | 359 | u8 byte1; |
457 | __le16 icid; | 360 | __le16 icid; |
458 | u8 flags0; | 361 | u8 flags0; |
459 | #define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF | 362 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF |
460 | #define MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 | 363 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 |
461 | #define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 | 364 | #define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 |
462 | #define MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 | 365 | #define E4_MSTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 |
463 | #define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 | 366 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_MASK 0x1 |
464 | #define MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 | 367 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CQE_PLACED_SHIFT 5 |
465 | #define MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 | 368 | #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_MASK 0x1 |
466 | #define MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 | 369 | #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT2_SHIFT 6 |
467 | #define MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 | 370 | #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_MASK 0x1 |
468 | #define MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 | 371 | #define E4_MSTORM_FCOE_TASK_AG_CTX_BIT3_SHIFT 7 |
469 | u8 flags1; | 372 | u8 flags1; |
470 | #define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 | 373 | #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_MASK 0x3 |
471 | #define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 | 374 | #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_SHIFT 0 |
472 | #define MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 | 375 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 |
473 | #define MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 | 376 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 2 |
474 | #define MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 | 377 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 |
475 | #define MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 | 378 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 4 |
476 | #define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 | 379 | #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_MASK 0x1 |
477 | #define MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 | 380 | #define E4_MSTORM_FCOE_TASK_AG_CTX_EX_CLEANUP_CF_EN_SHIFT 6 |
478 | #define MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 | 381 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 |
479 | #define MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 | 382 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 7 |
480 | u8 flags2; | 383 | u8 flags2; |
481 | #define MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 | 384 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 |
482 | #define MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 | 385 | #define E4_MSTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 0 |
483 | #define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 | 386 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 |
484 | #define MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 | 387 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 1 |
485 | #define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 | 388 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 |
486 | #define MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 | 389 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 2 |
487 | #define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 | 390 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 |
488 | #define MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 | 391 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 3 |
489 | #define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 | 392 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 |
490 | #define MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 | 393 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 4 |
491 | #define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 | 394 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 |
492 | #define MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 | 395 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 5 |
493 | #define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 | 396 | #define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_MASK 0x1 |
494 | #define MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 | 397 | #define E4_MSTORM_FCOE_TASK_AG_CTX_XFER_PLACEMENT_EN_SHIFT 6 |
495 | #define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 | 398 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 |
496 | #define MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 | 399 | #define E4_MSTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 7 |
497 | u8 cleanup_state; | 400 | u8 cleanup_state; |
498 | __le32 received_bytes; | 401 | __le32 received_bytes; |
499 | u8 byte3; | 402 | u8 byte3; |
@@ -507,6 +410,7 @@ struct mstorm_fcoe_task_ag_ctx { | |||
507 | __le32 reg2; | 410 | __le32 reg2; |
508 | }; | 411 | }; |
509 | 412 | ||
413 | /* The fcoe task storm context of Mstorm */ | ||
510 | struct mstorm_fcoe_task_st_ctx { | 414 | struct mstorm_fcoe_task_st_ctx { |
511 | struct regpair rsp_buf_addr; | 415 | struct regpair rsp_buf_addr; |
512 | __le32 rsrv[2]; | 416 | __le32 rsrv[2]; |
@@ -515,79 +419,79 @@ struct mstorm_fcoe_task_st_ctx { | |||
515 | __le32 data_buffer_offset; | 419 | __le32 data_buffer_offset; |
516 | __le16 parent_id; | 420 | __le16 parent_id; |
517 | __le16 flags; | 421 | __le16 flags; |
518 | #define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF | 422 | #define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF |
519 | #define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 | 423 | #define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0 |
520 | #define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 | 424 | #define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3 |
521 | #define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 | 425 | #define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4 |
522 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 | 426 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1 |
523 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 | 427 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6 |
524 | #define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 | 428 | #define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1 |
525 | #define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 | 429 | #define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7 |
526 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 | 430 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3 |
527 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 | 431 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8 |
528 | #define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 | 432 | #define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1 |
529 | #define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 | 433 | #define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10 |
530 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 | 434 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1 |
531 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 | 435 | #define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11 |
532 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 | 436 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1 |
533 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 | 437 | #define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12 |
534 | #define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 | 438 | #define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1 |
535 | #define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 | 439 | #define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13 |
536 | #define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 | 440 | #define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3 |
537 | #define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 | 441 | #define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14 |
538 | struct scsi_cached_sges data_desc; | 442 | struct scsi_cached_sges data_desc; |
539 | }; | 443 | }; |
540 | 444 | ||
541 | struct ustorm_fcoe_task_ag_ctx { | 445 | struct e4_ustorm_fcoe_task_ag_ctx { |
542 | u8 reserved; | 446 | u8 reserved; |
543 | u8 byte1; | 447 | u8 byte1; |
544 | __le16 icid; | 448 | __le16 icid; |
545 | u8 flags0; | 449 | u8 flags0; |
546 | #define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF | 450 | #define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF |
547 | #define USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 | 451 | #define E4_USTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 |
548 | #define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 | 452 | #define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 |
549 | #define USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 | 453 | #define E4_USTORM_FCOE_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 |
550 | #define USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 | 454 | #define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_MASK 0x1 |
551 | #define USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 | 455 | #define E4_USTORM_FCOE_TASK_AG_CTX_BIT1_SHIFT 5 |
552 | #define USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 | 456 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF0_MASK 0x3 |
553 | #define USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 | 457 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF0_SHIFT 6 |
554 | u8 flags1; | 458 | u8 flags1; |
555 | #define USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 | 459 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF1_MASK 0x3 |
556 | #define USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 | 460 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF1_SHIFT 0 |
557 | #define USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 | 461 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF2_MASK 0x3 |
558 | #define USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 | 462 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF2_SHIFT 2 |
559 | #define USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 | 463 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF3_MASK 0x3 |
560 | #define USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 | 464 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF3_SHIFT 4 |
561 | #define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 | 465 | #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 |
562 | #define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 | 466 | #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 |
563 | u8 flags2; | 467 | u8 flags2; |
564 | #define USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 | 468 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_MASK 0x1 |
565 | #define USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 | 469 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF0EN_SHIFT 0 |
566 | #define USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 | 470 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_MASK 0x1 |
567 | #define USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 | 471 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF1EN_SHIFT 1 |
568 | #define USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 | 472 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_MASK 0x1 |
569 | #define USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 | 473 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF2EN_SHIFT 2 |
570 | #define USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 | 474 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_MASK 0x1 |
571 | #define USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 | 475 | #define E4_USTORM_FCOE_TASK_AG_CTX_CF3EN_SHIFT 3 |
572 | #define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 | 476 | #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 |
573 | #define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 | 477 | #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 |
574 | #define USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 | 478 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_MASK 0x1 |
575 | #define USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 | 479 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE0EN_SHIFT 5 |
576 | #define USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 | 480 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_MASK 0x1 |
577 | #define USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 | 481 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE1EN_SHIFT 6 |
578 | #define USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 | 482 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_MASK 0x1 |
579 | #define USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 | 483 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE2EN_SHIFT 7 |
580 | u8 flags3; | 484 | u8 flags3; |
581 | #define USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 | 485 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_MASK 0x1 |
582 | #define USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 | 486 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE3EN_SHIFT 0 |
583 | #define USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 | 487 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_MASK 0x1 |
584 | #define USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 | 488 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE4EN_SHIFT 1 |
585 | #define USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 | 489 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_MASK 0x1 |
586 | #define USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 | 490 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE5EN_SHIFT 2 |
587 | #define USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 | 491 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_MASK 0x1 |
588 | #define USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 | 492 | #define E4_USTORM_FCOE_TASK_AG_CTX_RULE6EN_SHIFT 3 |
589 | #define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF | 493 | #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF |
590 | #define USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 | 494 | #define E4_USTORM_FCOE_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 |
591 | __le32 dif_err_intervals; | 495 | __le32 dif_err_intervals; |
592 | __le32 dif_error_1st_interval; | 496 | __le32 dif_error_1st_interval; |
593 | __le32 global_cq_num; | 497 | __le32 global_cq_num; |
@@ -596,21 +500,189 @@ struct ustorm_fcoe_task_ag_ctx { | |||
596 | __le32 reg5; | 500 | __le32 reg5; |
597 | }; | 501 | }; |
598 | 502 | ||
599 | struct fcoe_task_context { | 503 | /* FCoE task context */ |
504 | struct e4_fcoe_task_context { | ||
600 | struct ystorm_fcoe_task_st_ctx ystorm_st_context; | 505 | struct ystorm_fcoe_task_st_ctx ystorm_st_context; |
601 | struct regpair ystorm_st_padding[2]; | 506 | struct regpair ystorm_st_padding[2]; |
602 | struct tdif_task_context tdif_context; | 507 | struct tdif_task_context tdif_context; |
603 | struct ystorm_fcoe_task_ag_ctx ystorm_ag_context; | 508 | struct e4_ystorm_fcoe_task_ag_ctx ystorm_ag_context; |
604 | struct tstorm_fcoe_task_ag_ctx tstorm_ag_context; | 509 | struct e4_tstorm_fcoe_task_ag_ctx tstorm_ag_context; |
605 | struct timers_context timer_context; | 510 | struct timers_context timer_context; |
606 | struct tstorm_fcoe_task_st_ctx tstorm_st_context; | 511 | struct tstorm_fcoe_task_st_ctx tstorm_st_context; |
607 | struct regpair tstorm_st_padding[2]; | 512 | struct regpair tstorm_st_padding[2]; |
608 | struct mstorm_fcoe_task_ag_ctx mstorm_ag_context; | 513 | struct e4_mstorm_fcoe_task_ag_ctx mstorm_ag_context; |
609 | struct mstorm_fcoe_task_st_ctx mstorm_st_context; | 514 | struct mstorm_fcoe_task_st_ctx mstorm_st_context; |
610 | struct ustorm_fcoe_task_ag_ctx ustorm_ag_context; | 515 | struct e4_ustorm_fcoe_task_ag_ctx ustorm_ag_context; |
611 | struct rdif_task_context rdif_context; | 516 | struct rdif_task_context rdif_context; |
612 | }; | 517 | }; |
613 | 518 | ||
519 | /* FCoE additional WQE (Sq/XferQ) information */ | ||
520 | union fcoe_additional_info_union { | ||
521 | __le32 previous_tid; | ||
522 | __le32 parent_tid; | ||
523 | __le32 burst_length; | ||
524 | __le32 seq_rec_updated_offset; | ||
525 | }; | ||
526 | |||
527 | /* FCoE Ramrod Command IDs */ | ||
528 | enum fcoe_completion_status { | ||
529 | FCOE_COMPLETION_STATUS_SUCCESS, | ||
530 | FCOE_COMPLETION_STATUS_FCOE_VER_ERR, | ||
531 | FCOE_COMPLETION_STATUS_SRC_MAC_ADD_ARR_ERR, | ||
532 | MAX_FCOE_COMPLETION_STATUS | ||
533 | }; | ||
534 | |||
535 | /* FC address (SID/DID) network presentation */ | ||
536 | struct fc_addr_nw { | ||
537 | u8 addr_lo; | ||
538 | u8 addr_mid; | ||
539 | u8 addr_hi; | ||
540 | }; | ||
541 | |||
542 | /* FCoE connection offload */ | ||
543 | struct fcoe_conn_offload_ramrod_data { | ||
544 | struct regpair sq_pbl_addr; | ||
545 | struct regpair sq_curr_page_addr; | ||
546 | struct regpair sq_next_page_addr; | ||
547 | struct regpair xferq_pbl_addr; | ||
548 | struct regpair xferq_curr_page_addr; | ||
549 | struct regpair xferq_next_page_addr; | ||
550 | struct regpair respq_pbl_addr; | ||
551 | struct regpair respq_curr_page_addr; | ||
552 | struct regpair respq_next_page_addr; | ||
553 | __le16 dst_mac_addr_lo; | ||
554 | __le16 dst_mac_addr_mid; | ||
555 | __le16 dst_mac_addr_hi; | ||
556 | __le16 src_mac_addr_lo; | ||
557 | __le16 src_mac_addr_mid; | ||
558 | __le16 src_mac_addr_hi; | ||
559 | __le16 tx_max_fc_pay_len; | ||
560 | __le16 e_d_tov_timer_val; | ||
561 | __le16 rx_max_fc_pay_len; | ||
562 | __le16 vlan_tag; | ||
563 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_MASK 0xFFF | ||
564 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_VLAN_ID_SHIFT 0 | ||
565 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_MASK 0x1 | ||
566 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_CFI_SHIFT 12 | ||
567 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_MASK 0x7 | ||
568 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_PRIORITY_SHIFT 13 | ||
569 | __le16 physical_q0; | ||
570 | __le16 rec_rr_tov_timer_val; | ||
571 | struct fc_addr_nw s_id; | ||
572 | u8 max_conc_seqs_c3; | ||
573 | struct fc_addr_nw d_id; | ||
574 | u8 flags; | ||
575 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_MASK 0x1 | ||
576 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONT_INCR_SEQ_CNT_SHIFT 0 | ||
577 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_MASK 0x1 | ||
578 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_CONF_REQ_SHIFT 1 | ||
579 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_MASK 0x1 | ||
580 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_REC_VALID_SHIFT 2 | ||
581 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_MASK 0x1 | ||
582 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_VLAN_FLAG_SHIFT 3 | ||
583 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_MASK 0x1 | ||
584 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_B_SINGLE_VLAN_SHIFT 4 | ||
585 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_MASK 0x3 | ||
586 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_MODE_SHIFT 5 | ||
587 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_MASK 0x1 | ||
588 | #define FCOE_CONN_OFFLOAD_RAMROD_DATA_RESERVED0_SHIFT 7 | ||
589 | __le16 conn_id; | ||
590 | u8 def_q_idx; | ||
591 | u8 reserved[5]; | ||
592 | }; | ||
593 | |||
594 | /* FCoE terminate connection request */ | ||
595 | struct fcoe_conn_terminate_ramrod_data { | ||
596 | struct regpair terminate_params_addr; | ||
597 | }; | ||
598 | |||
599 | /* FCoE device type */ | ||
600 | enum fcoe_device_type { | ||
601 | FCOE_TASK_DEV_TYPE_DISK, | ||
602 | FCOE_TASK_DEV_TYPE_TAPE, | ||
603 | MAX_FCOE_DEVICE_TYPE | ||
604 | }; | ||
605 | |||
606 | /* Data sgl */ | ||
607 | struct fcoe_fast_sgl_ctx { | ||
608 | struct regpair sgl_start_addr; | ||
609 | __le32 sgl_byte_offset; | ||
610 | __le16 task_reuse_cnt; | ||
611 | __le16 init_offset_in_first_sge; | ||
612 | }; | ||
613 | |||
614 | /* FCoE firmware function init */ | ||
615 | struct fcoe_init_func_ramrod_data { | ||
616 | struct scsi_init_func_params func_params; | ||
617 | struct scsi_init_func_queues q_params; | ||
618 | __le16 mtu; | ||
619 | __le16 sq_num_pages_in_pbl; | ||
620 | __le32 reserved[3]; | ||
621 | }; | ||
622 | |||
623 | /* FCoE: Mode of the connection: Target or Initiator or both */ | ||
624 | enum fcoe_mode_type { | ||
625 | FCOE_INITIATOR_MODE = 0x0, | ||
626 | FCOE_TARGET_MODE = 0x1, | ||
627 | FCOE_BOTH_OR_NOT_CHOSEN = 0x3, | ||
628 | MAX_FCOE_MODE_TYPE | ||
629 | }; | ||
630 | |||
631 | /* Per PF FCoE receive path statistics - tStorm RAM structure */ | ||
632 | struct fcoe_rx_stat { | ||
633 | struct regpair fcoe_rx_byte_cnt; | ||
634 | struct regpair fcoe_rx_data_pkt_cnt; | ||
635 | struct regpair fcoe_rx_xfer_pkt_cnt; | ||
636 | struct regpair fcoe_rx_other_pkt_cnt; | ||
637 | __le32 fcoe_silent_drop_pkt_cmdq_full_cnt; | ||
638 | __le32 fcoe_silent_drop_pkt_rq_full_cnt; | ||
639 | __le32 fcoe_silent_drop_pkt_crc_error_cnt; | ||
640 | __le32 fcoe_silent_drop_pkt_task_invalid_cnt; | ||
641 | __le32 fcoe_silent_drop_total_pkt_cnt; | ||
642 | __le32 rsrv; | ||
643 | }; | ||
644 | |||
645 | /* FCoE SQE request type */ | ||
646 | enum fcoe_sqe_request_type { | ||
647 | SEND_FCOE_CMD, | ||
648 | SEND_FCOE_MIDPATH, | ||
649 | SEND_FCOE_ABTS_REQUEST, | ||
650 | FCOE_EXCHANGE_CLEANUP, | ||
651 | FCOE_SEQUENCE_RECOVERY, | ||
652 | SEND_FCOE_XFER_RDY, | ||
653 | SEND_FCOE_RSP, | ||
654 | SEND_FCOE_RSP_WITH_SENSE_DATA, | ||
655 | SEND_FCOE_TARGET_DATA, | ||
656 | SEND_FCOE_INITIATOR_DATA, | ||
657 | SEND_FCOE_XFER_CONTINUATION_RDY, | ||
658 | SEND_FCOE_TARGET_ABTS_RSP, | ||
659 | MAX_FCOE_SQE_REQUEST_TYPE | ||
660 | }; | ||
661 | |||
662 | /* FCoe statistics request */ | ||
663 | struct fcoe_stat_ramrod_data { | ||
664 | struct regpair stat_params_addr; | ||
665 | }; | ||
666 | |||
667 | /* FCoE task type */ | ||
668 | enum fcoe_task_type { | ||
669 | FCOE_TASK_TYPE_WRITE_INITIATOR, | ||
670 | FCOE_TASK_TYPE_READ_INITIATOR, | ||
671 | FCOE_TASK_TYPE_MIDPATH, | ||
672 | FCOE_TASK_TYPE_UNSOLICITED, | ||
673 | FCOE_TASK_TYPE_ABTS, | ||
674 | FCOE_TASK_TYPE_EXCHANGE_CLEANUP, | ||
675 | FCOE_TASK_TYPE_SEQUENCE_CLEANUP, | ||
676 | FCOE_TASK_TYPE_WRITE_TARGET, | ||
677 | FCOE_TASK_TYPE_READ_TARGET, | ||
678 | FCOE_TASK_TYPE_RSP, | ||
679 | FCOE_TASK_TYPE_RSP_SENSE_DATA, | ||
680 | FCOE_TASK_TYPE_ABTS_TARGET, | ||
681 | FCOE_TASK_TYPE_ENUM_SIZE, | ||
682 | MAX_FCOE_TASK_TYPE | ||
683 | }; | ||
684 | |||
685 | /* Per PF FCoE transmit path statistics - pStorm RAM structure */ | ||
614 | struct fcoe_tx_stat { | 686 | struct fcoe_tx_stat { |
615 | struct regpair fcoe_tx_byte_cnt; | 687 | struct regpair fcoe_tx_byte_cnt; |
616 | struct regpair fcoe_tx_data_pkt_cnt; | 688 | struct regpair fcoe_tx_data_pkt_cnt; |
@@ -618,51 +690,55 @@ struct fcoe_tx_stat { | |||
618 | struct regpair fcoe_tx_other_pkt_cnt; | 690 | struct regpair fcoe_tx_other_pkt_cnt; |
619 | }; | 691 | }; |
620 | 692 | ||
693 | /* FCoE SQ/XferQ element */ | ||
621 | struct fcoe_wqe { | 694 | struct fcoe_wqe { |
622 | __le16 task_id; | 695 | __le16 task_id; |
623 | __le16 flags; | 696 | __le16 flags; |
624 | #define FCOE_WQE_REQ_TYPE_MASK 0xF | 697 | #define FCOE_WQE_REQ_TYPE_MASK 0xF |
625 | #define FCOE_WQE_REQ_TYPE_SHIFT 0 | 698 | #define FCOE_WQE_REQ_TYPE_SHIFT 0 |
626 | #define FCOE_WQE_SGL_MODE_MASK 0x1 | 699 | #define FCOE_WQE_SGL_MODE_MASK 0x1 |
627 | #define FCOE_WQE_SGL_MODE_SHIFT 4 | 700 | #define FCOE_WQE_SGL_MODE_SHIFT 4 |
628 | #define FCOE_WQE_CONTINUATION_MASK 0x1 | 701 | #define FCOE_WQE_CONTINUATION_MASK 0x1 |
629 | #define FCOE_WQE_CONTINUATION_SHIFT 5 | 702 | #define FCOE_WQE_CONTINUATION_SHIFT 5 |
630 | #define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 | 703 | #define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1 |
631 | #define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 | 704 | #define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6 |
632 | #define FCOE_WQE_RESERVED_MASK 0x1 | 705 | #define FCOE_WQE_RESERVED_MASK 0x1 |
633 | #define FCOE_WQE_RESERVED_SHIFT 7 | 706 | #define FCOE_WQE_RESERVED_SHIFT 7 |
634 | #define FCOE_WQE_NUM_SGES_MASK 0xF | 707 | #define FCOE_WQE_NUM_SGES_MASK 0xF |
635 | #define FCOE_WQE_NUM_SGES_SHIFT 8 | 708 | #define FCOE_WQE_NUM_SGES_SHIFT 8 |
636 | #define FCOE_WQE_RESERVED1_MASK 0xF | 709 | #define FCOE_WQE_RESERVED1_MASK 0xF |
637 | #define FCOE_WQE_RESERVED1_SHIFT 12 | 710 | #define FCOE_WQE_RESERVED1_SHIFT 12 |
638 | union fcoe_additional_info_union additional_info_union; | 711 | union fcoe_additional_info_union additional_info_union; |
639 | }; | 712 | }; |
640 | 713 | ||
714 | /* FCoE XFRQ element */ | ||
641 | struct xfrqe_prot_flags { | 715 | struct xfrqe_prot_flags { |
642 | u8 flags; | 716 | u8 flags; |
643 | #define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF | 717 | #define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF |
644 | #define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 | 718 | #define XFRQE_PROT_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 |
645 | #define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 | 719 | #define XFRQE_PROT_FLAGS_DIF_TO_PEER_MASK 0x1 |
646 | #define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 | 720 | #define XFRQE_PROT_FLAGS_DIF_TO_PEER_SHIFT 4 |
647 | #define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 | 721 | #define XFRQE_PROT_FLAGS_HOST_INTERFACE_MASK 0x3 |
648 | #define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 | 722 | #define XFRQE_PROT_FLAGS_HOST_INTERFACE_SHIFT 5 |
649 | #define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 | 723 | #define XFRQE_PROT_FLAGS_RESERVED_MASK 0x1 |
650 | #define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 | 724 | #define XFRQE_PROT_FLAGS_RESERVED_SHIFT 7 |
651 | }; | 725 | }; |
652 | 726 | ||
727 | /* FCoE doorbell data */ | ||
653 | struct fcoe_db_data { | 728 | struct fcoe_db_data { |
654 | u8 params; | 729 | u8 params; |
655 | #define FCOE_DB_DATA_DEST_MASK 0x3 | 730 | #define FCOE_DB_DATA_DEST_MASK 0x3 |
656 | #define FCOE_DB_DATA_DEST_SHIFT 0 | 731 | #define FCOE_DB_DATA_DEST_SHIFT 0 |
657 | #define FCOE_DB_DATA_AGG_CMD_MASK 0x3 | 732 | #define FCOE_DB_DATA_AGG_CMD_MASK 0x3 |
658 | #define FCOE_DB_DATA_AGG_CMD_SHIFT 2 | 733 | #define FCOE_DB_DATA_AGG_CMD_SHIFT 2 |
659 | #define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 | 734 | #define FCOE_DB_DATA_BYPASS_EN_MASK 0x1 |
660 | #define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 | 735 | #define FCOE_DB_DATA_BYPASS_EN_SHIFT 4 |
661 | #define FCOE_DB_DATA_RESERVED_MASK 0x1 | 736 | #define FCOE_DB_DATA_RESERVED_MASK 0x1 |
662 | #define FCOE_DB_DATA_RESERVED_SHIFT 5 | 737 | #define FCOE_DB_DATA_RESERVED_SHIFT 5 |
663 | #define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 | 738 | #define FCOE_DB_DATA_AGG_VAL_SEL_MASK 0x3 |
664 | #define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 | 739 | #define FCOE_DB_DATA_AGG_VAL_SEL_SHIFT 6 |
665 | u8 agg_flags; | 740 | u8 agg_flags; |
666 | __le16 sq_prod; | 741 | __le16 sq_prod; |
667 | }; | 742 | }; |
743 | |||
668 | #endif /* __FCOE_COMMON__ */ | 744 | #endif /* __FCOE_COMMON__ */ |
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h index 85e086cba639..4cc9b37b8d95 100644 --- a/include/linux/qed/iscsi_common.h +++ b/include/linux/qed/iscsi_common.h | |||
@@ -32,47 +32,48 @@ | |||
32 | 32 | ||
33 | #ifndef __ISCSI_COMMON__ | 33 | #ifndef __ISCSI_COMMON__ |
34 | #define __ISCSI_COMMON__ | 34 | #define __ISCSI_COMMON__ |
35 | |||
35 | /**********************/ | 36 | /**********************/ |
36 | /* ISCSI FW CONSTANTS */ | 37 | /* ISCSI FW CONSTANTS */ |
37 | /**********************/ | 38 | /**********************/ |
38 | 39 | ||
39 | /* iSCSI HSI constants */ | 40 | /* iSCSI HSI constants */ |
40 | #define ISCSI_DEFAULT_MTU (1500) | 41 | #define ISCSI_DEFAULT_MTU (1500) |
41 | 42 | ||
42 | /* KWQ (kernel work queue) layer codes */ | 43 | /* KWQ (kernel work queue) layer codes */ |
43 | #define ISCSI_SLOW_PATH_LAYER_CODE (6) | 44 | #define ISCSI_SLOW_PATH_LAYER_CODE (6) |
44 | 45 | ||
45 | /* iSCSI parameter defaults */ | 46 | /* iSCSI parameter defaults */ |
46 | #define ISCSI_DEFAULT_HEADER_DIGEST (0) | 47 | #define ISCSI_DEFAULT_HEADER_DIGEST (0) |
47 | #define ISCSI_DEFAULT_DATA_DIGEST (0) | 48 | #define ISCSI_DEFAULT_DATA_DIGEST (0) |
48 | #define ISCSI_DEFAULT_INITIAL_R2T (1) | 49 | #define ISCSI_DEFAULT_INITIAL_R2T (1) |
49 | #define ISCSI_DEFAULT_IMMEDIATE_DATA (1) | 50 | #define ISCSI_DEFAULT_IMMEDIATE_DATA (1) |
50 | #define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) | 51 | #define ISCSI_DEFAULT_MAX_PDU_LENGTH (0x2000) |
51 | #define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) | 52 | #define ISCSI_DEFAULT_FIRST_BURST_LENGTH (0x10000) |
52 | #define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) | 53 | #define ISCSI_DEFAULT_MAX_BURST_LENGTH (0x40000) |
53 | #define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) | 54 | #define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1) |
54 | 55 | ||
55 | /* iSCSI parameter limits */ | 56 | /* iSCSI parameter limits */ |
56 | #define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) | 57 | #define ISCSI_MIN_VAL_MAX_PDU_LENGTH (0x200) |
57 | #define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) | 58 | #define ISCSI_MAX_VAL_MAX_PDU_LENGTH (0xffffff) |
58 | #define ISCSI_MIN_VAL_BURST_LENGTH (0x200) | 59 | #define ISCSI_MIN_VAL_BURST_LENGTH (0x200) |
59 | #define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) | 60 | #define ISCSI_MAX_VAL_BURST_LENGTH (0xffffff) |
60 | #define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) | 61 | #define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1) |
61 | #define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) | 62 | #define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff) |
62 | 63 | ||
63 | #define ISCSI_AHS_CNTL_SIZE 4 | 64 | #define ISCSI_AHS_CNTL_SIZE 4 |
64 | 65 | ||
65 | #define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) | 66 | #define ISCSI_WQE_NUM_SGES_SLOWIO (0xf) |
66 | 67 | ||
67 | /* iSCSI reserved params */ | 68 | /* iSCSI reserved params */ |
68 | #define ISCSI_ITT_ALL_ONES (0xffffffff) | 69 | #define ISCSI_ITT_ALL_ONES (0xffffffff) |
69 | #define ISCSI_TTT_ALL_ONES (0xffffffff) | 70 | #define ISCSI_TTT_ALL_ONES (0xffffffff) |
70 | 71 | ||
71 | #define ISCSI_OPTION_1_OFF_CHIP_TCP 1 | 72 | #define ISCSI_OPTION_1_OFF_CHIP_TCP 1 |
72 | #define ISCSI_OPTION_2_ON_CHIP_TCP 2 | 73 | #define ISCSI_OPTION_2_ON_CHIP_TCP 2 |
73 | 74 | ||
74 | #define ISCSI_INITIATOR_MODE 0 | 75 | #define ISCSI_INITIATOR_MODE 0 |
75 | #define ISCSI_TARGET_MODE 1 | 76 | #define ISCSI_TARGET_MODE 1 |
76 | 77 | ||
77 | /* iSCSI request op codes */ | 78 | /* iSCSI request op codes */ |
78 | #define ISCSI_OPCODE_NOP_OUT (0) | 79 | #define ISCSI_OPCODE_NOP_OUT (0) |
@@ -84,41 +85,48 @@ | |||
84 | #define ISCSI_OPCODE_LOGOUT_REQUEST (6) | 85 | #define ISCSI_OPCODE_LOGOUT_REQUEST (6) |
85 | 86 | ||
86 | /* iSCSI response/messages op codes */ | 87 | /* iSCSI response/messages op codes */ |
87 | #define ISCSI_OPCODE_NOP_IN (0x20) | 88 | #define ISCSI_OPCODE_NOP_IN (0x20) |
88 | #define ISCSI_OPCODE_SCSI_RESPONSE (0x21) | 89 | #define ISCSI_OPCODE_SCSI_RESPONSE (0x21) |
89 | #define ISCSI_OPCODE_TMF_RESPONSE (0x22) | 90 | #define ISCSI_OPCODE_TMF_RESPONSE (0x22) |
90 | #define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) | 91 | #define ISCSI_OPCODE_LOGIN_RESPONSE (0x23) |
91 | #define ISCSI_OPCODE_TEXT_RESPONSE (0x24) | 92 | #define ISCSI_OPCODE_TEXT_RESPONSE (0x24) |
92 | #define ISCSI_OPCODE_DATA_IN (0x25) | 93 | #define ISCSI_OPCODE_DATA_IN (0x25) |
93 | #define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) | 94 | #define ISCSI_OPCODE_LOGOUT_RESPONSE (0x26) |
94 | #define ISCSI_OPCODE_R2T (0x31) | 95 | #define ISCSI_OPCODE_R2T (0x31) |
95 | #define ISCSI_OPCODE_ASYNC_MSG (0x32) | 96 | #define ISCSI_OPCODE_ASYNC_MSG (0x32) |
96 | #define ISCSI_OPCODE_REJECT (0x3f) | 97 | #define ISCSI_OPCODE_REJECT (0x3f) |
97 | 98 | ||
98 | /* iSCSI stages */ | 99 | /* iSCSI stages */ |
99 | #define ISCSI_STAGE_SECURITY_NEGOTIATION (0) | 100 | #define ISCSI_STAGE_SECURITY_NEGOTIATION (0) |
100 | #define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) | 101 | #define ISCSI_STAGE_LOGIN_OPERATIONAL_NEGOTIATION (1) |
101 | #define ISCSI_STAGE_FULL_FEATURE_PHASE (3) | 102 | #define ISCSI_STAGE_FULL_FEATURE_PHASE (3) |
102 | 103 | ||
103 | /* iSCSI CQE errors */ | 104 | /* iSCSI CQE errors */ |
104 | #define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) | 105 | #define CQE_ERROR_BITMAP_DATA_DIGEST (0x08) |
105 | #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) | 106 | #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN (0x10) |
106 | #define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) | 107 | #define CQE_ERROR_BITMAP_DATA_TRUNCATED (0x20) |
108 | |||
109 | /* Union of data bd_opaque/ tq_tid */ | ||
110 | union bd_opaque_tq_union { | ||
111 | __le16 bd_opaque; | ||
112 | __le16 tq_tid; | ||
113 | }; | ||
107 | 114 | ||
115 | /* ISCSI SGL entry */ | ||
108 | struct cqe_error_bitmap { | 116 | struct cqe_error_bitmap { |
109 | u8 cqe_error_status_bits; | 117 | u8 cqe_error_status_bits; |
110 | #define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 | 118 | #define CQE_ERROR_BITMAP_DIF_ERR_BITS_MASK 0x7 |
111 | #define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 | 119 | #define CQE_ERROR_BITMAP_DIF_ERR_BITS_SHIFT 0 |
112 | #define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 | 120 | #define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_MASK 0x1 |
113 | #define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 | 121 | #define CQE_ERROR_BITMAP_DATA_DIGEST_ERR_SHIFT 3 |
114 | #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 | 122 | #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_MASK 0x1 |
115 | #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 | 123 | #define CQE_ERROR_BITMAP_RCV_ON_INVALID_CONN_SHIFT 4 |
116 | #define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 | 124 | #define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_MASK 0x1 |
117 | #define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 | 125 | #define CQE_ERROR_BITMAP_DATA_TRUNCATED_ERR_SHIFT 5 |
118 | #define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 | 126 | #define CQE_ERROR_BITMAP_UNDER_RUN_ERR_MASK 0x1 |
119 | #define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 | 127 | #define CQE_ERROR_BITMAP_UNDER_RUN_ERR_SHIFT 6 |
120 | #define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 | 128 | #define CQE_ERROR_BITMAP_RESERVED2_MASK 0x1 |
121 | #define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 | 129 | #define CQE_ERROR_BITMAP_RESERVED2_SHIFT 7 |
122 | }; | 130 | }; |
123 | 131 | ||
124 | union cqe_error_status { | 132 | union cqe_error_status { |
@@ -126,86 +134,133 @@ union cqe_error_status { | |||
126 | struct cqe_error_bitmap error_bits; | 134 | struct cqe_error_bitmap error_bits; |
127 | }; | 135 | }; |
128 | 136 | ||
137 | /* iSCSI Login Response PDU header */ | ||
129 | struct data_hdr { | 138 | struct data_hdr { |
130 | __le32 data[12]; | 139 | __le32 data[12]; |
131 | }; | 140 | }; |
132 | 141 | ||
133 | struct iscsi_async_msg_hdr { | 142 | struct lun_mapper_addr_reserved { |
134 | __le16 reserved0; | 143 | struct regpair lun_mapper_addr; |
135 | u8 flags_attr; | 144 | u8 reserved0[8]; |
136 | #define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F | 145 | }; |
137 | #define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 | 146 | |
138 | #define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 | 147 | /* rdif conetxt for dif on immediate */ |
139 | #define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 | 148 | struct dif_on_immediate_params { |
140 | u8 opcode; | 149 | __le32 initial_ref_tag; |
141 | __le32 hdr_second_dword; | 150 | __le16 application_tag; |
142 | #define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 151 | __le16 application_tag_mask; |
143 | #define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 | 152 | __le16 flags1; |
144 | #define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF | 153 | #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_MASK 0x1 |
145 | #define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 | 154 | #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_GUARD_SHIFT 0 |
146 | struct regpair lun; | 155 | #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_MASK 0x1 |
147 | __le32 all_ones; | 156 | #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_APP_TAG_SHIFT 1 |
148 | __le32 reserved1; | 157 | #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_MASK 0x1 |
149 | __le32 stat_sn; | 158 | #define DIF_ON_IMMEDIATE_PARAMS_VALIDATE_REF_TAG_SHIFT 2 |
150 | __le32 exp_cmd_sn; | 159 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_MASK 0x1 |
151 | __le32 max_cmd_sn; | 160 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_GUARD_SHIFT 3 |
152 | __le16 param1_rsrv; | 161 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_MASK 0x1 |
153 | u8 async_vcode; | 162 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_SHIFT 4 |
154 | u8 async_event; | 163 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_MASK 0x1 |
155 | __le16 param3_rsrv; | 164 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_SHIFT 5 |
156 | __le16 param2_rsrv; | 165 | #define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_MASK 0x1 |
157 | __le32 reserved7; | 166 | #define DIF_ON_IMMEDIATE_PARAMS_INTERVAL_SIZE_SHIFT 6 |
167 | #define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_MASK 0x1 | ||
168 | #define DIF_ON_IMMEDIATE_PARAMS_NETWORK_INTERFACE_SHIFT 7 | ||
169 | #define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_MASK 0x3 | ||
170 | #define DIF_ON_IMMEDIATE_PARAMS_HOST_INTERFACE_SHIFT 8 | ||
171 | #define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_MASK 0xF | ||
172 | #define DIF_ON_IMMEDIATE_PARAMS_REF_TAG_MASK_SHIFT 10 | ||
173 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_MASK 0x1 | ||
174 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_APP_TAG_WITH_MASK_SHIFT 14 | ||
175 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_MASK 0x1 | ||
176 | #define DIF_ON_IMMEDIATE_PARAMS_FORWARD_REF_TAG_WITH_MASK_SHIFT 15 | ||
177 | u8 flags0; | ||
178 | #define DIF_ON_IMMEDIATE_PARAMS_RESERVED_MASK 0x1 | ||
179 | #define DIF_ON_IMMEDIATE_PARAMS_RESERVED_SHIFT 0 | ||
180 | #define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_MASK 0x1 | ||
181 | #define DIF_ON_IMMEDIATE_PARAMS_IGNORE_APP_TAG_SHIFT 1 | ||
182 | #define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_MASK 0x1 | ||
183 | #define DIF_ON_IMMEDIATE_PARAMS_INITIAL_REF_TAG_IS_VALID_SHIFT 2 | ||
184 | #define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_MASK 0x1 | ||
185 | #define DIF_ON_IMMEDIATE_PARAMS_HOST_GUARD_TYPE_SHIFT 3 | ||
186 | #define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_MASK 0x3 | ||
187 | #define DIF_ON_IMMEDIATE_PARAMS_PROTECTION_TYPE_SHIFT 4 | ||
188 | #define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_MASK 0x1 | ||
189 | #define DIF_ON_IMMEDIATE_PARAMS_CRC_SEED_SHIFT 6 | ||
190 | #define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_MASK 0x1 | ||
191 | #define DIF_ON_IMMEDIATE_PARAMS_KEEP_REF_TAG_CONST_SHIFT 7 | ||
192 | u8 reserved_zero[5]; | ||
193 | }; | ||
194 | |||
195 | /* iSCSI dif on immediate mode attributes union */ | ||
196 | union dif_configuration_params { | ||
197 | struct lun_mapper_addr_reserved lun_mapper_address; | ||
198 | struct dif_on_immediate_params def_dif_conf; | ||
199 | }; | ||
200 | |||
201 | /* Union of data/r2t sequence number */ | ||
202 | union iscsi_seq_num { | ||
203 | __le16 data_sn; | ||
204 | __le16 r2t_sn; | ||
158 | }; | 205 | }; |
159 | 206 | ||
160 | struct iscsi_cmd_hdr { | 207 | /* iSCSI DIF flags */ |
161 | __le16 reserved1; | 208 | struct iscsi_dif_flags { |
162 | u8 flags_attr; | 209 | u8 flags; |
163 | #define ISCSI_CMD_HDR_ATTR_MASK 0x7 | 210 | #define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF |
164 | #define ISCSI_CMD_HDR_ATTR_SHIFT 0 | 211 | #define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 |
165 | #define ISCSI_CMD_HDR_RSRV_MASK 0x3 | 212 | #define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 |
166 | #define ISCSI_CMD_HDR_RSRV_SHIFT 3 | 213 | #define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 |
167 | #define ISCSI_CMD_HDR_WRITE_MASK 0x1 | 214 | #define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 |
168 | #define ISCSI_CMD_HDR_WRITE_SHIFT 5 | 215 | #define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 |
169 | #define ISCSI_CMD_HDR_READ_MASK 0x1 | ||
170 | #define ISCSI_CMD_HDR_READ_SHIFT 6 | ||
171 | #define ISCSI_CMD_HDR_FINAL_MASK 0x1 | ||
172 | #define ISCSI_CMD_HDR_FINAL_SHIFT 7 | ||
173 | u8 hdr_first_byte; | ||
174 | #define ISCSI_CMD_HDR_OPCODE_MASK 0x3F | ||
175 | #define ISCSI_CMD_HDR_OPCODE_SHIFT 0 | ||
176 | #define ISCSI_CMD_HDR_IMM_MASK 0x1 | ||
177 | #define ISCSI_CMD_HDR_IMM_SHIFT 6 | ||
178 | #define ISCSI_CMD_HDR_RSRV1_MASK 0x1 | ||
179 | #define ISCSI_CMD_HDR_RSRV1_SHIFT 7 | ||
180 | __le32 hdr_second_dword; | ||
181 | #define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | ||
182 | #define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 | ||
183 | #define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF | ||
184 | #define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 | ||
185 | struct regpair lun; | ||
186 | __le32 itt; | ||
187 | __le32 expected_transfer_length; | ||
188 | __le32 cmd_sn; | ||
189 | __le32 exp_stat_sn; | ||
190 | __le32 cdb[4]; | ||
191 | }; | 216 | }; |
192 | 217 | ||
218 | /* The iscsi storm task context of Ystorm */ | ||
219 | struct ystorm_iscsi_task_state { | ||
220 | struct scsi_cached_sges data_desc; | ||
221 | struct scsi_sgl_params sgl_params; | ||
222 | __le32 exp_r2t_sn; | ||
223 | __le32 buffer_offset; | ||
224 | union iscsi_seq_num seq_num; | ||
225 | struct iscsi_dif_flags dif_flags; | ||
226 | u8 flags; | ||
227 | #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 | ||
228 | #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 | ||
229 | #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 | ||
230 | #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 | ||
231 | #define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_MASK 0x1 | ||
232 | #define YSTORM_ISCSI_TASK_STATE_SET_DIF_OFFSET_SHIFT 2 | ||
233 | #define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x1F | ||
234 | #define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 3 | ||
235 | }; | ||
236 | |||
237 | /* The iscsi storm task context of Ystorm */ | ||
238 | struct ystorm_iscsi_task_rxmit_opt { | ||
239 | __le32 fast_rxmit_sge_offset; | ||
240 | __le32 scan_start_buffer_offset; | ||
241 | __le32 fast_rxmit_buffer_offset; | ||
242 | u8 scan_start_sgl_index; | ||
243 | u8 fast_rxmit_sgl_index; | ||
244 | __le16 reserved; | ||
245 | }; | ||
246 | |||
247 | /* iSCSI Common PDU header */ | ||
193 | struct iscsi_common_hdr { | 248 | struct iscsi_common_hdr { |
194 | u8 hdr_status; | 249 | u8 hdr_status; |
195 | u8 hdr_response; | 250 | u8 hdr_response; |
196 | u8 hdr_flags; | 251 | u8 hdr_flags; |
197 | u8 hdr_first_byte; | 252 | u8 hdr_first_byte; |
198 | #define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F | 253 | #define ISCSI_COMMON_HDR_OPCODE_MASK 0x3F |
199 | #define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 | 254 | #define ISCSI_COMMON_HDR_OPCODE_SHIFT 0 |
200 | #define ISCSI_COMMON_HDR_IMM_MASK 0x1 | 255 | #define ISCSI_COMMON_HDR_IMM_MASK 0x1 |
201 | #define ISCSI_COMMON_HDR_IMM_SHIFT 6 | 256 | #define ISCSI_COMMON_HDR_IMM_SHIFT 6 |
202 | #define ISCSI_COMMON_HDR_RSRV_MASK 0x1 | 257 | #define ISCSI_COMMON_HDR_RSRV_MASK 0x1 |
203 | #define ISCSI_COMMON_HDR_RSRV_SHIFT 7 | 258 | #define ISCSI_COMMON_HDR_RSRV_SHIFT 7 |
204 | __le32 hdr_second_dword; | 259 | __le32 hdr_second_dword; |
205 | #define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 260 | #define ISCSI_COMMON_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
206 | #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 | 261 | #define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0 |
207 | #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF | 262 | #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF |
208 | #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 | 263 | #define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24 |
209 | struct regpair lun_reserved; | 264 | struct regpair lun_reserved; |
210 | __le32 itt; | 265 | __le32 itt; |
211 | __le32 ttt; | 266 | __le32 ttt; |
@@ -215,86 +270,60 @@ struct iscsi_common_hdr { | |||
215 | __le32 data[3]; | 270 | __le32 data[3]; |
216 | }; | 271 | }; |
217 | 272 | ||
218 | struct iscsi_conn_offload_params { | 273 | /* iSCSI Command PDU header */ |
219 | struct regpair sq_pbl_addr; | 274 | struct iscsi_cmd_hdr { |
220 | struct regpair r2tq_pbl_addr; | 275 | __le16 reserved1; |
221 | struct regpair xhq_pbl_addr; | 276 | u8 flags_attr; |
222 | struct regpair uhq_pbl_addr; | 277 | #define ISCSI_CMD_HDR_ATTR_MASK 0x7 |
223 | __le32 initial_ack; | 278 | #define ISCSI_CMD_HDR_ATTR_SHIFT 0 |
224 | __le16 physical_q0; | 279 | #define ISCSI_CMD_HDR_RSRV_MASK 0x3 |
225 | __le16 physical_q1; | 280 | #define ISCSI_CMD_HDR_RSRV_SHIFT 3 |
226 | u8 flags; | 281 | #define ISCSI_CMD_HDR_WRITE_MASK 0x1 |
227 | #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 | 282 | #define ISCSI_CMD_HDR_WRITE_SHIFT 5 |
228 | #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 | 283 | #define ISCSI_CMD_HDR_READ_MASK 0x1 |
229 | #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 | 284 | #define ISCSI_CMD_HDR_READ_SHIFT 6 |
230 | #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 | 285 | #define ISCSI_CMD_HDR_FINAL_MASK 0x1 |
231 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 | 286 | #define ISCSI_CMD_HDR_FINAL_SHIFT 7 |
232 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 | 287 | u8 hdr_first_byte; |
233 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F | 288 | #define ISCSI_CMD_HDR_OPCODE_MASK 0x3F |
234 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 | 289 | #define ISCSI_CMD_HDR_OPCODE_SHIFT 0 |
235 | u8 pbl_page_size_log; | 290 | #define ISCSI_CMD_HDR_IMM_MASK 0x1 |
236 | u8 pbe_page_size_log; | 291 | #define ISCSI_CMD_HDR_IMM_SHIFT 6 |
237 | u8 default_cq; | 292 | #define ISCSI_CMD_HDR_RSRV1_MASK 0x1 |
238 | __le32 stat_sn; | 293 | #define ISCSI_CMD_HDR_RSRV1_SHIFT 7 |
239 | }; | 294 | __le32 hdr_second_dword; |
240 | 295 | #define ISCSI_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | |
241 | struct iscsi_slow_path_hdr { | 296 | #define ISCSI_CMD_HDR_DATA_SEG_LEN_SHIFT 0 |
242 | u8 op_code; | 297 | #define ISCSI_CMD_HDR_TOTAL_AHS_LEN_MASK 0xFF |
243 | u8 flags; | 298 | #define ISCSI_CMD_HDR_TOTAL_AHS_LEN_SHIFT 24 |
244 | #define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF | 299 | struct regpair lun; |
245 | #define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 | 300 | __le32 itt; |
246 | #define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 | 301 | __le32 expected_transfer_length; |
247 | #define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 | 302 | __le32 cmd_sn; |
248 | #define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 | ||
249 | #define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 | ||
250 | }; | ||
251 | |||
252 | struct iscsi_conn_update_ramrod_params { | ||
253 | struct iscsi_slow_path_hdr hdr; | ||
254 | __le16 conn_id; | ||
255 | __le32 fw_cid; | ||
256 | u8 flags; | ||
257 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 | ||
258 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 | ||
259 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 | ||
260 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 | ||
261 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 | ||
262 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 | ||
263 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 | ||
264 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 | ||
265 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 | ||
266 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 | ||
267 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 | ||
268 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 | ||
269 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3 | ||
270 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6 | ||
271 | u8 reserved0[3]; | ||
272 | __le32 max_seq_size; | ||
273 | __le32 max_send_pdu_length; | ||
274 | __le32 max_recv_pdu_length; | ||
275 | __le32 first_seq_length; | ||
276 | __le32 exp_stat_sn; | 303 | __le32 exp_stat_sn; |
304 | __le32 cdb[4]; | ||
277 | }; | 305 | }; |
278 | 306 | ||
307 | /* iSCSI Command PDU header with Extended CDB (Initiator Mode) */ | ||
279 | struct iscsi_ext_cdb_cmd_hdr { | 308 | struct iscsi_ext_cdb_cmd_hdr { |
280 | __le16 reserved1; | 309 | __le16 reserved1; |
281 | u8 flags_attr; | 310 | u8 flags_attr; |
282 | #define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 | 311 | #define ISCSI_EXT_CDB_CMD_HDR_ATTR_MASK 0x7 |
283 | #define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 | 312 | #define ISCSI_EXT_CDB_CMD_HDR_ATTR_SHIFT 0 |
284 | #define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 | 313 | #define ISCSI_EXT_CDB_CMD_HDR_RSRV_MASK 0x3 |
285 | #define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 | 314 | #define ISCSI_EXT_CDB_CMD_HDR_RSRV_SHIFT 3 |
286 | #define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 | 315 | #define ISCSI_EXT_CDB_CMD_HDR_WRITE_MASK 0x1 |
287 | #define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 | 316 | #define ISCSI_EXT_CDB_CMD_HDR_WRITE_SHIFT 5 |
288 | #define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 | 317 | #define ISCSI_EXT_CDB_CMD_HDR_READ_MASK 0x1 |
289 | #define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 | 318 | #define ISCSI_EXT_CDB_CMD_HDR_READ_SHIFT 6 |
290 | #define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 | 319 | #define ISCSI_EXT_CDB_CMD_HDR_FINAL_MASK 0x1 |
291 | #define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 | 320 | #define ISCSI_EXT_CDB_CMD_HDR_FINAL_SHIFT 7 |
292 | u8 opcode; | 321 | u8 opcode; |
293 | __le32 hdr_second_dword; | 322 | __le32 hdr_second_dword; |
294 | #define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 323 | #define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
295 | #define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 | 324 | #define ISCSI_EXT_CDB_CMD_HDR_DATA_SEG_LEN_SHIFT 0 |
296 | #define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF | 325 | #define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_MASK 0xFF |
297 | #define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 | 326 | #define ISCSI_EXT_CDB_CMD_HDR_CDB_SIZE_SHIFT 24 |
298 | struct regpair lun; | 327 | struct regpair lun; |
299 | __le32 itt; | 328 | __le32 itt; |
300 | __le32 expected_transfer_length; | 329 | __le32 expected_transfer_length; |
@@ -303,26 +332,27 @@ struct iscsi_ext_cdb_cmd_hdr { | |||
303 | struct scsi_sge cdb_sge; | 332 | struct scsi_sge cdb_sge; |
304 | }; | 333 | }; |
305 | 334 | ||
335 | /* iSCSI login request PDU header */ | ||
306 | struct iscsi_login_req_hdr { | 336 | struct iscsi_login_req_hdr { |
307 | u8 version_min; | 337 | u8 version_min; |
308 | u8 version_max; | 338 | u8 version_max; |
309 | u8 flags_attr; | 339 | u8 flags_attr; |
310 | #define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 | 340 | #define ISCSI_LOGIN_REQ_HDR_NSG_MASK 0x3 |
311 | #define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 | 341 | #define ISCSI_LOGIN_REQ_HDR_NSG_SHIFT 0 |
312 | #define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 | 342 | #define ISCSI_LOGIN_REQ_HDR_CSG_MASK 0x3 |
313 | #define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 | 343 | #define ISCSI_LOGIN_REQ_HDR_CSG_SHIFT 2 |
314 | #define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 | 344 | #define ISCSI_LOGIN_REQ_HDR_RSRV_MASK 0x3 |
315 | #define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 | 345 | #define ISCSI_LOGIN_REQ_HDR_RSRV_SHIFT 4 |
316 | #define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 | 346 | #define ISCSI_LOGIN_REQ_HDR_C_MASK 0x1 |
317 | #define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 | 347 | #define ISCSI_LOGIN_REQ_HDR_C_SHIFT 6 |
318 | #define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 | 348 | #define ISCSI_LOGIN_REQ_HDR_T_MASK 0x1 |
319 | #define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 | 349 | #define ISCSI_LOGIN_REQ_HDR_T_SHIFT 7 |
320 | u8 opcode; | 350 | u8 opcode; |
321 | __le32 hdr_second_dword; | 351 | __le32 hdr_second_dword; |
322 | #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 352 | #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
323 | #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 | 353 | #define ISCSI_LOGIN_REQ_HDR_DATA_SEG_LEN_SHIFT 0 |
324 | #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF | 354 | #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_MASK 0xFF |
325 | #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 | 355 | #define ISCSI_LOGIN_REQ_HDR_TOTAL_AHS_LEN_SHIFT 24 |
326 | __le32 isid_tabc; | 356 | __le32 isid_tabc; |
327 | __le16 tsih; | 357 | __le16 tsih; |
328 | __le16 isid_d; | 358 | __le16 isid_d; |
@@ -334,6 +364,7 @@ struct iscsi_login_req_hdr { | |||
334 | __le32 reserved2[4]; | 364 | __le32 reserved2[4]; |
335 | }; | 365 | }; |
336 | 366 | ||
367 | /* iSCSI logout request PDU header */ | ||
337 | struct iscsi_logout_req_hdr { | 368 | struct iscsi_logout_req_hdr { |
338 | __le16 reserved0; | 369 | __le16 reserved0; |
339 | u8 reason_code; | 370 | u8 reason_code; |
@@ -348,13 +379,14 @@ struct iscsi_logout_req_hdr { | |||
348 | __le32 reserved4[4]; | 379 | __le32 reserved4[4]; |
349 | }; | 380 | }; |
350 | 381 | ||
382 | /* iSCSI Data-out PDU header */ | ||
351 | struct iscsi_data_out_hdr { | 383 | struct iscsi_data_out_hdr { |
352 | __le16 reserved1; | 384 | __le16 reserved1; |
353 | u8 flags_attr; | 385 | u8 flags_attr; |
354 | #define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F | 386 | #define ISCSI_DATA_OUT_HDR_RSRV_MASK 0x7F |
355 | #define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 | 387 | #define ISCSI_DATA_OUT_HDR_RSRV_SHIFT 0 |
356 | #define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 | 388 | #define ISCSI_DATA_OUT_HDR_FINAL_MASK 0x1 |
357 | #define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 | 389 | #define ISCSI_DATA_OUT_HDR_FINAL_SHIFT 7 |
358 | u8 opcode; | 390 | u8 opcode; |
359 | __le32 reserved2; | 391 | __le32 reserved2; |
360 | struct regpair lun; | 392 | struct regpair lun; |
@@ -368,22 +400,23 @@ struct iscsi_data_out_hdr { | |||
368 | __le32 reserved5; | 400 | __le32 reserved5; |
369 | }; | 401 | }; |
370 | 402 | ||
403 | /* iSCSI Data-in PDU header */ | ||
371 | struct iscsi_data_in_hdr { | 404 | struct iscsi_data_in_hdr { |
372 | u8 status_rsvd; | 405 | u8 status_rsvd; |
373 | u8 reserved1; | 406 | u8 reserved1; |
374 | u8 flags; | 407 | u8 flags; |
375 | #define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 | 408 | #define ISCSI_DATA_IN_HDR_STATUS_MASK 0x1 |
376 | #define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 | 409 | #define ISCSI_DATA_IN_HDR_STATUS_SHIFT 0 |
377 | #define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 | 410 | #define ISCSI_DATA_IN_HDR_UNDERFLOW_MASK 0x1 |
378 | #define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 | 411 | #define ISCSI_DATA_IN_HDR_UNDERFLOW_SHIFT 1 |
379 | #define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 | 412 | #define ISCSI_DATA_IN_HDR_OVERFLOW_MASK 0x1 |
380 | #define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 | 413 | #define ISCSI_DATA_IN_HDR_OVERFLOW_SHIFT 2 |
381 | #define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 | 414 | #define ISCSI_DATA_IN_HDR_RSRV_MASK 0x7 |
382 | #define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 | 415 | #define ISCSI_DATA_IN_HDR_RSRV_SHIFT 3 |
383 | #define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 | 416 | #define ISCSI_DATA_IN_HDR_ACK_MASK 0x1 |
384 | #define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 | 417 | #define ISCSI_DATA_IN_HDR_ACK_SHIFT 6 |
385 | #define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 | 418 | #define ISCSI_DATA_IN_HDR_FINAL_MASK 0x1 |
386 | #define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 | 419 | #define ISCSI_DATA_IN_HDR_FINAL_SHIFT 7 |
387 | u8 opcode; | 420 | u8 opcode; |
388 | __le32 reserved2; | 421 | __le32 reserved2; |
389 | struct regpair lun; | 422 | struct regpair lun; |
@@ -397,6 +430,7 @@ struct iscsi_data_in_hdr { | |||
397 | __le32 residual_count; | 430 | __le32 residual_count; |
398 | }; | 431 | }; |
399 | 432 | ||
433 | /* iSCSI R2T PDU header */ | ||
400 | struct iscsi_r2t_hdr { | 434 | struct iscsi_r2t_hdr { |
401 | u8 reserved0[3]; | 435 | u8 reserved0[3]; |
402 | u8 opcode; | 436 | u8 opcode; |
@@ -412,13 +446,14 @@ struct iscsi_r2t_hdr { | |||
412 | __le32 desired_data_trns_len; | 446 | __le32 desired_data_trns_len; |
413 | }; | 447 | }; |
414 | 448 | ||
449 | /* iSCSI NOP-out PDU header */ | ||
415 | struct iscsi_nop_out_hdr { | 450 | struct iscsi_nop_out_hdr { |
416 | __le16 reserved1; | 451 | __le16 reserved1; |
417 | u8 flags_attr; | 452 | u8 flags_attr; |
418 | #define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F | 453 | #define ISCSI_NOP_OUT_HDR_RSRV_MASK 0x7F |
419 | #define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 | 454 | #define ISCSI_NOP_OUT_HDR_RSRV_SHIFT 0 |
420 | #define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 | 455 | #define ISCSI_NOP_OUT_HDR_CONST1_MASK 0x1 |
421 | #define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 | 456 | #define ISCSI_NOP_OUT_HDR_CONST1_SHIFT 7 |
422 | u8 opcode; | 457 | u8 opcode; |
423 | __le32 reserved2; | 458 | __le32 reserved2; |
424 | struct regpair lun; | 459 | struct regpair lun; |
@@ -432,19 +467,20 @@ struct iscsi_nop_out_hdr { | |||
432 | __le32 reserved6; | 467 | __le32 reserved6; |
433 | }; | 468 | }; |
434 | 469 | ||
470 | /* iSCSI NOP-in PDU header */ | ||
435 | struct iscsi_nop_in_hdr { | 471 | struct iscsi_nop_in_hdr { |
436 | __le16 reserved0; | 472 | __le16 reserved0; |
437 | u8 flags_attr; | 473 | u8 flags_attr; |
438 | #define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F | 474 | #define ISCSI_NOP_IN_HDR_RSRV_MASK 0x7F |
439 | #define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 | 475 | #define ISCSI_NOP_IN_HDR_RSRV_SHIFT 0 |
440 | #define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 | 476 | #define ISCSI_NOP_IN_HDR_CONST1_MASK 0x1 |
441 | #define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 | 477 | #define ISCSI_NOP_IN_HDR_CONST1_SHIFT 7 |
442 | u8 opcode; | 478 | u8 opcode; |
443 | __le32 hdr_second_dword; | 479 | __le32 hdr_second_dword; |
444 | #define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 480 | #define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
445 | #define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 | 481 | #define ISCSI_NOP_IN_HDR_DATA_SEG_LEN_SHIFT 0 |
446 | #define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF | 482 | #define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_MASK 0xFF |
447 | #define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 | 483 | #define ISCSI_NOP_IN_HDR_TOTAL_AHS_LEN_SHIFT 24 |
448 | struct regpair lun; | 484 | struct regpair lun; |
449 | __le32 itt; | 485 | __le32 itt; |
450 | __le32 ttt; | 486 | __le32 ttt; |
@@ -456,26 +492,27 @@ struct iscsi_nop_in_hdr { | |||
456 | __le32 reserved7; | 492 | __le32 reserved7; |
457 | }; | 493 | }; |
458 | 494 | ||
495 | /* iSCSI Login Response PDU header */ | ||
459 | struct iscsi_login_response_hdr { | 496 | struct iscsi_login_response_hdr { |
460 | u8 version_active; | 497 | u8 version_active; |
461 | u8 version_max; | 498 | u8 version_max; |
462 | u8 flags_attr; | 499 | u8 flags_attr; |
463 | #define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 | 500 | #define ISCSI_LOGIN_RESPONSE_HDR_NSG_MASK 0x3 |
464 | #define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 | 501 | #define ISCSI_LOGIN_RESPONSE_HDR_NSG_SHIFT 0 |
465 | #define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 | 502 | #define ISCSI_LOGIN_RESPONSE_HDR_CSG_MASK 0x3 |
466 | #define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 | 503 | #define ISCSI_LOGIN_RESPONSE_HDR_CSG_SHIFT 2 |
467 | #define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 | 504 | #define ISCSI_LOGIN_RESPONSE_HDR_RSRV_MASK 0x3 |
468 | #define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 | 505 | #define ISCSI_LOGIN_RESPONSE_HDR_RSRV_SHIFT 4 |
469 | #define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 | 506 | #define ISCSI_LOGIN_RESPONSE_HDR_C_MASK 0x1 |
470 | #define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 | 507 | #define ISCSI_LOGIN_RESPONSE_HDR_C_SHIFT 6 |
471 | #define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 | 508 | #define ISCSI_LOGIN_RESPONSE_HDR_T_MASK 0x1 |
472 | #define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 | 509 | #define ISCSI_LOGIN_RESPONSE_HDR_T_SHIFT 7 |
473 | u8 opcode; | 510 | u8 opcode; |
474 | __le32 hdr_second_dword; | 511 | __le32 hdr_second_dword; |
475 | #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 512 | #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
476 | #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 | 513 | #define ISCSI_LOGIN_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 |
477 | #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF | 514 | #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF |
478 | #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 | 515 | #define ISCSI_LOGIN_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 |
479 | __le32 isid_tabc; | 516 | __le32 isid_tabc; |
480 | __le16 tsih; | 517 | __le16 tsih; |
481 | __le16 isid_d; | 518 | __le16 isid_d; |
@@ -490,16 +527,17 @@ struct iscsi_login_response_hdr { | |||
490 | __le32 reserved4[2]; | 527 | __le32 reserved4[2]; |
491 | }; | 528 | }; |
492 | 529 | ||
530 | /* iSCSI Logout Response PDU header */ | ||
493 | struct iscsi_logout_response_hdr { | 531 | struct iscsi_logout_response_hdr { |
494 | u8 reserved1; | 532 | u8 reserved1; |
495 | u8 response; | 533 | u8 response; |
496 | u8 flags; | 534 | u8 flags; |
497 | u8 opcode; | 535 | u8 opcode; |
498 | __le32 hdr_second_dword; | 536 | __le32 hdr_second_dword; |
499 | #define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 537 | #define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
500 | #define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 | 538 | #define ISCSI_LOGOUT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 |
501 | #define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF | 539 | #define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF |
502 | #define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 | 540 | #define ISCSI_LOGOUT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 |
503 | __le32 reserved2[2]; | 541 | __le32 reserved2[2]; |
504 | __le32 itt; | 542 | __le32 itt; |
505 | __le32 reserved3; | 543 | __le32 reserved3; |
@@ -512,21 +550,22 @@ struct iscsi_logout_response_hdr { | |||
512 | __le32 reserved5[1]; | 550 | __le32 reserved5[1]; |
513 | }; | 551 | }; |
514 | 552 | ||
553 | /* iSCSI Text Request PDU header */ | ||
515 | struct iscsi_text_request_hdr { | 554 | struct iscsi_text_request_hdr { |
516 | __le16 reserved0; | 555 | __le16 reserved0; |
517 | u8 flags_attr; | 556 | u8 flags_attr; |
518 | #define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F | 557 | #define ISCSI_TEXT_REQUEST_HDR_RSRV_MASK 0x3F |
519 | #define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 | 558 | #define ISCSI_TEXT_REQUEST_HDR_RSRV_SHIFT 0 |
520 | #define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 | 559 | #define ISCSI_TEXT_REQUEST_HDR_C_MASK 0x1 |
521 | #define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 | 560 | #define ISCSI_TEXT_REQUEST_HDR_C_SHIFT 6 |
522 | #define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 | 561 | #define ISCSI_TEXT_REQUEST_HDR_F_MASK 0x1 |
523 | #define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 | 562 | #define ISCSI_TEXT_REQUEST_HDR_F_SHIFT 7 |
524 | u8 opcode; | 563 | u8 opcode; |
525 | __le32 hdr_second_dword; | 564 | __le32 hdr_second_dword; |
526 | #define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 565 | #define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
527 | #define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 | 566 | #define ISCSI_TEXT_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 |
528 | #define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF | 567 | #define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF |
529 | #define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 | 568 | #define ISCSI_TEXT_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 |
530 | struct regpair lun; | 569 | struct regpair lun; |
531 | __le32 itt; | 570 | __le32 itt; |
532 | __le32 ttt; | 571 | __le32 ttt; |
@@ -535,21 +574,22 @@ struct iscsi_text_request_hdr { | |||
535 | __le32 reserved4[4]; | 574 | __le32 reserved4[4]; |
536 | }; | 575 | }; |
537 | 576 | ||
577 | /* iSCSI Text Response PDU header */ | ||
538 | struct iscsi_text_response_hdr { | 578 | struct iscsi_text_response_hdr { |
539 | __le16 reserved1; | 579 | __le16 reserved1; |
540 | u8 flags; | 580 | u8 flags; |
541 | #define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F | 581 | #define ISCSI_TEXT_RESPONSE_HDR_RSRV_MASK 0x3F |
542 | #define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 | 582 | #define ISCSI_TEXT_RESPONSE_HDR_RSRV_SHIFT 0 |
543 | #define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 | 583 | #define ISCSI_TEXT_RESPONSE_HDR_C_MASK 0x1 |
544 | #define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 | 584 | #define ISCSI_TEXT_RESPONSE_HDR_C_SHIFT 6 |
545 | #define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 | 585 | #define ISCSI_TEXT_RESPONSE_HDR_F_MASK 0x1 |
546 | #define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 | 586 | #define ISCSI_TEXT_RESPONSE_HDR_F_SHIFT 7 |
547 | u8 opcode; | 587 | u8 opcode; |
548 | __le32 hdr_second_dword; | 588 | __le32 hdr_second_dword; |
549 | #define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 589 | #define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
550 | #define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 | 590 | #define ISCSI_TEXT_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 |
551 | #define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF | 591 | #define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF |
552 | #define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 | 592 | #define ISCSI_TEXT_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 |
553 | struct regpair lun; | 593 | struct regpair lun; |
554 | __le32 itt; | 594 | __le32 itt; |
555 | __le32 ttt; | 595 | __le32 ttt; |
@@ -559,15 +599,16 @@ struct iscsi_text_response_hdr { | |||
559 | __le32 reserved4[3]; | 599 | __le32 reserved4[3]; |
560 | }; | 600 | }; |
561 | 601 | ||
602 | /* iSCSI TMF Request PDU header */ | ||
562 | struct iscsi_tmf_request_hdr { | 603 | struct iscsi_tmf_request_hdr { |
563 | __le16 reserved0; | 604 | __le16 reserved0; |
564 | u8 function; | 605 | u8 function; |
565 | u8 opcode; | 606 | u8 opcode; |
566 | __le32 hdr_second_dword; | 607 | __le32 hdr_second_dword; |
567 | #define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 608 | #define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
568 | #define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 | 609 | #define ISCSI_TMF_REQUEST_HDR_DATA_SEG_LEN_SHIFT 0 |
569 | #define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF | 610 | #define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_MASK 0xFF |
570 | #define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 | 611 | #define ISCSI_TMF_REQUEST_HDR_TOTAL_AHS_LEN_SHIFT 24 |
571 | struct regpair lun; | 612 | struct regpair lun; |
572 | __le32 itt; | 613 | __le32 itt; |
573 | __le32 rtt; | 614 | __le32 rtt; |
@@ -584,10 +625,10 @@ struct iscsi_tmf_response_hdr { | |||
584 | u8 hdr_flags; | 625 | u8 hdr_flags; |
585 | u8 opcode; | 626 | u8 opcode; |
586 | __le32 hdr_second_dword; | 627 | __le32 hdr_second_dword; |
587 | #define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 628 | #define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
588 | #define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 | 629 | #define ISCSI_TMF_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 |
589 | #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF | 630 | #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF |
590 | #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 | 631 | #define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 |
591 | struct regpair reserved0; | 632 | struct regpair reserved0; |
592 | __le32 itt; | 633 | __le32 itt; |
593 | __le32 reserved1; | 634 | __le32 reserved1; |
@@ -597,16 +638,17 @@ struct iscsi_tmf_response_hdr { | |||
597 | __le32 reserved4[3]; | 638 | __le32 reserved4[3]; |
598 | }; | 639 | }; |
599 | 640 | ||
641 | /* iSCSI Response PDU header */ | ||
600 | struct iscsi_response_hdr { | 642 | struct iscsi_response_hdr { |
601 | u8 hdr_status; | 643 | u8 hdr_status; |
602 | u8 hdr_response; | 644 | u8 hdr_response; |
603 | u8 hdr_flags; | 645 | u8 hdr_flags; |
604 | u8 opcode; | 646 | u8 opcode; |
605 | __le32 hdr_second_dword; | 647 | __le32 hdr_second_dword; |
606 | #define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 648 | #define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
607 | #define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 | 649 | #define ISCSI_RESPONSE_HDR_DATA_SEG_LEN_SHIFT 0 |
608 | #define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF | 650 | #define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_MASK 0xFF |
609 | #define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 | 651 | #define ISCSI_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24 |
610 | struct regpair lun; | 652 | struct regpair lun; |
611 | __le32 itt; | 653 | __le32 itt; |
612 | __le32 snack_tag; | 654 | __le32 snack_tag; |
@@ -618,16 +660,17 @@ struct iscsi_response_hdr { | |||
618 | __le32 residual_count; | 660 | __le32 residual_count; |
619 | }; | 661 | }; |
620 | 662 | ||
663 | /* iSCSI Reject PDU header */ | ||
621 | struct iscsi_reject_hdr { | 664 | struct iscsi_reject_hdr { |
622 | u8 reserved4; | 665 | u8 reserved4; |
623 | u8 hdr_reason; | 666 | u8 hdr_reason; |
624 | u8 hdr_flags; | 667 | u8 hdr_flags; |
625 | u8 opcode; | 668 | u8 opcode; |
626 | __le32 hdr_second_dword; | 669 | __le32 hdr_second_dword; |
627 | #define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | 670 | #define ISCSI_REJECT_HDR_DATA_SEG_LEN_MASK 0xFFFFFF |
628 | #define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 | 671 | #define ISCSI_REJECT_HDR_DATA_SEG_LEN_SHIFT 0 |
629 | #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF | 672 | #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF |
630 | #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 | 673 | #define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24 |
631 | struct regpair reserved0; | 674 | struct regpair reserved0; |
632 | __le32 all_ones; | 675 | __le32 all_ones; |
633 | __le32 reserved2; | 676 | __le32 reserved2; |
@@ -638,6 +681,35 @@ struct iscsi_reject_hdr { | |||
638 | __le32 reserved3[2]; | 681 | __le32 reserved3[2]; |
639 | }; | 682 | }; |
640 | 683 | ||
684 | /* iSCSI Asynchronous Message PDU header */ | ||
685 | struct iscsi_async_msg_hdr { | ||
686 | __le16 reserved0; | ||
687 | u8 flags_attr; | ||
688 | #define ISCSI_ASYNC_MSG_HDR_RSRV_MASK 0x7F | ||
689 | #define ISCSI_ASYNC_MSG_HDR_RSRV_SHIFT 0 | ||
690 | #define ISCSI_ASYNC_MSG_HDR_CONST1_MASK 0x1 | ||
691 | #define ISCSI_ASYNC_MSG_HDR_CONST1_SHIFT 7 | ||
692 | u8 opcode; | ||
693 | __le32 hdr_second_dword; | ||
694 | #define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_MASK 0xFFFFFF | ||
695 | #define ISCSI_ASYNC_MSG_HDR_DATA_SEG_LEN_SHIFT 0 | ||
696 | #define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_MASK 0xFF | ||
697 | #define ISCSI_ASYNC_MSG_HDR_TOTAL_AHS_LEN_SHIFT 24 | ||
698 | struct regpair lun; | ||
699 | __le32 all_ones; | ||
700 | __le32 reserved1; | ||
701 | __le32 stat_sn; | ||
702 | __le32 exp_cmd_sn; | ||
703 | __le32 max_cmd_sn; | ||
704 | __le16 param1_rsrv; | ||
705 | u8 async_vcode; | ||
706 | u8 async_event; | ||
707 | __le16 param3_rsrv; | ||
708 | __le16 param2_rsrv; | ||
709 | __le32 reserved7; | ||
710 | }; | ||
711 | |||
712 | /* PDU header part of Ystorm task context */ | ||
641 | union iscsi_task_hdr { | 713 | union iscsi_task_hdr { |
642 | struct iscsi_common_hdr common; | 714 | struct iscsi_common_hdr common; |
643 | struct data_hdr data; | 715 | struct data_hdr data; |
@@ -661,6 +733,348 @@ union iscsi_task_hdr { | |||
661 | struct iscsi_async_msg_hdr async_msg; | 733 | struct iscsi_async_msg_hdr async_msg; |
662 | }; | 734 | }; |
663 | 735 | ||
736 | /* The iscsi storm task context of Ystorm */ | ||
737 | struct ystorm_iscsi_task_st_ctx { | ||
738 | struct ystorm_iscsi_task_state state; | ||
739 | struct ystorm_iscsi_task_rxmit_opt rxmit_opt; | ||
740 | union iscsi_task_hdr pdu_hdr; | ||
741 | }; | ||
742 | |||
743 | struct e4_ystorm_iscsi_task_ag_ctx { | ||
744 | u8 reserved; | ||
745 | u8 byte1; | ||
746 | __le16 word0; | ||
747 | u8 flags0; | ||
748 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF | ||
749 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 | ||
750 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 | ||
751 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 | ||
752 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 | ||
753 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 | ||
754 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 | ||
755 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 | ||
756 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 | ||
757 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 | ||
758 | u8 flags1; | ||
759 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 | ||
760 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 | ||
761 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 | ||
762 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 | ||
763 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 | ||
764 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 | ||
765 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 | ||
766 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 | ||
767 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 | ||
768 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 | ||
769 | u8 flags2; | ||
770 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 | ||
771 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 | ||
772 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 | ||
773 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 | ||
774 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 | ||
775 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 | ||
776 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 | ||
777 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 | ||
778 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 | ||
779 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 | ||
780 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 | ||
781 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 | ||
782 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 | ||
783 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 | ||
784 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 | ||
785 | #define E4_YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 | ||
786 | u8 byte2; | ||
787 | __le32 TTT; | ||
788 | u8 byte3; | ||
789 | u8 byte4; | ||
790 | __le16 word1; | ||
791 | }; | ||
792 | |||
793 | struct e4_mstorm_iscsi_task_ag_ctx { | ||
794 | u8 cdu_validation; | ||
795 | u8 byte1; | ||
796 | __le16 task_cid; | ||
797 | u8 flags0; | ||
798 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF | ||
799 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 | ||
800 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 | ||
801 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 | ||
802 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 | ||
803 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 | ||
804 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 | ||
805 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 | ||
806 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 | ||
807 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 | ||
808 | u8 flags1; | ||
809 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 | ||
810 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 | ||
811 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 | ||
812 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 | ||
813 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 | ||
814 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 | ||
815 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 | ||
816 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 | ||
817 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 | ||
818 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 | ||
819 | u8 flags2; | ||
820 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 | ||
821 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 | ||
822 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 | ||
823 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 | ||
824 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 | ||
825 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 | ||
826 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 | ||
827 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 | ||
828 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 | ||
829 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 | ||
830 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 | ||
831 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 | ||
832 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 | ||
833 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 | ||
834 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 | ||
835 | #define E4_MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 | ||
836 | u8 byte2; | ||
837 | __le32 reg0; | ||
838 | u8 byte3; | ||
839 | u8 byte4; | ||
840 | __le16 word1; | ||
841 | }; | ||
842 | |||
843 | struct e4_ustorm_iscsi_task_ag_ctx { | ||
844 | u8 reserved; | ||
845 | u8 state; | ||
846 | __le16 icid; | ||
847 | u8 flags0; | ||
848 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF | ||
849 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 | ||
850 | #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 | ||
851 | #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 | ||
852 | #define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 | ||
853 | #define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 | ||
854 | #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 | ||
855 | #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 | ||
856 | u8 flags1; | ||
857 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 | ||
858 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 | ||
859 | #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 | ||
860 | #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 | ||
861 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 | ||
862 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 | ||
863 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 | ||
864 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 | ||
865 | u8 flags2; | ||
866 | #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 | ||
867 | #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 | ||
868 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 | ||
869 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 | ||
870 | #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 | ||
871 | #define E4_USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 | ||
872 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 | ||
873 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 | ||
874 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 | ||
875 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 | ||
876 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 | ||
877 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 | ||
878 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 | ||
879 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 | ||
880 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 | ||
881 | #define E4_USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 | ||
882 | u8 flags3; | ||
883 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 | ||
884 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 | ||
885 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 | ||
886 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 | ||
887 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 | ||
888 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 | ||
889 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 | ||
890 | #define E4_USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 | ||
891 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF | ||
892 | #define E4_USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 | ||
893 | __le32 dif_err_intervals; | ||
894 | __le32 dif_error_1st_interval; | ||
895 | __le32 rcv_cont_len; | ||
896 | __le32 exp_cont_len; | ||
897 | __le32 total_data_acked; | ||
898 | __le32 exp_data_acked; | ||
899 | u8 next_tid_valid; | ||
900 | u8 byte3; | ||
901 | __le16 word1; | ||
902 | __le16 next_tid; | ||
903 | __le16 word3; | ||
904 | __le32 hdr_residual_count; | ||
905 | __le32 exp_r2t_sn; | ||
906 | }; | ||
907 | |||
908 | /* The iscsi storm task context of Mstorm */ | ||
909 | struct mstorm_iscsi_task_st_ctx { | ||
910 | struct scsi_cached_sges data_desc; | ||
911 | struct scsi_sgl_params sgl_params; | ||
912 | __le32 rem_task_size; | ||
913 | __le32 data_buffer_offset; | ||
914 | u8 task_type; | ||
915 | struct iscsi_dif_flags dif_flags; | ||
916 | __le16 dif_task_icid; | ||
917 | struct regpair sense_db; | ||
918 | __le32 expected_itt; | ||
919 | __le32 reserved1; | ||
920 | }; | ||
921 | |||
922 | struct iscsi_reg1 { | ||
923 | __le32 reg1_map; | ||
924 | #define ISCSI_REG1_NUM_SGES_MASK 0xF | ||
925 | #define ISCSI_REG1_NUM_SGES_SHIFT 0 | ||
926 | #define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF | ||
927 | #define ISCSI_REG1_RESERVED1_SHIFT 4 | ||
928 | }; | ||
929 | |||
930 | struct tqe_opaque { | ||
931 | __le16 opaque[2]; | ||
932 | }; | ||
933 | |||
934 | /* The iscsi storm task context of Ustorm */ | ||
935 | struct ustorm_iscsi_task_st_ctx { | ||
936 | __le32 rem_rcv_len; | ||
937 | __le32 exp_data_transfer_len; | ||
938 | __le32 exp_data_sn; | ||
939 | struct regpair lun; | ||
940 | struct iscsi_reg1 reg1; | ||
941 | u8 flags2; | ||
942 | #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 | ||
943 | #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 | ||
944 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F | ||
945 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 | ||
946 | struct iscsi_dif_flags dif_flags; | ||
947 | __le16 reserved3; | ||
948 | struct tqe_opaque tqe_opaque_list; | ||
949 | __le32 reserved5; | ||
950 | __le32 reserved6; | ||
951 | __le32 reserved7; | ||
952 | u8 task_type; | ||
953 | u8 error_flags; | ||
954 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 | ||
955 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 | ||
956 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 | ||
957 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 | ||
958 | #define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 | ||
959 | #define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 | ||
960 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F | ||
961 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 | ||
962 | u8 flags; | ||
963 | #define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 | ||
964 | #define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 | ||
965 | #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 | ||
966 | #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 | ||
967 | #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 | ||
968 | #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 | ||
969 | #define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 | ||
970 | #define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 | ||
971 | #define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 | ||
972 | #define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 | ||
973 | #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 | ||
974 | #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 | ||
975 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 | ||
976 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 | ||
977 | u8 cq_rss_number; | ||
978 | }; | ||
979 | |||
980 | /* iscsi task context */ | ||
981 | struct e4_iscsi_task_context { | ||
982 | struct ystorm_iscsi_task_st_ctx ystorm_st_context; | ||
983 | struct e4_ystorm_iscsi_task_ag_ctx ystorm_ag_context; | ||
984 | struct regpair ystorm_ag_padding[2]; | ||
985 | struct tdif_task_context tdif_context; | ||
986 | struct e4_mstorm_iscsi_task_ag_ctx mstorm_ag_context; | ||
987 | struct regpair mstorm_ag_padding[2]; | ||
988 | struct e4_ustorm_iscsi_task_ag_ctx ustorm_ag_context; | ||
989 | struct mstorm_iscsi_task_st_ctx mstorm_st_context; | ||
990 | struct ustorm_iscsi_task_st_ctx ustorm_st_context; | ||
991 | struct rdif_task_context rdif_context; | ||
992 | }; | ||
993 | |||
994 | /* iSCSI connection offload params passed by driver to FW in ISCSI offload | ||
995 | * ramrod. | ||
996 | */ | ||
997 | struct iscsi_conn_offload_params { | ||
998 | struct regpair sq_pbl_addr; | ||
999 | struct regpair r2tq_pbl_addr; | ||
1000 | struct regpair xhq_pbl_addr; | ||
1001 | struct regpair uhq_pbl_addr; | ||
1002 | __le32 initial_ack; | ||
1003 | __le16 physical_q0; | ||
1004 | __le16 physical_q1; | ||
1005 | u8 flags; | ||
1006 | #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_MASK 0x1 | ||
1007 | #define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0 | ||
1008 | #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1 | ||
1009 | #define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1 | ||
1010 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1 | ||
1011 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2 | ||
1012 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F | ||
1013 | #define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3 | ||
1014 | u8 pbl_page_size_log; | ||
1015 | u8 pbe_page_size_log; | ||
1016 | u8 default_cq; | ||
1017 | __le32 stat_sn; | ||
1018 | }; | ||
1019 | |||
1020 | /* iSCSI connection statistics */ | ||
1021 | struct iscsi_conn_stats_params { | ||
1022 | struct regpair iscsi_tcp_tx_packets_cnt; | ||
1023 | struct regpair iscsi_tcp_tx_bytes_cnt; | ||
1024 | struct regpair iscsi_tcp_tx_rxmit_cnt; | ||
1025 | struct regpair iscsi_tcp_rx_packets_cnt; | ||
1026 | struct regpair iscsi_tcp_rx_bytes_cnt; | ||
1027 | struct regpair iscsi_tcp_rx_dup_ack_cnt; | ||
1028 | __le32 iscsi_tcp_rx_chksum_err_cnt; | ||
1029 | __le32 reserved; | ||
1030 | }; | ||
1031 | |||
1032 | /* spe message header */ | ||
1033 | struct iscsi_slow_path_hdr { | ||
1034 | u8 op_code; | ||
1035 | u8 flags; | ||
1036 | #define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF | ||
1037 | #define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0 | ||
1038 | #define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7 | ||
1039 | #define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4 | ||
1040 | #define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1 | ||
1041 | #define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7 | ||
1042 | }; | ||
1043 | |||
1044 | /* iSCSI connection update params passed by driver to FW in ISCSI update | ||
1045 | *ramrod. | ||
1046 | */ | ||
1047 | struct iscsi_conn_update_ramrod_params { | ||
1048 | struct iscsi_slow_path_hdr hdr; | ||
1049 | __le16 conn_id; | ||
1050 | __le32 fw_cid; | ||
1051 | u8 flags; | ||
1052 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1 | ||
1053 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0 | ||
1054 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_MASK 0x1 | ||
1055 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DD_EN_SHIFT 1 | ||
1056 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_MASK 0x1 | ||
1057 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2 | ||
1058 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1 | ||
1059 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3 | ||
1060 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1 | ||
1061 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4 | ||
1062 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1 | ||
1063 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5 | ||
1064 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_MASK 0x1 | ||
1065 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6 | ||
1066 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1 | ||
1067 | #define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7 | ||
1068 | u8 reserved0[3]; | ||
1069 | __le32 max_seq_size; | ||
1070 | __le32 max_send_pdu_length; | ||
1071 | __le32 max_recv_pdu_length; | ||
1072 | __le32 first_seq_length; | ||
1073 | __le32 exp_stat_sn; | ||
1074 | union dif_configuration_params dif_on_imme_params; | ||
1075 | }; | ||
1076 | |||
1077 | /* iSCSI CQ element */ | ||
664 | struct iscsi_cqe_common { | 1078 | struct iscsi_cqe_common { |
665 | __le16 conn_id; | 1079 | __le16 conn_id; |
666 | u8 cqe_type; | 1080 | u8 cqe_type; |
@@ -669,6 +1083,7 @@ struct iscsi_cqe_common { | |||
669 | union iscsi_task_hdr iscsi_hdr; | 1083 | union iscsi_task_hdr iscsi_hdr; |
670 | }; | 1084 | }; |
671 | 1085 | ||
1086 | /* iSCSI CQ element */ | ||
672 | struct iscsi_cqe_solicited { | 1087 | struct iscsi_cqe_solicited { |
673 | __le16 conn_id; | 1088 | __le16 conn_id; |
674 | u8 cqe_type; | 1089 | u8 cqe_type; |
@@ -678,10 +1093,11 @@ struct iscsi_cqe_solicited { | |||
678 | u8 fw_dbg_field; | 1093 | u8 fw_dbg_field; |
679 | u8 caused_conn_err; | 1094 | u8 caused_conn_err; |
680 | u8 reserved0[3]; | 1095 | u8 reserved0[3]; |
681 | __le32 reserved1[1]; | 1096 | __le32 data_truncated_bytes; |
682 | union iscsi_task_hdr iscsi_hdr; | 1097 | union iscsi_task_hdr iscsi_hdr; |
683 | }; | 1098 | }; |
684 | 1099 | ||
1100 | /* iSCSI CQ element */ | ||
685 | struct iscsi_cqe_unsolicited { | 1101 | struct iscsi_cqe_unsolicited { |
686 | __le16 conn_id; | 1102 | __le16 conn_id; |
687 | u8 cqe_type; | 1103 | u8 cqe_type; |
@@ -689,16 +1105,19 @@ struct iscsi_cqe_unsolicited { | |||
689 | __le16 reserved0; | 1105 | __le16 reserved0; |
690 | u8 reserved1; | 1106 | u8 reserved1; |
691 | u8 unsol_cqe_type; | 1107 | u8 unsol_cqe_type; |
692 | struct regpair rqe_opaque; | 1108 | __le16 rqe_opaque; |
1109 | __le16 reserved2[3]; | ||
693 | union iscsi_task_hdr iscsi_hdr; | 1110 | union iscsi_task_hdr iscsi_hdr; |
694 | }; | 1111 | }; |
695 | 1112 | ||
1113 | /* iSCSI CQ element */ | ||
696 | union iscsi_cqe { | 1114 | union iscsi_cqe { |
697 | struct iscsi_cqe_common cqe_common; | 1115 | struct iscsi_cqe_common cqe_common; |
698 | struct iscsi_cqe_solicited cqe_solicited; | 1116 | struct iscsi_cqe_solicited cqe_solicited; |
699 | struct iscsi_cqe_unsolicited cqe_unsolicited; | 1117 | struct iscsi_cqe_unsolicited cqe_unsolicited; |
700 | }; | 1118 | }; |
701 | 1119 | ||
1120 | /* iSCSI CQE type */ | ||
702 | enum iscsi_cqes_type { | 1121 | enum iscsi_cqes_type { |
703 | ISCSI_CQE_TYPE_SOLICITED = 1, | 1122 | ISCSI_CQE_TYPE_SOLICITED = 1, |
704 | ISCSI_CQE_TYPE_UNSOLICITED, | 1123 | ISCSI_CQE_TYPE_UNSOLICITED, |
@@ -708,6 +1127,7 @@ enum iscsi_cqes_type { | |||
708 | MAX_ISCSI_CQES_TYPE | 1127 | MAX_ISCSI_CQES_TYPE |
709 | }; | 1128 | }; |
710 | 1129 | ||
1130 | /* iSCSI CQE type */ | ||
711 | enum iscsi_cqe_unsolicited_type { | 1131 | enum iscsi_cqe_unsolicited_type { |
712 | ISCSI_CQE_UNSOLICITED_NONE, | 1132 | ISCSI_CQE_UNSOLICITED_NONE, |
713 | ISCSI_CQE_UNSOLICITED_SINGLE, | 1133 | ISCSI_CQE_UNSOLICITED_SINGLE, |
@@ -717,37 +1137,28 @@ enum iscsi_cqe_unsolicited_type { | |||
717 | MAX_ISCSI_CQE_UNSOLICITED_TYPE | 1137 | MAX_ISCSI_CQE_UNSOLICITED_TYPE |
718 | }; | 1138 | }; |
719 | 1139 | ||
720 | 1140 | /* iscsi debug modes */ | |
721 | struct iscsi_debug_modes { | 1141 | struct iscsi_debug_modes { |
722 | u8 flags; | 1142 | u8 flags; |
723 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 | 1143 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_MASK 0x1 |
724 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 | 1144 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RX_CONN_ERROR_SHIFT 0 |
725 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 | 1145 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_MASK 0x1 |
726 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 | 1146 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_RESET_SHIFT 1 |
727 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 | 1147 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_MASK 0x1 |
728 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 | 1148 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_FIN_SHIFT 2 |
729 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 | 1149 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_MASK 0x1 |
730 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 | 1150 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_CLEANUP_SHIFT 3 |
731 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 | 1151 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_MASK 0x1 |
732 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 | 1152 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4 |
733 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 | 1153 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1 |
734 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 | 1154 | #define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5 |
735 | #define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1 | 1155 | #define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_MASK 0x1 |
736 | #define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6 | 1156 | #define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_OR_DATA_DIGEST_ERROR_SHIFT 6 |
737 | #define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1 | 1157 | #define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_MASK 0x1 |
738 | #define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7 | 1158 | #define ISCSI_DEBUG_MODES_ASSERT_IF_HQ_CORRUPT_SHIFT 7 |
739 | }; | 1159 | }; |
740 | 1160 | ||
741 | struct iscsi_dif_flags { | 1161 | /* iSCSI kernel completion queue IDs */ |
742 | u8 flags; | ||
743 | #define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_MASK 0xF | ||
744 | #define ISCSI_DIF_FLAGS_PROT_INTERVAL_SIZE_LOG_SHIFT 0 | ||
745 | #define ISCSI_DIF_FLAGS_DIF_TO_PEER_MASK 0x1 | ||
746 | #define ISCSI_DIF_FLAGS_DIF_TO_PEER_SHIFT 4 | ||
747 | #define ISCSI_DIF_FLAGS_HOST_INTERFACE_MASK 0x7 | ||
748 | #define ISCSI_DIF_FLAGS_HOST_INTERFACE_SHIFT 5 | ||
749 | }; | ||
750 | |||
751 | enum iscsi_eqe_opcode { | 1162 | enum iscsi_eqe_opcode { |
752 | ISCSI_EVENT_TYPE_INIT_FUNC = 0, | 1163 | ISCSI_EVENT_TYPE_INIT_FUNC = 0, |
753 | ISCSI_EVENT_TYPE_DESTROY_FUNC, | 1164 | ISCSI_EVENT_TYPE_DESTROY_FUNC, |
@@ -756,9 +1167,9 @@ enum iscsi_eqe_opcode { | |||
756 | ISCSI_EVENT_TYPE_CLEAR_SQ, | 1167 | ISCSI_EVENT_TYPE_CLEAR_SQ, |
757 | ISCSI_EVENT_TYPE_TERMINATE_CONN, | 1168 | ISCSI_EVENT_TYPE_TERMINATE_CONN, |
758 | ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, | 1169 | ISCSI_EVENT_TYPE_MAC_UPDATE_CONN, |
1170 | ISCSI_EVENT_TYPE_COLLECT_STATS_CONN, | ||
759 | ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, | 1171 | ISCSI_EVENT_TYPE_ASYN_CONNECT_COMPLETE, |
760 | ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, | 1172 | ISCSI_EVENT_TYPE_ASYN_TERMINATE_DONE, |
761 | RESERVED9, | ||
762 | ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, | 1173 | ISCSI_EVENT_TYPE_START_OF_ERROR_TYPES = 10, |
763 | ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, | 1174 | ISCSI_EVENT_TYPE_ASYN_ABORT_RCVD, |
764 | ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, | 1175 | ISCSI_EVENT_TYPE_ASYN_CLOSE_RCVD, |
@@ -772,6 +1183,7 @@ enum iscsi_eqe_opcode { | |||
772 | MAX_ISCSI_EQE_OPCODE | 1183 | MAX_ISCSI_EQE_OPCODE |
773 | }; | 1184 | }; |
774 | 1185 | ||
1186 | /* iSCSI EQE and CQE completion status */ | ||
775 | enum iscsi_error_types { | 1187 | enum iscsi_error_types { |
776 | ISCSI_STATUS_NONE = 0, | 1188 | ISCSI_STATUS_NONE = 0, |
777 | ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, | 1189 | ISCSI_CQE_ERROR_UNSOLICITED_RCV_ON_INVALID_CONN = 1, |
@@ -823,7 +1235,7 @@ enum iscsi_error_types { | |||
823 | MAX_ISCSI_ERROR_TYPES | 1235 | MAX_ISCSI_ERROR_TYPES |
824 | }; | 1236 | }; |
825 | 1237 | ||
826 | 1238 | /* iSCSI Ramrod Command IDs */ | |
827 | enum iscsi_ramrod_cmd_id { | 1239 | enum iscsi_ramrod_cmd_id { |
828 | ISCSI_RAMROD_CMD_ID_UNUSED = 0, | 1240 | ISCSI_RAMROD_CMD_ID_UNUSED = 0, |
829 | ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, | 1241 | ISCSI_RAMROD_CMD_ID_INIT_FUNC = 1, |
@@ -833,22 +1245,11 @@ enum iscsi_ramrod_cmd_id { | |||
833 | ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, | 1245 | ISCSI_RAMROD_CMD_ID_TERMINATION_CONN = 5, |
834 | ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, | 1246 | ISCSI_RAMROD_CMD_ID_CLEAR_SQ = 6, |
835 | ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, | 1247 | ISCSI_RAMROD_CMD_ID_MAC_UPDATE = 7, |
1248 | ISCSI_RAMROD_CMD_ID_CONN_STATS = 8, | ||
836 | MAX_ISCSI_RAMROD_CMD_ID | 1249 | MAX_ISCSI_RAMROD_CMD_ID |
837 | }; | 1250 | }; |
838 | 1251 | ||
839 | struct iscsi_reg1 { | 1252 | /* iSCSI connection termination request */ |
840 | __le32 reg1_map; | ||
841 | #define ISCSI_REG1_NUM_SGES_MASK 0xF | ||
842 | #define ISCSI_REG1_NUM_SGES_SHIFT 0 | ||
843 | #define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF | ||
844 | #define ISCSI_REG1_RESERVED1_SHIFT 4 | ||
845 | }; | ||
846 | |||
847 | union iscsi_seq_num { | ||
848 | __le16 data_sn; | ||
849 | __le16 r2t_sn; | ||
850 | }; | ||
851 | |||
852 | struct iscsi_spe_conn_mac_update { | 1253 | struct iscsi_spe_conn_mac_update { |
853 | struct iscsi_slow_path_hdr hdr; | 1254 | struct iscsi_slow_path_hdr hdr; |
854 | __le16 conn_id; | 1255 | __le16 conn_id; |
@@ -859,6 +1260,9 @@ struct iscsi_spe_conn_mac_update { | |||
859 | u8 reserved0[2]; | 1260 | u8 reserved0[2]; |
860 | }; | 1261 | }; |
861 | 1262 | ||
1263 | /* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in | ||
1264 | * iSCSI offload ramrod. | ||
1265 | */ | ||
862 | struct iscsi_spe_conn_offload { | 1266 | struct iscsi_spe_conn_offload { |
863 | struct iscsi_slow_path_hdr hdr; | 1267 | struct iscsi_slow_path_hdr hdr; |
864 | __le16 conn_id; | 1268 | __le16 conn_id; |
@@ -867,6 +1271,9 @@ struct iscsi_spe_conn_offload { | |||
867 | struct tcp_offload_params tcp; | 1271 | struct tcp_offload_params tcp; |
868 | }; | 1272 | }; |
869 | 1273 | ||
1274 | /* iSCSI and TCP connection(Option 2) offload params passed by driver to FW in | ||
1275 | * iSCSI offload ramrod. | ||
1276 | */ | ||
870 | struct iscsi_spe_conn_offload_option2 { | 1277 | struct iscsi_spe_conn_offload_option2 { |
871 | struct iscsi_slow_path_hdr hdr; | 1278 | struct iscsi_slow_path_hdr hdr; |
872 | __le16 conn_id; | 1279 | __le16 conn_id; |
@@ -875,6 +1282,17 @@ struct iscsi_spe_conn_offload_option2 { | |||
875 | struct tcp_offload_params_opt2 tcp; | 1282 | struct tcp_offload_params_opt2 tcp; |
876 | }; | 1283 | }; |
877 | 1284 | ||
1285 | /* iSCSI collect connection statistics request */ | ||
1286 | struct iscsi_spe_conn_statistics { | ||
1287 | struct iscsi_slow_path_hdr hdr; | ||
1288 | __le16 conn_id; | ||
1289 | __le32 fw_cid; | ||
1290 | u8 reset_stats; | ||
1291 | u8 reserved0[7]; | ||
1292 | struct regpair stats_cnts_addr; | ||
1293 | }; | ||
1294 | |||
1295 | /* iSCSI connection termination request */ | ||
878 | struct iscsi_spe_conn_termination { | 1296 | struct iscsi_spe_conn_termination { |
879 | struct iscsi_slow_path_hdr hdr; | 1297 | struct iscsi_slow_path_hdr hdr; |
880 | __le16 conn_id; | 1298 | __le16 conn_id; |
@@ -885,12 +1303,14 @@ struct iscsi_spe_conn_termination { | |||
885 | struct regpair query_params_addr; | 1303 | struct regpair query_params_addr; |
886 | }; | 1304 | }; |
887 | 1305 | ||
1306 | /* iSCSI firmware function destroy parameters */ | ||
888 | struct iscsi_spe_func_dstry { | 1307 | struct iscsi_spe_func_dstry { |
889 | struct iscsi_slow_path_hdr hdr; | 1308 | struct iscsi_slow_path_hdr hdr; |
890 | __le16 reserved0; | 1309 | __le16 reserved0; |
891 | __le32 reserved1; | 1310 | __le32 reserved1; |
892 | }; | 1311 | }; |
893 | 1312 | ||
1313 | /* iSCSI firmware function init parameters */ | ||
894 | struct iscsi_spe_func_init { | 1314 | struct iscsi_spe_func_init { |
895 | struct iscsi_slow_path_hdr hdr; | 1315 | struct iscsi_slow_path_hdr hdr; |
896 | __le16 half_way_close_timeout; | 1316 | __le16 half_way_close_timeout; |
@@ -898,283 +1318,19 @@ struct iscsi_spe_func_init { | |||
898 | u8 num_r2tq_pages_in_ring; | 1318 | u8 num_r2tq_pages_in_ring; |
899 | u8 num_uhq_pages_in_ring; | 1319 | u8 num_uhq_pages_in_ring; |
900 | u8 ll2_rx_queue_id; | 1320 | u8 ll2_rx_queue_id; |
901 | u8 ooo_enable; | 1321 | u8 flags; |
1322 | #define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_MASK 0x1 | ||
1323 | #define ISCSI_SPE_FUNC_INIT_COUNTERS_EN_SHIFT 0 | ||
1324 | #define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F | ||
1325 | #define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1 | ||
902 | struct iscsi_debug_modes debug_mode; | 1326 | struct iscsi_debug_modes debug_mode; |
903 | __le16 reserved1; | 1327 | __le16 reserved1; |
904 | __le32 reserved2; | 1328 | __le32 reserved2; |
905 | __le32 reserved3; | ||
906 | __le32 reserved4; | ||
907 | struct scsi_init_func_params func_params; | 1329 | struct scsi_init_func_params func_params; |
908 | struct scsi_init_func_queues q_params; | 1330 | struct scsi_init_func_queues q_params; |
909 | }; | 1331 | }; |
910 | 1332 | ||
911 | struct ystorm_iscsi_task_state { | 1333 | /* iSCSI task type */ |
912 | struct scsi_cached_sges data_desc; | ||
913 | struct scsi_sgl_params sgl_params; | ||
914 | __le32 exp_r2t_sn; | ||
915 | __le32 buffer_offset; | ||
916 | union iscsi_seq_num seq_num; | ||
917 | struct iscsi_dif_flags dif_flags; | ||
918 | u8 flags; | ||
919 | #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1 | ||
920 | #define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0 | ||
921 | #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1 | ||
922 | #define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1 | ||
923 | #define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F | ||
924 | #define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2 | ||
925 | }; | ||
926 | |||
927 | struct ystorm_iscsi_task_rxmit_opt { | ||
928 | __le32 fast_rxmit_sge_offset; | ||
929 | __le32 scan_start_buffer_offset; | ||
930 | __le32 fast_rxmit_buffer_offset; | ||
931 | u8 scan_start_sgl_index; | ||
932 | u8 fast_rxmit_sgl_index; | ||
933 | __le16 reserved; | ||
934 | }; | ||
935 | |||
936 | struct ystorm_iscsi_task_st_ctx { | ||
937 | struct ystorm_iscsi_task_state state; | ||
938 | struct ystorm_iscsi_task_rxmit_opt rxmit_opt; | ||
939 | union iscsi_task_hdr pdu_hdr; | ||
940 | }; | ||
941 | |||
942 | struct ystorm_iscsi_task_ag_ctx { | ||
943 | u8 reserved; | ||
944 | u8 byte1; | ||
945 | __le16 word0; | ||
946 | u8 flags0; | ||
947 | #define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF | ||
948 | #define YSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 | ||
949 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 | ||
950 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 | ||
951 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 | ||
952 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 | ||
953 | #define YSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 | ||
954 | #define YSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 | ||
955 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 | ||
956 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 | ||
957 | u8 flags1; | ||
958 | #define YSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 | ||
959 | #define YSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 0 | ||
960 | #define YSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 | ||
961 | #define YSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 | ||
962 | #define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_MASK 0x3 | ||
963 | #define YSTORM_ISCSI_TASK_AG_CTX_CF2SPECIAL_SHIFT 4 | ||
964 | #define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 | ||
965 | #define YSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 6 | ||
966 | #define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 | ||
967 | #define YSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 | ||
968 | u8 flags2; | ||
969 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 | ||
970 | #define YSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 | ||
971 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 | ||
972 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 | ||
973 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 | ||
974 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 | ||
975 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 | ||
976 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 | ||
977 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 | ||
978 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 | ||
979 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 | ||
980 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 | ||
981 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 | ||
982 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 | ||
983 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 | ||
984 | #define YSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 | ||
985 | u8 byte2; | ||
986 | __le32 TTT; | ||
987 | u8 byte3; | ||
988 | u8 byte4; | ||
989 | __le16 word1; | ||
990 | }; | ||
991 | |||
992 | struct mstorm_iscsi_task_ag_ctx { | ||
993 | u8 cdu_validation; | ||
994 | u8 byte1; | ||
995 | __le16 task_cid; | ||
996 | u8 flags0; | ||
997 | #define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF | ||
998 | #define MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 | ||
999 | #define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 | ||
1000 | #define MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 | ||
1001 | #define MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 | ||
1002 | #define MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 | ||
1003 | #define MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1 | ||
1004 | #define MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6 | ||
1005 | #define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1 | ||
1006 | #define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_SHIFT 7 | ||
1007 | u8 flags1; | ||
1008 | #define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_MASK 0x3 | ||
1009 | #define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_SHIFT 0 | ||
1010 | #define MSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 | ||
1011 | #define MSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 2 | ||
1012 | #define MSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 | ||
1013 | #define MSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 4 | ||
1014 | #define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_MASK 0x1 | ||
1015 | #define MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_CF_EN_SHIFT 6 | ||
1016 | #define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 | ||
1017 | #define MSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 7 | ||
1018 | u8 flags2; | ||
1019 | #define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 | ||
1020 | #define MSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 0 | ||
1021 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 | ||
1022 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 1 | ||
1023 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 | ||
1024 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 2 | ||
1025 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 | ||
1026 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 3 | ||
1027 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 | ||
1028 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 4 | ||
1029 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 | ||
1030 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 5 | ||
1031 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 | ||
1032 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 6 | ||
1033 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 | ||
1034 | #define MSTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 7 | ||
1035 | u8 byte2; | ||
1036 | __le32 reg0; | ||
1037 | u8 byte3; | ||
1038 | u8 byte4; | ||
1039 | __le16 word1; | ||
1040 | }; | ||
1041 | |||
1042 | struct ustorm_iscsi_task_ag_ctx { | ||
1043 | u8 reserved; | ||
1044 | u8 state; | ||
1045 | __le16 icid; | ||
1046 | u8 flags0; | ||
1047 | #define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_MASK 0xF | ||
1048 | #define USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0 | ||
1049 | #define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1 | ||
1050 | #define USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4 | ||
1051 | #define USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 | ||
1052 | #define USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 | ||
1053 | #define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3 | ||
1054 | #define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6 | ||
1055 | u8 flags1; | ||
1056 | #define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_MASK 0x3 | ||
1057 | #define USTORM_ISCSI_TASK_AG_CTX_RESERVED1_SHIFT 0 | ||
1058 | #define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_MASK 0x3 | ||
1059 | #define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_SHIFT 2 | ||
1060 | #define USTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 | ||
1061 | #define USTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 4 | ||
1062 | #define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_MASK 0x3 | ||
1063 | #define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_SHIFT 6 | ||
1064 | u8 flags2; | ||
1065 | #define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_MASK 0x1 | ||
1066 | #define USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_EN_SHIFT 0 | ||
1067 | #define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_MASK 0x1 | ||
1068 | #define USTORM_ISCSI_TASK_AG_CTX_DISABLE_DATA_ACKED_SHIFT 1 | ||
1069 | #define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_MASK 0x1 | ||
1070 | #define USTORM_ISCSI_TASK_AG_CTX_R2T2RECV_EN_SHIFT 2 | ||
1071 | #define USTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 | ||
1072 | #define USTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 3 | ||
1073 | #define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_MASK 0x1 | ||
1074 | #define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_CF_EN_SHIFT 4 | ||
1075 | #define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_MASK 0x1 | ||
1076 | #define USTORM_ISCSI_TASK_AG_CTX_CMP_DATA_TOTAL_EXP_EN_SHIFT 5 | ||
1077 | #define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 | ||
1078 | #define USTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 6 | ||
1079 | #define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_MASK 0x1 | ||
1080 | #define USTORM_ISCSI_TASK_AG_CTX_CMP_CONT_RCV_EXP_EN_SHIFT 7 | ||
1081 | u8 flags3; | ||
1082 | #define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 | ||
1083 | #define USTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 0 | ||
1084 | #define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 | ||
1085 | #define USTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 1 | ||
1086 | #define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 | ||
1087 | #define USTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 2 | ||
1088 | #define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_MASK 0x1 | ||
1089 | #define USTORM_ISCSI_TASK_AG_CTX_RULE6EN_SHIFT 3 | ||
1090 | #define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_MASK 0xF | ||
1091 | #define USTORM_ISCSI_TASK_AG_CTX_DIF_ERROR_TYPE_SHIFT 4 | ||
1092 | __le32 dif_err_intervals; | ||
1093 | __le32 dif_error_1st_interval; | ||
1094 | __le32 rcv_cont_len; | ||
1095 | __le32 exp_cont_len; | ||
1096 | __le32 total_data_acked; | ||
1097 | __le32 exp_data_acked; | ||
1098 | u8 next_tid_valid; | ||
1099 | u8 byte3; | ||
1100 | __le16 word1; | ||
1101 | __le16 next_tid; | ||
1102 | __le16 word3; | ||
1103 | __le32 hdr_residual_count; | ||
1104 | __le32 exp_r2t_sn; | ||
1105 | }; | ||
1106 | |||
1107 | struct mstorm_iscsi_task_st_ctx { | ||
1108 | struct scsi_cached_sges data_desc; | ||
1109 | struct scsi_sgl_params sgl_params; | ||
1110 | __le32 rem_task_size; | ||
1111 | __le32 data_buffer_offset; | ||
1112 | u8 task_type; | ||
1113 | struct iscsi_dif_flags dif_flags; | ||
1114 | u8 reserved0[2]; | ||
1115 | struct regpair sense_db; | ||
1116 | __le32 expected_itt; | ||
1117 | __le32 reserved1; | ||
1118 | }; | ||
1119 | |||
1120 | struct ustorm_iscsi_task_st_ctx { | ||
1121 | __le32 rem_rcv_len; | ||
1122 | __le32 exp_data_transfer_len; | ||
1123 | __le32 exp_data_sn; | ||
1124 | struct regpair lun; | ||
1125 | struct iscsi_reg1 reg1; | ||
1126 | u8 flags2; | ||
1127 | #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_MASK 0x1 | ||
1128 | #define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0 | ||
1129 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F | ||
1130 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1 | ||
1131 | struct iscsi_dif_flags dif_flags; | ||
1132 | __le16 reserved3; | ||
1133 | __le32 reserved4; | ||
1134 | __le32 reserved5; | ||
1135 | __le32 reserved6; | ||
1136 | __le32 reserved7; | ||
1137 | u8 task_type; | ||
1138 | u8 error_flags; | ||
1139 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_MASK 0x1 | ||
1140 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_DIGEST_ERROR_SHIFT 0 | ||
1141 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_MASK 0x1 | ||
1142 | #define USTORM_ISCSI_TASK_ST_CTX_DATA_TRUNCATED_ERROR_SHIFT 1 | ||
1143 | #define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_MASK 0x1 | ||
1144 | #define USTORM_ISCSI_TASK_ST_CTX_UNDER_RUN_ERROR_SHIFT 2 | ||
1145 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_MASK 0x1F | ||
1146 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED8_SHIFT 3 | ||
1147 | u8 flags; | ||
1148 | #define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_MASK 0x3 | ||
1149 | #define USTORM_ISCSI_TASK_ST_CTX_CQE_WRITE_SHIFT 0 | ||
1150 | #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_MASK 0x1 | ||
1151 | #define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2 | ||
1152 | #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1 | ||
1153 | #define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3 | ||
1154 | #define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1 | ||
1155 | #define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4 | ||
1156 | #define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1 | ||
1157 | #define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5 | ||
1158 | #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1 | ||
1159 | #define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6 | ||
1160 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1 | ||
1161 | #define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_SHIFT 7 | ||
1162 | u8 cq_rss_number; | ||
1163 | }; | ||
1164 | |||
1165 | struct iscsi_task_context { | ||
1166 | struct ystorm_iscsi_task_st_ctx ystorm_st_context; | ||
1167 | struct ystorm_iscsi_task_ag_ctx ystorm_ag_context; | ||
1168 | struct regpair ystorm_ag_padding[2]; | ||
1169 | struct tdif_task_context tdif_context; | ||
1170 | struct mstorm_iscsi_task_ag_ctx mstorm_ag_context; | ||
1171 | struct regpair mstorm_ag_padding[2]; | ||
1172 | struct ustorm_iscsi_task_ag_ctx ustorm_ag_context; | ||
1173 | struct mstorm_iscsi_task_st_ctx mstorm_st_context; | ||
1174 | struct ustorm_iscsi_task_st_ctx ustorm_st_context; | ||
1175 | struct rdif_task_context rdif_context; | ||
1176 | }; | ||
1177 | |||
1178 | enum iscsi_task_type { | 1334 | enum iscsi_task_type { |
1179 | ISCSI_TASK_TYPE_INITIATOR_WRITE, | 1335 | ISCSI_TASK_TYPE_INITIATOR_WRITE, |
1180 | ISCSI_TASK_TYPE_INITIATOR_READ, | 1336 | ISCSI_TASK_TYPE_INITIATOR_READ, |
@@ -1186,53 +1342,57 @@ enum iscsi_task_type { | |||
1186 | ISCSI_TASK_TYPE_TARGET_READ, | 1342 | ISCSI_TASK_TYPE_TARGET_READ, |
1187 | ISCSI_TASK_TYPE_TARGET_RESPONSE, | 1343 | ISCSI_TASK_TYPE_TARGET_RESPONSE, |
1188 | ISCSI_TASK_TYPE_LOGIN_RESPONSE, | 1344 | ISCSI_TASK_TYPE_LOGIN_RESPONSE, |
1345 | ISCSI_TASK_TYPE_TARGET_IMM_W_DIF, | ||
1189 | MAX_ISCSI_TASK_TYPE | 1346 | MAX_ISCSI_TASK_TYPE |
1190 | }; | 1347 | }; |
1191 | 1348 | ||
1349 | /* iSCSI DesiredDataTransferLength/ttt union */ | ||
1192 | union iscsi_ttt_txlen_union { | 1350 | union iscsi_ttt_txlen_union { |
1193 | __le32 desired_tx_len; | 1351 | __le32 desired_tx_len; |
1194 | __le32 ttt; | 1352 | __le32 ttt; |
1195 | }; | 1353 | }; |
1196 | 1354 | ||
1355 | /* iSCSI uHQ element */ | ||
1197 | struct iscsi_uhqe { | 1356 | struct iscsi_uhqe { |
1198 | __le32 reg1; | 1357 | __le32 reg1; |
1199 | #define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF | 1358 | #define ISCSI_UHQE_PDU_PAYLOAD_LEN_MASK 0xFFFFF |
1200 | #define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 | 1359 | #define ISCSI_UHQE_PDU_PAYLOAD_LEN_SHIFT 0 |
1201 | #define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 | 1360 | #define ISCSI_UHQE_LOCAL_COMP_MASK 0x1 |
1202 | #define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 | 1361 | #define ISCSI_UHQE_LOCAL_COMP_SHIFT 20 |
1203 | #define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 | 1362 | #define ISCSI_UHQE_TOGGLE_BIT_MASK 0x1 |
1204 | #define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 | 1363 | #define ISCSI_UHQE_TOGGLE_BIT_SHIFT 21 |
1205 | #define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 | 1364 | #define ISCSI_UHQE_PURE_PAYLOAD_MASK 0x1 |
1206 | #define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 | 1365 | #define ISCSI_UHQE_PURE_PAYLOAD_SHIFT 22 |
1207 | #define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 | 1366 | #define ISCSI_UHQE_LOGIN_RESPONSE_PDU_MASK 0x1 |
1208 | #define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 | 1367 | #define ISCSI_UHQE_LOGIN_RESPONSE_PDU_SHIFT 23 |
1209 | #define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF | 1368 | #define ISCSI_UHQE_TASK_ID_HI_MASK 0xFF |
1210 | #define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 | 1369 | #define ISCSI_UHQE_TASK_ID_HI_SHIFT 24 |
1211 | __le32 reg2; | 1370 | __le32 reg2; |
1212 | #define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF | 1371 | #define ISCSI_UHQE_BUFFER_OFFSET_MASK 0xFFFFFF |
1213 | #define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 | 1372 | #define ISCSI_UHQE_BUFFER_OFFSET_SHIFT 0 |
1214 | #define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF | 1373 | #define ISCSI_UHQE_TASK_ID_LO_MASK 0xFF |
1215 | #define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 | 1374 | #define ISCSI_UHQE_TASK_ID_LO_SHIFT 24 |
1216 | }; | 1375 | }; |
1217 | 1376 | ||
1218 | 1377 | /* iSCSI WQ element */ | |
1219 | struct iscsi_wqe { | 1378 | struct iscsi_wqe { |
1220 | __le16 task_id; | 1379 | __le16 task_id; |
1221 | u8 flags; | 1380 | u8 flags; |
1222 | #define ISCSI_WQE_WQE_TYPE_MASK 0x7 | 1381 | #define ISCSI_WQE_WQE_TYPE_MASK 0x7 |
1223 | #define ISCSI_WQE_WQE_TYPE_SHIFT 0 | 1382 | #define ISCSI_WQE_WQE_TYPE_SHIFT 0 |
1224 | #define ISCSI_WQE_NUM_SGES_MASK 0xF | 1383 | #define ISCSI_WQE_NUM_SGES_MASK 0xF |
1225 | #define ISCSI_WQE_NUM_SGES_SHIFT 3 | 1384 | #define ISCSI_WQE_NUM_SGES_SHIFT 3 |
1226 | #define ISCSI_WQE_RESPONSE_MASK 0x1 | 1385 | #define ISCSI_WQE_RESPONSE_MASK 0x1 |
1227 | #define ISCSI_WQE_RESPONSE_SHIFT 7 | 1386 | #define ISCSI_WQE_RESPONSE_SHIFT 7 |
1228 | struct iscsi_dif_flags prot_flags; | 1387 | struct iscsi_dif_flags prot_flags; |
1229 | __le32 contlen_cdbsize; | 1388 | __le32 contlen_cdbsize; |
1230 | #define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF | 1389 | #define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF |
1231 | #define ISCSI_WQE_CONT_LEN_SHIFT 0 | 1390 | #define ISCSI_WQE_CONT_LEN_SHIFT 0 |
1232 | #define ISCSI_WQE_CDB_SIZE_MASK 0xFF | 1391 | #define ISCSI_WQE_CDB_SIZE_MASK 0xFF |
1233 | #define ISCSI_WQE_CDB_SIZE_SHIFT 24 | 1392 | #define ISCSI_WQE_CDB_SIZE_SHIFT 24 |
1234 | }; | 1393 | }; |
1235 | 1394 | ||
1395 | /* iSCSI wqe type */ | ||
1236 | enum iscsi_wqe_type { | 1396 | enum iscsi_wqe_type { |
1237 | ISCSI_WQE_TYPE_NORMAL, | 1397 | ISCSI_WQE_TYPE_NORMAL, |
1238 | ISCSI_WQE_TYPE_TASK_CLEANUP, | 1398 | ISCSI_WQE_TYPE_TASK_CLEANUP, |
@@ -1244,6 +1404,7 @@ enum iscsi_wqe_type { | |||
1244 | MAX_ISCSI_WQE_TYPE | 1404 | MAX_ISCSI_WQE_TYPE |
1245 | }; | 1405 | }; |
1246 | 1406 | ||
1407 | /* iSCSI xHQ element */ | ||
1247 | struct iscsi_xhqe { | 1408 | struct iscsi_xhqe { |
1248 | union iscsi_ttt_txlen_union ttt_or_txlen; | 1409 | union iscsi_ttt_txlen_union ttt_or_txlen; |
1249 | __le32 exp_stat_sn; | 1410 | __le32 exp_stat_sn; |
@@ -1251,120 +1412,134 @@ struct iscsi_xhqe { | |||
1251 | u8 total_ahs_length; | 1412 | u8 total_ahs_length; |
1252 | u8 opcode; | 1413 | u8 opcode; |
1253 | u8 flags; | 1414 | u8 flags; |
1254 | #define ISCSI_XHQE_FINAL_MASK 0x1 | 1415 | #define ISCSI_XHQE_FINAL_MASK 0x1 |
1255 | #define ISCSI_XHQE_FINAL_SHIFT 0 | 1416 | #define ISCSI_XHQE_FINAL_SHIFT 0 |
1256 | #define ISCSI_XHQE_STATUS_BIT_MASK 0x1 | 1417 | #define ISCSI_XHQE_STATUS_BIT_MASK 0x1 |
1257 | #define ISCSI_XHQE_STATUS_BIT_SHIFT 1 | 1418 | #define ISCSI_XHQE_STATUS_BIT_SHIFT 1 |
1258 | #define ISCSI_XHQE_NUM_SGES_MASK 0xF | 1419 | #define ISCSI_XHQE_NUM_SGES_MASK 0xF |
1259 | #define ISCSI_XHQE_NUM_SGES_SHIFT 2 | 1420 | #define ISCSI_XHQE_NUM_SGES_SHIFT 2 |
1260 | #define ISCSI_XHQE_RESERVED0_MASK 0x3 | 1421 | #define ISCSI_XHQE_RESERVED0_MASK 0x3 |
1261 | #define ISCSI_XHQE_RESERVED0_SHIFT 6 | 1422 | #define ISCSI_XHQE_RESERVED0_SHIFT 6 |
1262 | union iscsi_seq_num seq_num; | 1423 | union iscsi_seq_num seq_num; |
1263 | __le16 reserved1; | 1424 | __le16 reserved1; |
1264 | }; | 1425 | }; |
1265 | 1426 | ||
1427 | /* Per PF iSCSI receive path statistics - mStorm RAM structure */ | ||
1266 | struct mstorm_iscsi_stats_drv { | 1428 | struct mstorm_iscsi_stats_drv { |
1267 | struct regpair iscsi_rx_dropped_pdus_task_not_valid; | 1429 | struct regpair iscsi_rx_dropped_pdus_task_not_valid; |
1430 | struct regpair iscsi_rx_dup_ack_cnt; | ||
1268 | }; | 1431 | }; |
1269 | 1432 | ||
1433 | /* Per PF iSCSI transmit path statistics - pStorm RAM structure */ | ||
1270 | struct pstorm_iscsi_stats_drv { | 1434 | struct pstorm_iscsi_stats_drv { |
1271 | struct regpair iscsi_tx_bytes_cnt; | 1435 | struct regpair iscsi_tx_bytes_cnt; |
1272 | struct regpair iscsi_tx_packet_cnt; | 1436 | struct regpair iscsi_tx_packet_cnt; |
1273 | }; | 1437 | }; |
1274 | 1438 | ||
1439 | /* Per PF iSCSI receive path statistics - tStorm RAM structure */ | ||
1275 | struct tstorm_iscsi_stats_drv { | 1440 | struct tstorm_iscsi_stats_drv { |
1276 | struct regpair iscsi_rx_bytes_cnt; | 1441 | struct regpair iscsi_rx_bytes_cnt; |
1277 | struct regpair iscsi_rx_packet_cnt; | 1442 | struct regpair iscsi_rx_packet_cnt; |
1278 | struct regpair iscsi_rx_new_ooo_isle_events_cnt; | 1443 | struct regpair iscsi_rx_new_ooo_isle_events_cnt; |
1444 | struct regpair iscsi_rx_tcp_payload_bytes_cnt; | ||
1445 | struct regpair iscsi_rx_tcp_pkt_cnt; | ||
1446 | struct regpair iscsi_rx_pure_ack_cnt; | ||
1279 | __le32 iscsi_cmdq_threshold_cnt; | 1447 | __le32 iscsi_cmdq_threshold_cnt; |
1280 | __le32 iscsi_rq_threshold_cnt; | 1448 | __le32 iscsi_rq_threshold_cnt; |
1281 | __le32 iscsi_immq_threshold_cnt; | 1449 | __le32 iscsi_immq_threshold_cnt; |
1282 | }; | 1450 | }; |
1283 | 1451 | ||
1452 | /* Per PF iSCSI receive path statistics - uStorm RAM structure */ | ||
1284 | struct ustorm_iscsi_stats_drv { | 1453 | struct ustorm_iscsi_stats_drv { |
1285 | struct regpair iscsi_rx_data_pdu_cnt; | 1454 | struct regpair iscsi_rx_data_pdu_cnt; |
1286 | struct regpair iscsi_rx_r2t_pdu_cnt; | 1455 | struct regpair iscsi_rx_r2t_pdu_cnt; |
1287 | struct regpair iscsi_rx_total_pdu_cnt; | 1456 | struct regpair iscsi_rx_total_pdu_cnt; |
1288 | }; | 1457 | }; |
1289 | 1458 | ||
1459 | /* Per PF iSCSI transmit path statistics - xStorm RAM structure */ | ||
1290 | struct xstorm_iscsi_stats_drv { | 1460 | struct xstorm_iscsi_stats_drv { |
1291 | struct regpair iscsi_tx_go_to_slow_start_event_cnt; | 1461 | struct regpair iscsi_tx_go_to_slow_start_event_cnt; |
1292 | struct regpair iscsi_tx_fast_retransmit_event_cnt; | 1462 | struct regpair iscsi_tx_fast_retransmit_event_cnt; |
1463 | struct regpair iscsi_tx_pure_ack_cnt; | ||
1464 | struct regpair iscsi_tx_delayed_ack_cnt; | ||
1293 | }; | 1465 | }; |
1294 | 1466 | ||
1467 | /* Per PF iSCSI transmit path statistics - yStorm RAM structure */ | ||
1295 | struct ystorm_iscsi_stats_drv { | 1468 | struct ystorm_iscsi_stats_drv { |
1296 | struct regpair iscsi_tx_data_pdu_cnt; | 1469 | struct regpair iscsi_tx_data_pdu_cnt; |
1297 | struct regpair iscsi_tx_r2t_pdu_cnt; | 1470 | struct regpair iscsi_tx_r2t_pdu_cnt; |
1298 | struct regpair iscsi_tx_total_pdu_cnt; | 1471 | struct regpair iscsi_tx_total_pdu_cnt; |
1472 | struct regpair iscsi_tx_tcp_payload_bytes_cnt; | ||
1473 | struct regpair iscsi_tx_tcp_pkt_cnt; | ||
1299 | }; | 1474 | }; |
1300 | 1475 | ||
1301 | struct tstorm_iscsi_task_ag_ctx { | 1476 | struct e4_tstorm_iscsi_task_ag_ctx { |
1302 | u8 byte0; | 1477 | u8 byte0; |
1303 | u8 byte1; | 1478 | u8 byte1; |
1304 | __le16 word0; | 1479 | __le16 word0; |
1305 | u8 flags0; | 1480 | u8 flags0; |
1306 | #define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF | 1481 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_MASK 0xF |
1307 | #define TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 | 1482 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_NIBBLE0_SHIFT 0 |
1308 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 | 1483 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_MASK 0x1 |
1309 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 | 1484 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT0_SHIFT 4 |
1310 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 | 1485 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1 |
1311 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 | 1486 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5 |
1312 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 | 1487 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_MASK 0x1 |
1313 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 | 1488 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT2_SHIFT 6 |
1314 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 | 1489 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_MASK 0x1 |
1315 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 | 1490 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT3_SHIFT 7 |
1316 | u8 flags1; | 1491 | u8 flags1; |
1317 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 | 1492 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_MASK 0x1 |
1318 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 | 1493 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT4_SHIFT 0 |
1319 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 | 1494 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_MASK 0x1 |
1320 | #define TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 | 1495 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_BIT5_SHIFT 1 |
1321 | #define TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 | 1496 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_MASK 0x3 |
1322 | #define TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 | 1497 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0_SHIFT 2 |
1323 | #define TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 | 1498 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_MASK 0x3 |
1324 | #define TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 | 1499 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1_SHIFT 4 |
1325 | #define TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 | 1500 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_MASK 0x3 |
1326 | #define TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 | 1501 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2_SHIFT 6 |
1327 | u8 flags2; | 1502 | u8 flags2; |
1328 | #define TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 | 1503 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_MASK 0x3 |
1329 | #define TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 | 1504 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3_SHIFT 0 |
1330 | #define TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 | 1505 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_MASK 0x3 |
1331 | #define TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 | 1506 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4_SHIFT 2 |
1332 | #define TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 | 1507 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_MASK 0x3 |
1333 | #define TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 | 1508 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5_SHIFT 4 |
1334 | #define TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 | 1509 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_MASK 0x3 |
1335 | #define TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 | 1510 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6_SHIFT 6 |
1336 | u8 flags3; | 1511 | u8 flags3; |
1337 | #define TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 | 1512 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_MASK 0x3 |
1338 | #define TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 | 1513 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7_SHIFT 0 |
1339 | #define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 | 1514 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_MASK 0x1 |
1340 | #define TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 | 1515 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF0EN_SHIFT 2 |
1341 | #define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 | 1516 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_MASK 0x1 |
1342 | #define TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 | 1517 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF1EN_SHIFT 3 |
1343 | #define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 | 1518 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_MASK 0x1 |
1344 | #define TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 | 1519 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF2EN_SHIFT 4 |
1345 | #define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 | 1520 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_MASK 0x1 |
1346 | #define TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 | 1521 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF3EN_SHIFT 5 |
1347 | #define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 | 1522 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_MASK 0x1 |
1348 | #define TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 | 1523 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF4EN_SHIFT 6 |
1349 | #define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 | 1524 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_MASK 0x1 |
1350 | #define TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 | 1525 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF5EN_SHIFT 7 |
1351 | u8 flags4; | 1526 | u8 flags4; |
1352 | #define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 | 1527 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_MASK 0x1 |
1353 | #define TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 | 1528 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF6EN_SHIFT 0 |
1354 | #define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 | 1529 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_MASK 0x1 |
1355 | #define TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 | 1530 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_CF7EN_SHIFT 1 |
1356 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 | 1531 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_MASK 0x1 |
1357 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 | 1532 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE0EN_SHIFT 2 |
1358 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 | 1533 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_MASK 0x1 |
1359 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 | 1534 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE1EN_SHIFT 3 |
1360 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 | 1535 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_MASK 0x1 |
1361 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 | 1536 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE2EN_SHIFT 4 |
1362 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 | 1537 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_MASK 0x1 |
1363 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 | 1538 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE3EN_SHIFT 5 |
1364 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 | 1539 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_MASK 0x1 |
1365 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 | 1540 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE4EN_SHIFT 6 |
1366 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 | 1541 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_MASK 0x1 |
1367 | #define TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 | 1542 | #define E4_TSTORM_ISCSI_TASK_AG_CTX_RULE5EN_SHIFT 7 |
1368 | u8 byte2; | 1543 | u8 byte2; |
1369 | __le16 word1; | 1544 | __le16 word1; |
1370 | __le32 reg0; | 1545 | __le32 reg0; |
@@ -1376,18 +1551,20 @@ struct tstorm_iscsi_task_ag_ctx { | |||
1376 | __le32 reg1; | 1551 | __le32 reg1; |
1377 | __le32 reg2; | 1552 | __le32 reg2; |
1378 | }; | 1553 | }; |
1554 | |||
1555 | /* iSCSI doorbell data */ | ||
1379 | struct iscsi_db_data { | 1556 | struct iscsi_db_data { |
1380 | u8 params; | 1557 | u8 params; |
1381 | #define ISCSI_DB_DATA_DEST_MASK 0x3 | 1558 | #define ISCSI_DB_DATA_DEST_MASK 0x3 |
1382 | #define ISCSI_DB_DATA_DEST_SHIFT 0 | 1559 | #define ISCSI_DB_DATA_DEST_SHIFT 0 |
1383 | #define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 | 1560 | #define ISCSI_DB_DATA_AGG_CMD_MASK 0x3 |
1384 | #define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 | 1561 | #define ISCSI_DB_DATA_AGG_CMD_SHIFT 2 |
1385 | #define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 | 1562 | #define ISCSI_DB_DATA_BYPASS_EN_MASK 0x1 |
1386 | #define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 | 1563 | #define ISCSI_DB_DATA_BYPASS_EN_SHIFT 4 |
1387 | #define ISCSI_DB_DATA_RESERVED_MASK 0x1 | 1564 | #define ISCSI_DB_DATA_RESERVED_MASK 0x1 |
1388 | #define ISCSI_DB_DATA_RESERVED_SHIFT 5 | 1565 | #define ISCSI_DB_DATA_RESERVED_SHIFT 5 |
1389 | #define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 | 1566 | #define ISCSI_DB_DATA_AGG_VAL_SEL_MASK 0x3 |
1390 | #define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 | 1567 | #define ISCSI_DB_DATA_AGG_VAL_SEL_SHIFT 6 |
1391 | u8 agg_flags; | 1568 | u8 agg_flags; |
1392 | __le16 sq_prod; | 1569 | __le16 sq_prod; |
1393 | }; | 1570 | }; |
diff --git a/include/linux/qed/iwarp_common.h b/include/linux/qed/iwarp_common.h index b8b3e1cfae90..c6cfd39cd910 100644 --- a/include/linux/qed/iwarp_common.h +++ b/include/linux/qed/iwarp_common.h | |||
@@ -29,9 +29,12 @@ | |||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | 29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE |
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | |||
32 | #ifndef __IWARP_COMMON__ | 33 | #ifndef __IWARP_COMMON__ |
33 | #define __IWARP_COMMON__ | 34 | #define __IWARP_COMMON__ |
35 | |||
34 | #include <linux/qed/rdma_common.h> | 36 | #include <linux/qed/rdma_common.h> |
37 | |||
35 | /************************/ | 38 | /************************/ |
36 | /* IWARP FW CONSTANTS */ | 39 | /* IWARP FW CONSTANTS */ |
37 | /************************/ | 40 | /************************/ |
@@ -40,14 +43,14 @@ | |||
40 | #define IWARP_PASSIVE_MODE 1 | 43 | #define IWARP_PASSIVE_MODE 1 |
41 | 44 | ||
42 | #define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) | 45 | #define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000) |
43 | #define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) | 46 | #define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000) |
44 | #define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) | 47 | #define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000) |
45 | #define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) | 48 | #define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000) |
46 | #define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) | 49 | #define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000) |
47 | 50 | ||
48 | #define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) | 51 | #define IWARP_REQ_MAX_INLINE_DATA_SIZE (128) |
49 | #define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) | 52 | #define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176) |
50 | 53 | ||
51 | #define IWARP_MAX_QPS (64 * 1024) | 54 | #define IWARP_MAX_QPS (64 * 1024) |
52 | 55 | ||
53 | #endif /* __IWARP_COMMON__ */ | 56 | #endif /* __IWARP_COMMON__ */ |
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h index d60de4a39810..147d08ccf813 100644 --- a/include/linux/qed/qed_eth_if.h +++ b/include/linux/qed/qed_eth_if.h | |||
@@ -61,6 +61,35 @@ struct qed_txq_start_ret_params { | |||
61 | void *p_handle; | 61 | void *p_handle; |
62 | }; | 62 | }; |
63 | 63 | ||
64 | enum qed_filter_config_mode { | ||
65 | QED_FILTER_CONFIG_MODE_DISABLE, | ||
66 | QED_FILTER_CONFIG_MODE_5_TUPLE, | ||
67 | QED_FILTER_CONFIG_MODE_L4_PORT, | ||
68 | QED_FILTER_CONFIG_MODE_IP_DEST, | ||
69 | }; | ||
70 | |||
71 | struct qed_ntuple_filter_params { | ||
72 | /* Physically mapped address containing header of buffer to be used | ||
73 | * as filter. | ||
74 | */ | ||
75 | dma_addr_t addr; | ||
76 | |||
77 | /* Length of header in bytes */ | ||
78 | u16 length; | ||
79 | |||
80 | /* Relative queue-id to receive classified packet */ | ||
81 | #define QED_RFS_NTUPLE_QID_RSS ((u16)-1) | ||
82 | u16 qid; | ||
83 | |||
84 | /* Identifier can either be according to vport-id or vfid */ | ||
85 | bool b_is_vf; | ||
86 | u8 vport_id; | ||
87 | u8 vf_id; | ||
88 | |||
89 | /* true iff this filter is to be added. Else to be removed */ | ||
90 | bool b_is_add; | ||
91 | }; | ||
92 | |||
64 | struct qed_dev_eth_info { | 93 | struct qed_dev_eth_info { |
65 | struct qed_dev_info common; | 94 | struct qed_dev_info common; |
66 | 95 | ||
@@ -316,13 +345,12 @@ struct qed_eth_ops { | |||
316 | int (*tunn_config)(struct qed_dev *cdev, | 345 | int (*tunn_config)(struct qed_dev *cdev, |
317 | struct qed_tunn_params *params); | 346 | struct qed_tunn_params *params); |
318 | 347 | ||
319 | int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie, | 348 | int (*ntuple_filter_config)(struct qed_dev *cdev, |
320 | dma_addr_t mapping, u16 length, | 349 | void *cookie, |
321 | u16 vport_id, u16 rx_queue_id, | 350 | struct qed_ntuple_filter_params *params); |
322 | bool add_filter); | ||
323 | 351 | ||
324 | int (*configure_arfs_searcher)(struct qed_dev *cdev, | 352 | int (*configure_arfs_searcher)(struct qed_dev *cdev, |
325 | bool en_searcher); | 353 | enum qed_filter_config_mode mode); |
326 | int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); | 354 | int (*get_coalesce)(struct qed_dev *cdev, u16 *coal, void *handle); |
327 | }; | 355 | }; |
328 | 356 | ||
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index cc646ca97974..15e398c7230e 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h | |||
@@ -244,16 +244,11 @@ struct qed_fcoe_pf_params { | |||
244 | /* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ | 244 | /* Most of the the parameters below are described in the FW iSCSI / TCP HSI */ |
245 | struct qed_iscsi_pf_params { | 245 | struct qed_iscsi_pf_params { |
246 | u64 glbl_q_params_addr; | 246 | u64 glbl_q_params_addr; |
247 | u64 bdq_pbl_base_addr[2]; | 247 | u64 bdq_pbl_base_addr[3]; |
248 | u32 max_cwnd; | ||
249 | u16 cq_num_entries; | 248 | u16 cq_num_entries; |
250 | u16 cmdq_num_entries; | 249 | u16 cmdq_num_entries; |
251 | u32 two_msl_timer; | 250 | u32 two_msl_timer; |
252 | u16 dup_ack_threshold; | ||
253 | u16 tx_sws_timer; | 251 | u16 tx_sws_timer; |
254 | u16 min_rto; | ||
255 | u16 min_rto_rt; | ||
256 | u16 max_rto; | ||
257 | 252 | ||
258 | /* The following parameters are used during HW-init | 253 | /* The following parameters are used during HW-init |
259 | * and these parameters need to be passed as arguments | 254 | * and these parameters need to be passed as arguments |
@@ -264,8 +259,8 @@ struct qed_iscsi_pf_params { | |||
264 | 259 | ||
265 | /* The following parameters are used during protocol-init */ | 260 | /* The following parameters are used during protocol-init */ |
266 | u16 half_way_close_timeout; | 261 | u16 half_way_close_timeout; |
267 | u16 bdq_xoff_threshold[2]; | 262 | u16 bdq_xoff_threshold[3]; |
268 | u16 bdq_xon_threshold[2]; | 263 | u16 bdq_xon_threshold[3]; |
269 | u16 cmdq_xoff_threshold; | 264 | u16 cmdq_xoff_threshold; |
270 | u16 cmdq_xon_threshold; | 265 | u16 cmdq_xon_threshold; |
271 | u16 rq_buffer_size; | 266 | u16 rq_buffer_size; |
@@ -281,10 +276,11 @@ struct qed_iscsi_pf_params { | |||
281 | u8 gl_cmd_pi; | 276 | u8 gl_cmd_pi; |
282 | u8 debug_mode; | 277 | u8 debug_mode; |
283 | u8 ll2_ooo_queue_id; | 278 | u8 ll2_ooo_queue_id; |
284 | u8 ooo_enable; | ||
285 | 279 | ||
286 | u8 is_target; | 280 | u8 is_target; |
287 | u8 bdq_pbl_num_entries[2]; | 281 | u8 is_soc_en; |
282 | u8 soc_num_of_blocks_log; | ||
283 | u8 bdq_pbl_num_entries[3]; | ||
288 | }; | 284 | }; |
289 | 285 | ||
290 | struct qed_rdma_pf_params { | 286 | struct qed_rdma_pf_params { |
@@ -316,16 +312,16 @@ enum qed_int_mode { | |||
316 | }; | 312 | }; |
317 | 313 | ||
318 | struct qed_sb_info { | 314 | struct qed_sb_info { |
319 | struct status_block *sb_virt; | 315 | struct status_block_e4 *sb_virt; |
320 | dma_addr_t sb_phys; | 316 | dma_addr_t sb_phys; |
321 | u32 sb_ack; /* Last given ack */ | 317 | u32 sb_ack; /* Last given ack */ |
322 | u16 igu_sb_id; | 318 | u16 igu_sb_id; |
323 | void __iomem *igu_addr; | 319 | void __iomem *igu_addr; |
324 | u8 flags; | 320 | u8 flags; |
325 | #define QED_SB_INFO_INIT 0x1 | 321 | #define QED_SB_INFO_INIT 0x1 |
326 | #define QED_SB_INFO_SETUP 0x2 | 322 | #define QED_SB_INFO_SETUP 0x2 |
327 | 323 | ||
328 | struct qed_dev *cdev; | 324 | struct qed_dev *cdev; |
329 | }; | 325 | }; |
330 | 326 | ||
331 | enum qed_dev_type { | 327 | enum qed_dev_type { |
@@ -939,7 +935,7 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) | |||
939 | u16 rc = 0; | 935 | u16 rc = 0; |
940 | 936 | ||
941 | prod = le32_to_cpu(sb_info->sb_virt->prod_index) & | 937 | prod = le32_to_cpu(sb_info->sb_virt->prod_index) & |
942 | STATUS_BLOCK_PROD_INDEX_MASK; | 938 | STATUS_BLOCK_E4_PROD_INDEX_MASK; |
943 | if (sb_info->sb_ack != prod) { | 939 | if (sb_info->sb_ack != prod) { |
944 | sb_info->sb_ack = prod; | 940 | sb_info->sb_ack = prod; |
945 | rc |= QED_SB_IDX; | 941 | rc |= QED_SB_IDX; |
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 111e606a74c8..d0df1bec5357 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h | |||
@@ -102,7 +102,6 @@ struct qed_iscsi_params_offload { | |||
102 | u32 ss_thresh; | 102 | u32 ss_thresh; |
103 | u16 srtt; | 103 | u16 srtt; |
104 | u16 rtt_var; | 104 | u16 rtt_var; |
105 | u32 ts_time; | ||
106 | u32 ts_recent; | 105 | u32 ts_recent; |
107 | u32 ts_recent_age; | 106 | u32 ts_recent_age; |
108 | u32 total_rt; | 107 | u32 total_rt; |
@@ -124,7 +123,6 @@ struct qed_iscsi_params_offload { | |||
124 | u16 mss; | 123 | u16 mss; |
125 | u8 snd_wnd_scale; | 124 | u8 snd_wnd_scale; |
126 | u8 rcv_wnd_scale; | 125 | u8 rcv_wnd_scale; |
127 | u32 ts_ticks_per_second; | ||
128 | u16 da_timeout_value; | 126 | u16 da_timeout_value; |
129 | u8 ack_frequency; | 127 | u8 ack_frequency; |
130 | }; | 128 | }; |
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index e755954d85fd..266c1fb45387 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h | |||
@@ -116,7 +116,7 @@ struct qed_ll2_comp_rx_data { | |||
116 | u32 opaque_data_1; | 116 | u32 opaque_data_1; |
117 | 117 | ||
118 | /* GSI only */ | 118 | /* GSI only */ |
119 | u32 gid_dst[4]; | 119 | u32 src_qp; |
120 | u16 qp_id; | 120 | u16 qp_id; |
121 | 121 | ||
122 | union { | 122 | union { |
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h index a9b3050f469c..c1a446ebe362 100644 --- a/include/linux/qed/rdma_common.h +++ b/include/linux/qed/rdma_common.h | |||
@@ -32,28 +32,29 @@ | |||
32 | 32 | ||
33 | #ifndef __RDMA_COMMON__ | 33 | #ifndef __RDMA_COMMON__ |
34 | #define __RDMA_COMMON__ | 34 | #define __RDMA_COMMON__ |
35 | |||
35 | /************************/ | 36 | /************************/ |
36 | /* RDMA FW CONSTANTS */ | 37 | /* RDMA FW CONSTANTS */ |
37 | /************************/ | 38 | /************************/ |
38 | 39 | ||
39 | #define RDMA_RESERVED_LKEY (0) | 40 | #define RDMA_RESERVED_LKEY (0) |
40 | #define RDMA_RING_PAGE_SIZE (0x1000) | 41 | #define RDMA_RING_PAGE_SIZE (0x1000) |
41 | 42 | ||
42 | #define RDMA_MAX_SGE_PER_SQ_WQE (4) | 43 | #define RDMA_MAX_SGE_PER_SQ_WQE (4) |
43 | #define RDMA_MAX_SGE_PER_RQ_WQE (4) | 44 | #define RDMA_MAX_SGE_PER_RQ_WQE (4) |
44 | 45 | ||
45 | #define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) | 46 | #define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000) |
46 | 47 | ||
47 | #define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) | 48 | #define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50) |
48 | #define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) | 49 | #define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20) |
49 | 50 | ||
50 | #define RDMA_MAX_CQS (64 * 1024) | 51 | #define RDMA_MAX_CQS (64 * 1024) |
51 | #define RDMA_MAX_TIDS (128 * 1024 - 1) | 52 | #define RDMA_MAX_TIDS (128 * 1024 - 1) |
52 | #define RDMA_MAX_PDS (64 * 1024) | 53 | #define RDMA_MAX_PDS (64 * 1024) |
53 | 54 | ||
54 | #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS | 55 | #define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS |
55 | #define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 | 56 | #define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2 |
56 | #define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB | 57 | #define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB |
57 | 58 | ||
58 | #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) | 59 | #define RDMA_TASK_TYPE (PROTOCOLID_ROCE) |
59 | 60 | ||
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h index fe6a33e45977..e15e0da71240 100644 --- a/include/linux/qed/roce_common.h +++ b/include/linux/qed/roce_common.h | |||
@@ -33,13 +33,18 @@ | |||
33 | #ifndef __ROCE_COMMON__ | 33 | #ifndef __ROCE_COMMON__ |
34 | #define __ROCE_COMMON__ | 34 | #define __ROCE_COMMON__ |
35 | 35 | ||
36 | #define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) | 36 | /************************/ |
37 | #define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) | 37 | /* ROCE FW CONSTANTS */ |
38 | /************************/ | ||
38 | 39 | ||
39 | #define ROCE_MAX_QPS (32 * 1024) | 40 | #define ROCE_REQ_MAX_INLINE_DATA_SIZE (256) |
40 | #define ROCE_DCQCN_NP_MAX_QPS (64) | 41 | #define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288) |
41 | #define ROCE_DCQCN_RP_MAX_QPS (64) | ||
42 | 42 | ||
43 | #define ROCE_MAX_QPS (32 * 1024) | ||
44 | #define ROCE_DCQCN_NP_MAX_QPS (64) | ||
45 | #define ROCE_DCQCN_RP_MAX_QPS (64) | ||
46 | |||
47 | /* Affiliated asynchronous events / errors enumeration */ | ||
43 | enum roce_async_events_type { | 48 | enum roce_async_events_type { |
44 | ROCE_ASYNC_EVENT_NONE = 0, | 49 | ROCE_ASYNC_EVENT_NONE = 0, |
45 | ROCE_ASYNC_EVENT_COMM_EST = 1, | 50 | ROCE_ASYNC_EVENT_COMM_EST = 1, |
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h index 08df82a096b6..505c0b48a761 100644 --- a/include/linux/qed/storage_common.h +++ b/include/linux/qed/storage_common.h | |||
@@ -33,43 +33,77 @@ | |||
33 | #ifndef __STORAGE_COMMON__ | 33 | #ifndef __STORAGE_COMMON__ |
34 | #define __STORAGE_COMMON__ | 34 | #define __STORAGE_COMMON__ |
35 | 35 | ||
36 | #define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2) | 36 | /*********************/ |
37 | #define BDQ_NUM_RESOURCES (4) | 37 | /* SCSI CONSTANTS */ |
38 | 38 | /*********************/ | |
39 | #define BDQ_ID_RQ (0) | 39 | |
40 | #define BDQ_ID_IMM_DATA (1) | 40 | #define SCSI_MAX_NUM_OF_CMDQS (NUM_OF_GLOBAL_QUEUES / 2) |
41 | #define BDQ_NUM_IDS (2) | 41 | #define BDQ_NUM_RESOURCES (4) |
42 | 42 | ||
43 | #define SCSI_NUM_SGES_SLOW_SGL_THR 8 | 43 | #define BDQ_ID_RQ (0) |
44 | #define BDQ_ID_IMM_DATA (1) | ||
45 | #define BDQ_ID_TQ (2) | ||
46 | #define BDQ_NUM_IDS (3) | ||
47 | |||
48 | #define SCSI_NUM_SGES_SLOW_SGL_THR 8 | ||
49 | |||
50 | #define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15) | ||
51 | |||
52 | /* SCSI op codes */ | ||
53 | #define SCSI_OPCODE_COMPARE_AND_WRITE (0x89) | ||
54 | #define SCSI_OPCODE_READ_10 (0x28) | ||
55 | #define SCSI_OPCODE_WRITE_6 (0x0A) | ||
56 | #define SCSI_OPCODE_WRITE_10 (0x2A) | ||
57 | #define SCSI_OPCODE_WRITE_12 (0xAA) | ||
58 | #define SCSI_OPCODE_WRITE_16 (0x8A) | ||
59 | #define SCSI_OPCODE_WRITE_AND_VERIFY_10 (0x2E) | ||
60 | #define SCSI_OPCODE_WRITE_AND_VERIFY_12 (0xAE) | ||
61 | #define SCSI_OPCODE_WRITE_AND_VERIFY_16 (0x8E) | ||
62 | |||
63 | /* iSCSI Drv opaque */ | ||
64 | struct iscsi_drv_opaque { | ||
65 | __le16 reserved_zero[3]; | ||
66 | __le16 opaque; | ||
67 | }; | ||
44 | 68 | ||
45 | #define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15) | 69 | /* Scsi 2B/8B opaque union */ |
70 | union scsi_opaque { | ||
71 | struct regpair fcoe_opaque; | ||
72 | struct iscsi_drv_opaque iscsi_opaque; | ||
73 | }; | ||
46 | 74 | ||
75 | /* SCSI buffer descriptor */ | ||
47 | struct scsi_bd { | 76 | struct scsi_bd { |
48 | struct regpair address; | 77 | struct regpair address; |
49 | struct regpair opaque; | 78 | union scsi_opaque opaque; |
50 | }; | 79 | }; |
51 | 80 | ||
81 | /* Scsi Drv BDQ struct */ | ||
52 | struct scsi_bdq_ram_drv_data { | 82 | struct scsi_bdq_ram_drv_data { |
53 | __le16 external_producer; | 83 | __le16 external_producer; |
54 | __le16 reserved0[3]; | 84 | __le16 reserved0[3]; |
55 | }; | 85 | }; |
56 | 86 | ||
87 | /* SCSI SGE entry */ | ||
57 | struct scsi_sge { | 88 | struct scsi_sge { |
58 | struct regpair sge_addr; | 89 | struct regpair sge_addr; |
59 | __le32 sge_len; | 90 | __le32 sge_len; |
60 | __le32 reserved; | 91 | __le32 reserved; |
61 | }; | 92 | }; |
62 | 93 | ||
94 | /* Cached SGEs section */ | ||
63 | struct scsi_cached_sges { | 95 | struct scsi_cached_sges { |
64 | struct scsi_sge sge[4]; | 96 | struct scsi_sge sge[4]; |
65 | }; | 97 | }; |
66 | 98 | ||
99 | /* Scsi Drv CMDQ struct */ | ||
67 | struct scsi_drv_cmdq { | 100 | struct scsi_drv_cmdq { |
68 | __le16 cmdq_cons; | 101 | __le16 cmdq_cons; |
69 | __le16 reserved0; | 102 | __le16 reserved0; |
70 | __le32 reserved1; | 103 | __le32 reserved1; |
71 | }; | 104 | }; |
72 | 105 | ||
106 | /* Common SCSI init params passed by driver to FW in function init ramrod */ | ||
73 | struct scsi_init_func_params { | 107 | struct scsi_init_func_params { |
74 | __le16 num_tasks; | 108 | __le16 num_tasks; |
75 | u8 log_page_size; | 109 | u8 log_page_size; |
@@ -77,6 +111,7 @@ struct scsi_init_func_params { | |||
77 | u8 reserved2[12]; | 111 | u8 reserved2[12]; |
78 | }; | 112 | }; |
79 | 113 | ||
114 | /* SCSI RQ/CQ/CMDQ firmware function init parameters */ | ||
80 | struct scsi_init_func_queues { | 115 | struct scsi_init_func_queues { |
81 | struct regpair glbl_q_params_addr; | 116 | struct regpair glbl_q_params_addr; |
82 | __le16 rq_buffer_size; | 117 | __le16 rq_buffer_size; |
@@ -84,39 +119,45 @@ struct scsi_init_func_queues { | |||
84 | __le16 cmdq_num_entries; | 119 | __le16 cmdq_num_entries; |
85 | u8 bdq_resource_id; | 120 | u8 bdq_resource_id; |
86 | u8 q_validity; | 121 | u8 q_validity; |
87 | #define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 | 122 | #define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK 0x1 |
88 | #define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 | 123 | #define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT 0 |
89 | #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 | 124 | #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK 0x1 |
90 | #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 | 125 | #define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT 1 |
91 | #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 | 126 | #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK 0x1 |
92 | #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 | 127 | #define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT 2 |
93 | #define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK 0x1F | 128 | #define SCSI_INIT_FUNC_QUEUES_TQ_VALID_MASK 0x1 |
94 | #define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT 3 | 129 | #define SCSI_INIT_FUNC_QUEUES_TQ_VALID_SHIFT 3 |
130 | #define SCSI_INIT_FUNC_QUEUES_SOC_EN_MASK 0x1 | ||
131 | #define SCSI_INIT_FUNC_QUEUES_SOC_EN_SHIFT 4 | ||
132 | #define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_MASK 0x7 | ||
133 | #define SCSI_INIT_FUNC_QUEUES_SOC_NUM_OF_BLOCKS_LOG_SHIFT 5 | ||
134 | __le16 cq_cmdq_sb_num_arr[SCSI_MAX_NUM_OF_CMDQS]; | ||
95 | u8 num_queues; | 135 | u8 num_queues; |
96 | u8 queue_relative_offset; | 136 | u8 queue_relative_offset; |
97 | u8 cq_sb_pi; | 137 | u8 cq_sb_pi; |
98 | u8 cmdq_sb_pi; | 138 | u8 cmdq_sb_pi; |
99 | __le16 cq_cmdq_sb_num_arr[NUM_OF_CMDQS_CQS]; | ||
100 | __le16 reserved0; | ||
101 | u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; | 139 | u8 bdq_pbl_num_entries[BDQ_NUM_IDS]; |
140 | u8 reserved1; | ||
102 | struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; | 141 | struct regpair bdq_pbl_base_address[BDQ_NUM_IDS]; |
103 | __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; | 142 | __le16 bdq_xoff_threshold[BDQ_NUM_IDS]; |
104 | __le16 bdq_xon_threshold[BDQ_NUM_IDS]; | ||
105 | __le16 cmdq_xoff_threshold; | 143 | __le16 cmdq_xoff_threshold; |
144 | __le16 bdq_xon_threshold[BDQ_NUM_IDS]; | ||
106 | __le16 cmdq_xon_threshold; | 145 | __le16 cmdq_xon_threshold; |
107 | __le32 reserved1; | ||
108 | }; | 146 | }; |
109 | 147 | ||
148 | /* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */ | ||
110 | struct scsi_ram_per_bdq_resource_drv_data { | 149 | struct scsi_ram_per_bdq_resource_drv_data { |
111 | struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; | 150 | struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS]; |
112 | }; | 151 | }; |
113 | 152 | ||
153 | /* SCSI SGL types */ | ||
114 | enum scsi_sgl_mode { | 154 | enum scsi_sgl_mode { |
115 | SCSI_TX_SLOW_SGL, | 155 | SCSI_TX_SLOW_SGL, |
116 | SCSI_FAST_SGL, | 156 | SCSI_FAST_SGL, |
117 | MAX_SCSI_SGL_MODE | 157 | MAX_SCSI_SGL_MODE |
118 | }; | 158 | }; |
119 | 159 | ||
160 | /* SCSI SGL parameters */ | ||
120 | struct scsi_sgl_params { | 161 | struct scsi_sgl_params { |
121 | struct regpair sgl_addr; | 162 | struct regpair sgl_addr; |
122 | __le32 sgl_total_length; | 163 | __le32 sgl_total_length; |
@@ -126,10 +167,16 @@ struct scsi_sgl_params { | |||
126 | u8 reserved; | 167 | u8 reserved; |
127 | }; | 168 | }; |
128 | 169 | ||
170 | /* SCSI terminate connection params */ | ||
129 | struct scsi_terminate_extra_params { | 171 | struct scsi_terminate_extra_params { |
130 | __le16 unsolicited_cq_count; | 172 | __le16 unsolicited_cq_count; |
131 | __le16 cmdq_count; | 173 | __le16 cmdq_count; |
132 | u8 reserved[4]; | 174 | u8 reserved[4]; |
133 | }; | 175 | }; |
134 | 176 | ||
177 | /* SCSI Task Queue Element */ | ||
178 | struct scsi_tqe { | ||
179 | __le16 itid; | ||
180 | }; | ||
181 | |||
135 | #endif /* __STORAGE_COMMON__ */ | 182 | #endif /* __STORAGE_COMMON__ */ |
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h index dbf7a43c3e1f..4a4845193539 100644 --- a/include/linux/qed/tcp_common.h +++ b/include/linux/qed/tcp_common.h | |||
@@ -33,8 +33,13 @@ | |||
33 | #ifndef __TCP_COMMON__ | 33 | #ifndef __TCP_COMMON__ |
34 | #define __TCP_COMMON__ | 34 | #define __TCP_COMMON__ |
35 | 35 | ||
36 | #define TCP_INVALID_TIMEOUT_VAL -1 | 36 | /********************/ |
37 | /* TCP FW CONSTANTS */ | ||
38 | /********************/ | ||
37 | 39 | ||
40 | #define TCP_INVALID_TIMEOUT_VAL -1 | ||
41 | |||
42 | /* OOO opaque data received from LL2 */ | ||
38 | struct ooo_opaque { | 43 | struct ooo_opaque { |
39 | __le32 cid; | 44 | __le32 cid; |
40 | u8 drop_isle; | 45 | u8 drop_isle; |
@@ -43,25 +48,29 @@ struct ooo_opaque { | |||
43 | u8 ooo_isle; | 48 | u8 ooo_isle; |
44 | }; | 49 | }; |
45 | 50 | ||
51 | /* tcp connect mode enum */ | ||
46 | enum tcp_connect_mode { | 52 | enum tcp_connect_mode { |
47 | TCP_CONNECT_ACTIVE, | 53 | TCP_CONNECT_ACTIVE, |
48 | TCP_CONNECT_PASSIVE, | 54 | TCP_CONNECT_PASSIVE, |
49 | MAX_TCP_CONNECT_MODE | 55 | MAX_TCP_CONNECT_MODE |
50 | }; | 56 | }; |
51 | 57 | ||
58 | /* tcp function init parameters */ | ||
52 | struct tcp_init_params { | 59 | struct tcp_init_params { |
53 | __le32 two_msl_timer; | 60 | __le32 two_msl_timer; |
54 | __le16 tx_sws_timer; | 61 | __le16 tx_sws_timer; |
55 | u8 maxfinrt; | 62 | u8 max_fin_rt; |
56 | u8 reserved[9]; | 63 | u8 reserved[9]; |
57 | }; | 64 | }; |
58 | 65 | ||
66 | /* tcp IPv4/IPv6 enum */ | ||
59 | enum tcp_ip_version { | 67 | enum tcp_ip_version { |
60 | TCP_IPV4, | 68 | TCP_IPV4, |
61 | TCP_IPV6, | 69 | TCP_IPV6, |
62 | MAX_TCP_IP_VERSION | 70 | MAX_TCP_IP_VERSION |
63 | }; | 71 | }; |
64 | 72 | ||
73 | /* tcp offload parameters */ | ||
65 | struct tcp_offload_params { | 74 | struct tcp_offload_params { |
66 | __le16 local_mac_addr_lo; | 75 | __le16 local_mac_addr_lo; |
67 | __le16 local_mac_addr_mid; | 76 | __le16 local_mac_addr_mid; |
@@ -70,24 +79,29 @@ struct tcp_offload_params { | |||
70 | __le16 remote_mac_addr_mid; | 79 | __le16 remote_mac_addr_mid; |
71 | __le16 remote_mac_addr_hi; | 80 | __le16 remote_mac_addr_hi; |
72 | __le16 vlan_id; | 81 | __le16 vlan_id; |
73 | u8 flags; | 82 | __le16 flags; |
74 | #define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 | 83 | #define TCP_OFFLOAD_PARAMS_TS_EN_MASK 0x1 |
75 | #define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 | 84 | #define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT 0 |
76 | #define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 | 85 | #define TCP_OFFLOAD_PARAMS_DA_EN_MASK 0x1 |
77 | #define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 | 86 | #define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT 1 |
78 | #define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 | 87 | #define TCP_OFFLOAD_PARAMS_KA_EN_MASK 0x1 |
79 | #define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 | 88 | #define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT 2 |
80 | #define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 | 89 | #define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_MASK 0x1 |
81 | #define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 3 | 90 | #define TCP_OFFLOAD_PARAMS_ECN_SENDER_EN_SHIFT 3 |
82 | #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 | 91 | #define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_MASK 0x1 |
83 | #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 4 | 92 | #define TCP_OFFLOAD_PARAMS_ECN_RECEIVER_EN_SHIFT 4 |
84 | #define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 | 93 | #define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK 0x1 |
85 | #define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 5 | 94 | #define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT 5 |
86 | #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 | 95 | #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK 0x1 |
87 | #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 6 | 96 | #define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT 6 |
88 | #define TCP_OFFLOAD_PARAMS_RESERVED0_MASK 0x1 | 97 | #define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK 0x1 |
89 | #define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT 7 | 98 | #define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT 7 |
99 | #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK 0x1 | ||
100 | #define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT 8 | ||
101 | #define TCP_OFFLOAD_PARAMS_RESERVED_MASK 0x7F | ||
102 | #define TCP_OFFLOAD_PARAMS_RESERVED_SHIFT 9 | ||
90 | u8 ip_version; | 103 | u8 ip_version; |
104 | u8 reserved0[3]; | ||
91 | __le32 remote_ip[4]; | 105 | __le32 remote_ip[4]; |
92 | __le32 local_ip[4]; | 106 | __le32 local_ip[4]; |
93 | __le32 flow_label; | 107 | __le32 flow_label; |
@@ -99,17 +113,21 @@ struct tcp_offload_params { | |||
99 | u8 rcv_wnd_scale; | 113 | u8 rcv_wnd_scale; |
100 | u8 connect_mode; | 114 | u8 connect_mode; |
101 | __le16 srtt; | 115 | __le16 srtt; |
102 | __le32 cwnd; | ||
103 | __le32 ss_thresh; | 116 | __le32 ss_thresh; |
104 | __le16 reserved1; | 117 | __le32 rcv_wnd; |
118 | __le32 cwnd; | ||
105 | u8 ka_max_probe_cnt; | 119 | u8 ka_max_probe_cnt; |
106 | u8 dup_ack_theshold; | 120 | u8 dup_ack_theshold; |
121 | __le16 reserved1; | ||
122 | __le32 ka_timeout; | ||
123 | __le32 ka_interval; | ||
124 | __le32 max_rt_time; | ||
125 | __le32 initial_rcv_wnd; | ||
107 | __le32 rcv_next; | 126 | __le32 rcv_next; |
108 | __le32 snd_una; | 127 | __le32 snd_una; |
109 | __le32 snd_next; | 128 | __le32 snd_next; |
110 | __le32 snd_max; | 129 | __le32 snd_max; |
111 | __le32 snd_wnd; | 130 | __le32 snd_wnd; |
112 | __le32 rcv_wnd; | ||
113 | __le32 snd_wl1; | 131 | __le32 snd_wl1; |
114 | __le32 ts_recent; | 132 | __le32 ts_recent; |
115 | __le32 ts_recent_age; | 133 | __le32 ts_recent_age; |
@@ -122,16 +140,13 @@ struct tcp_offload_params { | |||
122 | u8 rt_cnt; | 140 | u8 rt_cnt; |
123 | __le16 rtt_var; | 141 | __le16 rtt_var; |
124 | __le16 fw_internal; | 142 | __le16 fw_internal; |
125 | __le32 ka_timeout; | ||
126 | __le32 ka_interval; | ||
127 | __le32 max_rt_time; | ||
128 | __le32 initial_rcv_wnd; | ||
129 | u8 snd_wnd_scale; | 143 | u8 snd_wnd_scale; |
130 | u8 ack_frequency; | 144 | u8 ack_frequency; |
131 | __le16 da_timeout_value; | 145 | __le16 da_timeout_value; |
132 | __le32 reserved3[2]; | 146 | __le32 reserved3; |
133 | }; | 147 | }; |
134 | 148 | ||
149 | /* tcp offload parameters */ | ||
135 | struct tcp_offload_params_opt2 { | 150 | struct tcp_offload_params_opt2 { |
136 | __le16 local_mac_addr_lo; | 151 | __le16 local_mac_addr_lo; |
137 | __le16 local_mac_addr_mid; | 152 | __le16 local_mac_addr_mid; |
@@ -140,16 +155,19 @@ struct tcp_offload_params_opt2 { | |||
140 | __le16 remote_mac_addr_mid; | 155 | __le16 remote_mac_addr_mid; |
141 | __le16 remote_mac_addr_hi; | 156 | __le16 remote_mac_addr_hi; |
142 | __le16 vlan_id; | 157 | __le16 vlan_id; |
143 | u8 flags; | 158 | __le16 flags; |
144 | #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 | 159 | #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK 0x1 |
145 | #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 | 160 | #define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT 0 |
146 | #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 | 161 | #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK 0x1 |
147 | #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 | 162 | #define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT 1 |
148 | #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 | 163 | #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK 0x1 |
149 | #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 | 164 | #define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT 2 |
150 | #define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0x1F | 165 | #define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_MASK 0x1 |
151 | #define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 3 | 166 | #define TCP_OFFLOAD_PARAMS_OPT2_ECN_EN_SHIFT 3 |
167 | #define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK 0xFFF | ||
168 | #define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT 4 | ||
152 | u8 ip_version; | 169 | u8 ip_version; |
170 | u8 reserved1[3]; | ||
153 | __le32 remote_ip[4]; | 171 | __le32 remote_ip[4]; |
154 | __le32 local_ip[4]; | 172 | __le32 local_ip[4]; |
155 | __le32 flow_label; | 173 | __le32 flow_label; |
@@ -163,9 +181,16 @@ struct tcp_offload_params_opt2 { | |||
163 | __le16 syn_ip_payload_length; | 181 | __le16 syn_ip_payload_length; |
164 | __le32 syn_phy_addr_lo; | 182 | __le32 syn_phy_addr_lo; |
165 | __le32 syn_phy_addr_hi; | 183 | __le32 syn_phy_addr_hi; |
166 | __le32 reserved1[22]; | 184 | __le32 cwnd; |
185 | u8 ka_max_probe_cnt; | ||
186 | u8 reserved2[3]; | ||
187 | __le32 ka_timeout; | ||
188 | __le32 ka_interval; | ||
189 | __le32 max_rt_time; | ||
190 | __le32 reserved3[16]; | ||
167 | }; | 191 | }; |
168 | 192 | ||
193 | /* tcp IPv4/IPv6 enum */ | ||
169 | enum tcp_seg_placement_event { | 194 | enum tcp_seg_placement_event { |
170 | TCP_EVENT_ADD_PEN, | 195 | TCP_EVENT_ADD_PEN, |
171 | TCP_EVENT_ADD_NEW_ISLE, | 196 | TCP_EVENT_ADD_NEW_ISLE, |
@@ -177,40 +202,41 @@ enum tcp_seg_placement_event { | |||
177 | MAX_TCP_SEG_PLACEMENT_EVENT | 202 | MAX_TCP_SEG_PLACEMENT_EVENT |
178 | }; | 203 | }; |
179 | 204 | ||
205 | /* tcp init parameters */ | ||
180 | struct tcp_update_params { | 206 | struct tcp_update_params { |
181 | __le16 flags; | 207 | __le16 flags; |
182 | #define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 | 208 | #define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1 |
183 | #define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 | 209 | #define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT 0 |
184 | #define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 | 210 | #define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK 0x1 |
185 | #define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 | 211 | #define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT 1 |
186 | #define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 | 212 | #define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK 0x1 |
187 | #define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 | 213 | #define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT 2 |
188 | #define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 | 214 | #define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK 0x1 |
189 | #define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 | 215 | #define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT 3 |
190 | #define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 | 216 | #define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK 0x1 |
191 | #define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 | 217 | #define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT 4 |
192 | #define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 | 218 | #define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK 0x1 |
193 | #define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 | 219 | #define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT 5 |
194 | #define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 | 220 | #define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK 0x1 |
195 | #define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 | 221 | #define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT 6 |
196 | #define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 | 222 | #define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK 0x1 |
197 | #define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 | 223 | #define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT 7 |
198 | #define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 | 224 | #define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK 0x1 |
199 | #define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 | 225 | #define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT 8 |
200 | #define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 | 226 | #define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK 0x1 |
201 | #define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 | 227 | #define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT 9 |
202 | #define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 | 228 | #define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK 0x1 |
203 | #define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 | 229 | #define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT 10 |
204 | #define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 | 230 | #define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK 0x1 |
205 | #define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 | 231 | #define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT 11 |
206 | #define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 | 232 | #define TCP_UPDATE_PARAMS_KA_EN_MASK 0x1 |
207 | #define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 | 233 | #define TCP_UPDATE_PARAMS_KA_EN_SHIFT 12 |
208 | #define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 | 234 | #define TCP_UPDATE_PARAMS_NAGLE_EN_MASK 0x1 |
209 | #define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 | 235 | #define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT 13 |
210 | #define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 | 236 | #define TCP_UPDATE_PARAMS_KA_RESTART_MASK 0x1 |
211 | #define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 | 237 | #define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT 14 |
212 | #define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 | 238 | #define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK 0x1 |
213 | #define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 | 239 | #define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT 15 |
214 | __le16 remote_mac_addr_lo; | 240 | __le16 remote_mac_addr_lo; |
215 | __le16 remote_mac_addr_mid; | 241 | __le16 remote_mac_addr_mid; |
216 | __le16 remote_mac_addr_hi; | 242 | __le16 remote_mac_addr_hi; |
@@ -226,6 +252,7 @@ struct tcp_update_params { | |||
226 | u8 reserved1[7]; | 252 | u8 reserved1[7]; |
227 | }; | 253 | }; |
228 | 254 | ||
255 | /* toe upload parameters */ | ||
229 | struct tcp_upload_params { | 256 | struct tcp_upload_params { |
230 | __le32 rcv_next; | 257 | __le32 rcv_next; |
231 | __le32 snd_una; | 258 | __le32 snd_una; |
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h index 23a9c89c7ad9..fc55ff31eca7 100644 --- a/include/linux/radix-tree.h +++ b/include/linux/radix-tree.h | |||
@@ -356,24 +356,9 @@ int radix_tree_split(struct radix_tree_root *, unsigned long index, | |||
356 | int radix_tree_join(struct radix_tree_root *, unsigned long index, | 356 | int radix_tree_join(struct radix_tree_root *, unsigned long index, |
357 | unsigned new_order, void *); | 357 | unsigned new_order, void *); |
358 | 358 | ||
359 | void __rcu **idr_get_free_cmn(struct radix_tree_root *root, | 359 | void __rcu **idr_get_free(struct radix_tree_root *root, |
360 | struct radix_tree_iter *iter, gfp_t gfp, | 360 | struct radix_tree_iter *iter, gfp_t gfp, |
361 | unsigned long max); | 361 | unsigned long max); |
362 | static inline void __rcu **idr_get_free(struct radix_tree_root *root, | ||
363 | struct radix_tree_iter *iter, | ||
364 | gfp_t gfp, | ||
365 | int end) | ||
366 | { | ||
367 | return idr_get_free_cmn(root, iter, gfp, end > 0 ? end - 1 : INT_MAX); | ||
368 | } | ||
369 | |||
370 | static inline void __rcu **idr_get_free_ext(struct radix_tree_root *root, | ||
371 | struct radix_tree_iter *iter, | ||
372 | gfp_t gfp, | ||
373 | unsigned long end) | ||
374 | { | ||
375 | return idr_get_free_cmn(root, iter, gfp, end - 1); | ||
376 | } | ||
377 | 362 | ||
378 | enum { | 363 | enum { |
379 | RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ | 364 | RADIX_TREE_ITER_TAG_MASK = 0x0f, /* tag index in lower nybble */ |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index a6ddc42f87a5..043d04784675 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
@@ -197,7 +197,7 @@ static inline void exit_tasks_rcu_finish(void) { } | |||
197 | #define cond_resched_rcu_qs() \ | 197 | #define cond_resched_rcu_qs() \ |
198 | do { \ | 198 | do { \ |
199 | if (!cond_resched()) \ | 199 | if (!cond_resched()) \ |
200 | rcu_note_voluntary_context_switch(current); \ | 200 | rcu_note_voluntary_context_switch_lite(current); \ |
201 | } while (0) | 201 | } while (0) |
202 | 202 | ||
203 | /* | 203 | /* |
@@ -433,12 +433,12 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
433 | * @p: The pointer to read | 433 | * @p: The pointer to read |
434 | * | 434 | * |
435 | * Return the value of the specified RCU-protected pointer, but omit the | 435 | * Return the value of the specified RCU-protected pointer, but omit the |
436 | * smp_read_barrier_depends() and keep the READ_ONCE(). This is useful | 436 | * lockdep checks for being in an RCU read-side critical section. This is |
437 | * when the value of this pointer is accessed, but the pointer is not | 437 | * useful when the value of this pointer is accessed, but the pointer is |
438 | * dereferenced, for example, when testing an RCU-protected pointer against | 438 | * not dereferenced, for example, when testing an RCU-protected pointer |
439 | * NULL. Although rcu_access_pointer() may also be used in cases where | 439 | * against NULL. Although rcu_access_pointer() may also be used in cases |
440 | * update-side locks prevent the value of the pointer from changing, you | 440 | * where update-side locks prevent the value of the pointer from changing, |
441 | * should instead use rcu_dereference_protected() for this use case. | 441 | * you should instead use rcu_dereference_protected() for this use case. |
442 | * | 442 | * |
443 | * It is also permissible to use rcu_access_pointer() when read-side | 443 | * It is also permissible to use rcu_access_pointer() when read-side |
444 | * access to the pointer was removed at least one grace period ago, as | 444 | * access to the pointer was removed at least one grace period ago, as |
@@ -521,12 +521,11 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
521 | * @c: The conditions under which the dereference will take place | 521 | * @c: The conditions under which the dereference will take place |
522 | * | 522 | * |
523 | * Return the value of the specified RCU-protected pointer, but omit | 523 | * Return the value of the specified RCU-protected pointer, but omit |
524 | * both the smp_read_barrier_depends() and the READ_ONCE(). This | 524 | * the READ_ONCE(). This is useful in cases where update-side locks |
525 | * is useful in cases where update-side locks prevent the value of the | 525 | * prevent the value of the pointer from changing. Please note that this |
526 | * pointer from changing. Please note that this primitive does *not* | 526 | * primitive does *not* prevent the compiler from repeating this reference |
527 | * prevent the compiler from repeating this reference or combining it | 527 | * or combining it with other references, so it should not be used without |
528 | * with other references, so it should not be used without protection | 528 | * protection of appropriate locks. |
529 | * of appropriate locks. | ||
530 | * | 529 | * |
531 | * This function is only for update-side use. Using this function | 530 | * This function is only for update-side use. Using this function |
532 | * when protected only by rcu_read_lock() will result in infrequent | 531 | * when protected only by rcu_read_lock() will result in infrequent |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index b3dbf9502fd0..ce9beec35e34 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
@@ -111,7 +111,6 @@ static inline void rcu_cpu_stall_reset(void) { } | |||
111 | static inline void rcu_idle_enter(void) { } | 111 | static inline void rcu_idle_enter(void) { } |
112 | static inline void rcu_idle_exit(void) { } | 112 | static inline void rcu_idle_exit(void) { } |
113 | static inline void rcu_irq_enter(void) { } | 113 | static inline void rcu_irq_enter(void) { } |
114 | static inline bool rcu_irq_enter_disabled(void) { return false; } | ||
115 | static inline void rcu_irq_exit_irqson(void) { } | 114 | static inline void rcu_irq_exit_irqson(void) { } |
116 | static inline void rcu_irq_enter_irqson(void) { } | 115 | static inline void rcu_irq_enter_irqson(void) { } |
117 | static inline void rcu_irq_exit(void) { } | 116 | static inline void rcu_irq_exit(void) { } |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 37d6fd3b7ff8..fd996cdf1833 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
@@ -85,7 +85,6 @@ void rcu_irq_enter(void); | |||
85 | void rcu_irq_exit(void); | 85 | void rcu_irq_exit(void); |
86 | void rcu_irq_enter_irqson(void); | 86 | void rcu_irq_enter_irqson(void); |
87 | void rcu_irq_exit_irqson(void); | 87 | void rcu_irq_exit_irqson(void); |
88 | bool rcu_irq_enter_disabled(void); | ||
89 | 88 | ||
90 | void exit_rcu(void); | 89 | void exit_rcu(void); |
91 | 90 | ||
diff --git a/include/linux/refcount.h b/include/linux/refcount.h index e8286585e149..4193c41e383a 100644 --- a/include/linux/refcount.h +++ b/include/linux/refcount.h | |||
@@ -8,7 +8,7 @@ | |||
8 | #include <linux/kernel.h> | 8 | #include <linux/kernel.h> |
9 | 9 | ||
10 | /** | 10 | /** |
11 | * refcount_t - variant of atomic_t specialized for reference counts | 11 | * struct refcount_t - variant of atomic_t specialized for reference counts |
12 | * @refs: atomic_t counter field | 12 | * @refs: atomic_t counter field |
13 | * | 13 | * |
14 | * The counter saturates at UINT_MAX and will not move once | 14 | * The counter saturates at UINT_MAX and will not move once |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 15eddc1353ba..6a3aeba40e9e 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
@@ -24,12 +24,14 @@ struct module; | |||
24 | struct device; | 24 | struct device; |
25 | struct i2c_client; | 25 | struct i2c_client; |
26 | struct irq_domain; | 26 | struct irq_domain; |
27 | struct slim_device; | ||
27 | struct spi_device; | 28 | struct spi_device; |
28 | struct spmi_device; | 29 | struct spmi_device; |
29 | struct regmap; | 30 | struct regmap; |
30 | struct regmap_range_cfg; | 31 | struct regmap_range_cfg; |
31 | struct regmap_field; | 32 | struct regmap_field; |
32 | struct snd_ac97; | 33 | struct snd_ac97; |
34 | struct sdw_slave; | ||
33 | 35 | ||
34 | /* An enum of all the supported cache types */ | 36 | /* An enum of all the supported cache types */ |
35 | enum regcache_type { | 37 | enum regcache_type { |
@@ -264,6 +266,9 @@ typedef void (*regmap_unlock)(void *); | |||
264 | * field is NULL but precious_table (see below) is not, the | 266 | * field is NULL but precious_table (see below) is not, the |
265 | * check is performed on such table (a register is precious if | 267 | * check is performed on such table (a register is precious if |
266 | * it belongs to one of the ranges specified by precious_table). | 268 | * it belongs to one of the ranges specified by precious_table). |
269 | * @disable_locking: This regmap is either protected by external means or | ||
270 | * is guaranteed not be be accessed from multiple threads. | ||
271 | * Don't use any locking mechanisms. | ||
267 | * @lock: Optional lock callback (overrides regmap's default lock | 272 | * @lock: Optional lock callback (overrides regmap's default lock |
268 | * function, based on spinlock or mutex). | 273 | * function, based on spinlock or mutex). |
269 | * @unlock: As above for unlocking. | 274 | * @unlock: As above for unlocking. |
@@ -296,7 +301,10 @@ typedef void (*regmap_unlock)(void *); | |||
296 | * a read. | 301 | * a read. |
297 | * @write_flag_mask: Mask to be set in the top bytes of the register when doing | 302 | * @write_flag_mask: Mask to be set in the top bytes of the register when doing |
298 | * a write. If both read_flag_mask and write_flag_mask are | 303 | * a write. If both read_flag_mask and write_flag_mask are |
299 | * empty the regmap_bus default masks are used. | 304 | * empty and zero_flag_mask is not set the regmap_bus default |
305 | * masks are used. | ||
306 | * @zero_flag_mask: If set, read_flag_mask and write_flag_mask are used even | ||
307 | * if they are both empty. | ||
300 | * @use_single_rw: If set, converts the bulk read and write operations into | 308 | * @use_single_rw: If set, converts the bulk read and write operations into |
301 | * a series of single read and write operations. This is useful | 309 | * a series of single read and write operations. This is useful |
302 | * for device that does not support bulk read and write. | 310 | * for device that does not support bulk read and write. |
@@ -317,6 +325,7 @@ typedef void (*regmap_unlock)(void *); | |||
317 | * | 325 | * |
318 | * @ranges: Array of configuration entries for virtual address ranges. | 326 | * @ranges: Array of configuration entries for virtual address ranges. |
319 | * @num_ranges: Number of range configuration entries. | 327 | * @num_ranges: Number of range configuration entries. |
328 | * @use_hwlock: Indicate if a hardware spinlock should be used. | ||
320 | * @hwlock_id: Specify the hardware spinlock id. | 329 | * @hwlock_id: Specify the hardware spinlock id. |
321 | * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, | 330 | * @hwlock_mode: The hardware spinlock mode, should be HWLOCK_IRQSTATE, |
322 | * HWLOCK_IRQ or 0. | 331 | * HWLOCK_IRQ or 0. |
@@ -333,6 +342,8 @@ struct regmap_config { | |||
333 | bool (*readable_reg)(struct device *dev, unsigned int reg); | 342 | bool (*readable_reg)(struct device *dev, unsigned int reg); |
334 | bool (*volatile_reg)(struct device *dev, unsigned int reg); | 343 | bool (*volatile_reg)(struct device *dev, unsigned int reg); |
335 | bool (*precious_reg)(struct device *dev, unsigned int reg); | 344 | bool (*precious_reg)(struct device *dev, unsigned int reg); |
345 | |||
346 | bool disable_locking; | ||
336 | regmap_lock lock; | 347 | regmap_lock lock; |
337 | regmap_unlock unlock; | 348 | regmap_unlock unlock; |
338 | void *lock_arg; | 349 | void *lock_arg; |
@@ -355,6 +366,7 @@ struct regmap_config { | |||
355 | 366 | ||
356 | unsigned long read_flag_mask; | 367 | unsigned long read_flag_mask; |
357 | unsigned long write_flag_mask; | 368 | unsigned long write_flag_mask; |
369 | bool zero_flag_mask; | ||
358 | 370 | ||
359 | bool use_single_rw; | 371 | bool use_single_rw; |
360 | bool can_multi_write; | 372 | bool can_multi_write; |
@@ -365,6 +377,7 @@ struct regmap_config { | |||
365 | const struct regmap_range_cfg *ranges; | 377 | const struct regmap_range_cfg *ranges; |
366 | unsigned int num_ranges; | 378 | unsigned int num_ranges; |
367 | 379 | ||
380 | bool use_hwlock; | ||
368 | unsigned int hwlock_id; | 381 | unsigned int hwlock_id; |
369 | unsigned int hwlock_mode; | 382 | unsigned int hwlock_mode; |
370 | }; | 383 | }; |
@@ -499,6 +512,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c, | |||
499 | const struct regmap_config *config, | 512 | const struct regmap_config *config, |
500 | struct lock_class_key *lock_key, | 513 | struct lock_class_key *lock_key, |
501 | const char *lock_name); | 514 | const char *lock_name); |
515 | struct regmap *__regmap_init_slimbus(struct slim_device *slimbus, | ||
516 | const struct regmap_config *config, | ||
517 | struct lock_class_key *lock_key, | ||
518 | const char *lock_name); | ||
502 | struct regmap *__regmap_init_spi(struct spi_device *dev, | 519 | struct regmap *__regmap_init_spi(struct spi_device *dev, |
503 | const struct regmap_config *config, | 520 | const struct regmap_config *config, |
504 | struct lock_class_key *lock_key, | 521 | struct lock_class_key *lock_key, |
@@ -524,6 +541,10 @@ struct regmap *__regmap_init_ac97(struct snd_ac97 *ac97, | |||
524 | const struct regmap_config *config, | 541 | const struct regmap_config *config, |
525 | struct lock_class_key *lock_key, | 542 | struct lock_class_key *lock_key, |
526 | const char *lock_name); | 543 | const char *lock_name); |
544 | struct regmap *__regmap_init_sdw(struct sdw_slave *sdw, | ||
545 | const struct regmap_config *config, | ||
546 | struct lock_class_key *lock_key, | ||
547 | const char *lock_name); | ||
527 | 548 | ||
528 | struct regmap *__devm_regmap_init(struct device *dev, | 549 | struct regmap *__devm_regmap_init(struct device *dev, |
529 | const struct regmap_bus *bus, | 550 | const struct regmap_bus *bus, |
@@ -561,6 +582,10 @@ struct regmap *__devm_regmap_init_ac97(struct snd_ac97 *ac97, | |||
561 | const struct regmap_config *config, | 582 | const struct regmap_config *config, |
562 | struct lock_class_key *lock_key, | 583 | struct lock_class_key *lock_key, |
563 | const char *lock_name); | 584 | const char *lock_name); |
585 | struct regmap *__devm_regmap_init_sdw(struct sdw_slave *sdw, | ||
586 | const struct regmap_config *config, | ||
587 | struct lock_class_key *lock_key, | ||
588 | const char *lock_name); | ||
564 | 589 | ||
565 | /* | 590 | /* |
566 | * Wrapper for regmap_init macros to include a unique lockdep key and name | 591 | * Wrapper for regmap_init macros to include a unique lockdep key and name |
@@ -616,6 +641,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, | |||
616 | i2c, config) | 641 | i2c, config) |
617 | 642 | ||
618 | /** | 643 | /** |
644 | * regmap_init_slimbus() - Initialise register map | ||
645 | * | ||
646 | * @slimbus: Device that will be interacted with | ||
647 | * @config: Configuration for register map | ||
648 | * | ||
649 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
650 | * a struct regmap. | ||
651 | */ | ||
652 | #define regmap_init_slimbus(slimbus, config) \ | ||
653 | __regmap_lockdep_wrapper(__regmap_init_slimbus, #config, \ | ||
654 | slimbus, config) | ||
655 | |||
656 | /** | ||
619 | * regmap_init_spi() - Initialise register map | 657 | * regmap_init_spi() - Initialise register map |
620 | * | 658 | * |
621 | * @dev: Device that will be interacted with | 659 | * @dev: Device that will be interacted with |
@@ -710,6 +748,20 @@ int regmap_attach_dev(struct device *dev, struct regmap *map, | |||
710 | bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); | 748 | bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); |
711 | 749 | ||
712 | /** | 750 | /** |
751 | * regmap_init_sdw() - Initialise register map | ||
752 | * | ||
753 | * @sdw: Device that will be interacted with | ||
754 | * @config: Configuration for register map | ||
755 | * | ||
756 | * The return value will be an ERR_PTR() on error or a valid pointer to | ||
757 | * a struct regmap. | ||
758 | */ | ||
759 | #define regmap_init_sdw(sdw, config) \ | ||
760 | __regmap_lockdep_wrapper(__regmap_init_sdw, #config, \ | ||
761 | sdw, config) | ||
762 | |||
763 | |||
764 | /** | ||
713 | * devm_regmap_init() - Initialise managed register map | 765 | * devm_regmap_init() - Initialise managed register map |
714 | * | 766 | * |
715 | * @dev: Device that will be interacted with | 767 | * @dev: Device that will be interacted with |
@@ -839,6 +891,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg); | |||
839 | __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \ | 891 | __regmap_lockdep_wrapper(__devm_regmap_init_ac97, #config, \ |
840 | ac97, config) | 892 | ac97, config) |
841 | 893 | ||
894 | /** | ||
895 | * devm_regmap_init_sdw() - Initialise managed register map | ||
896 | * | ||
897 | * @sdw: Device that will be interacted with | ||
898 | * @config: Configuration for register map | ||
899 | * | ||
900 | * The return value will be an ERR_PTR() on error or a valid pointer | ||
901 | * to a struct regmap. The regmap will be automatically freed by the | ||
902 | * device management code. | ||
903 | */ | ||
904 | #define devm_regmap_init_sdw(sdw, config) \ | ||
905 | __regmap_lockdep_wrapper(__devm_regmap_init_sdw, #config, \ | ||
906 | sdw, config) | ||
907 | |||
842 | void regmap_exit(struct regmap *map); | 908 | void regmap_exit(struct regmap *map); |
843 | int regmap_reinit_cache(struct regmap *map, | 909 | int regmap_reinit_cache(struct regmap *map, |
844 | const struct regmap_config *config); | 910 | const struct regmap_config *config); |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 94417b4226bd..4c00486b7a78 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
@@ -214,6 +214,8 @@ struct regulator_ops { | |||
214 | /* set regulator suspend operating mode (defined in consumer.h) */ | 214 | /* set regulator suspend operating mode (defined in consumer.h) */ |
215 | int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); | 215 | int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode); |
216 | 216 | ||
217 | int (*resume_early)(struct regulator_dev *rdev); | ||
218 | |||
217 | int (*set_pull_down) (struct regulator_dev *); | 219 | int (*set_pull_down) (struct regulator_dev *); |
218 | }; | 220 | }; |
219 | 221 | ||
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index 9cd4fef37203..93a04893c739 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -42,6 +42,16 @@ struct regulator; | |||
42 | #define REGULATOR_CHANGE_DRMS 0x10 | 42 | #define REGULATOR_CHANGE_DRMS 0x10 |
43 | #define REGULATOR_CHANGE_BYPASS 0x20 | 43 | #define REGULATOR_CHANGE_BYPASS 0x20 |
44 | 44 | ||
45 | /* | ||
46 | * operations in suspend mode | ||
47 | * DO_NOTHING_IN_SUSPEND - the default value | ||
48 | * DISABLE_IN_SUSPEND - turn off regulator in suspend states | ||
49 | * ENABLE_IN_SUSPEND - keep regulator on in suspend states | ||
50 | */ | ||
51 | #define DO_NOTHING_IN_SUSPEND (-1) | ||
52 | #define DISABLE_IN_SUSPEND 0 | ||
53 | #define ENABLE_IN_SUSPEND 1 | ||
54 | |||
45 | /* Regulator active discharge flags */ | 55 | /* Regulator active discharge flags */ |
46 | enum regulator_active_discharge { | 56 | enum regulator_active_discharge { |
47 | REGULATOR_ACTIVE_DISCHARGE_DEFAULT, | 57 | REGULATOR_ACTIVE_DISCHARGE_DEFAULT, |
@@ -56,16 +66,24 @@ enum regulator_active_discharge { | |||
56 | * state. One of enabled or disabled must be set for the | 66 | * state. One of enabled or disabled must be set for the |
57 | * configuration to be applied. | 67 | * configuration to be applied. |
58 | * | 68 | * |
59 | * @uV: Operating voltage during suspend. | 69 | * @uV: Default operating voltage during suspend, it can be adjusted |
70 | * among <min_uV, max_uV>. | ||
71 | * @min_uV: Minimum suspend voltage may be set. | ||
72 | * @max_uV: Maximum suspend voltage may be set. | ||
60 | * @mode: Operating mode during suspend. | 73 | * @mode: Operating mode during suspend. |
61 | * @enabled: Enabled during suspend. | 74 | * @enabled: operations during suspend. |
62 | * @disabled: Disabled during suspend. | 75 | * - DO_NOTHING_IN_SUSPEND |
76 | * - DISABLE_IN_SUSPEND | ||
77 | * - ENABLE_IN_SUSPEND | ||
78 | * @changeable: Is this state can be switched between enabled/disabled, | ||
63 | */ | 79 | */ |
64 | struct regulator_state { | 80 | struct regulator_state { |
65 | int uV; /* suspend voltage */ | 81 | int uV; |
66 | unsigned int mode; /* suspend regulator operating mode */ | 82 | int min_uV; |
67 | int enabled; /* is regulator enabled in this suspend state */ | 83 | int max_uV; |
68 | int disabled; /* is the regulator disabled in this suspend state */ | 84 | unsigned int mode; |
85 | int enabled; | ||
86 | bool changeable; | ||
69 | }; | 87 | }; |
70 | 88 | ||
71 | /** | 89 | /** |
@@ -225,12 +243,12 @@ struct regulator_init_data { | |||
225 | 243 | ||
226 | #ifdef CONFIG_REGULATOR | 244 | #ifdef CONFIG_REGULATOR |
227 | void regulator_has_full_constraints(void); | 245 | void regulator_has_full_constraints(void); |
228 | int regulator_suspend_prepare(suspend_state_t state); | ||
229 | int regulator_suspend_finish(void); | ||
230 | #else | 246 | #else |
231 | static inline void regulator_has_full_constraints(void) | 247 | static inline void regulator_has_full_constraints(void) |
232 | { | 248 | { |
233 | } | 249 | } |
250 | #endif | ||
251 | |||
234 | static inline int regulator_suspend_prepare(suspend_state_t state) | 252 | static inline int regulator_suspend_prepare(suspend_state_t state) |
235 | { | 253 | { |
236 | return 0; | 254 | return 0; |
@@ -239,6 +257,5 @@ static inline int regulator_suspend_finish(void) | |||
239 | { | 257 | { |
240 | return 0; | 258 | return 0; |
241 | } | 259 | } |
242 | #endif | ||
243 | 260 | ||
244 | #endif | 261 | #endif |
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 44e630eb3d94..728d421fffe9 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h | |||
@@ -324,6 +324,7 @@ struct rproc_mem_entry { | |||
324 | }; | 324 | }; |
325 | 325 | ||
326 | struct rproc; | 326 | struct rproc; |
327 | struct firmware; | ||
327 | 328 | ||
328 | /** | 329 | /** |
329 | * struct rproc_ops - platform-specific device handlers | 330 | * struct rproc_ops - platform-specific device handlers |
@@ -331,12 +332,24 @@ struct rproc; | |||
331 | * @stop: power off the device | 332 | * @stop: power off the device |
332 | * @kick: kick a virtqueue (virtqueue id given as a parameter) | 333 | * @kick: kick a virtqueue (virtqueue id given as a parameter) |
333 | * @da_to_va: optional platform hook to perform address translations | 334 | * @da_to_va: optional platform hook to perform address translations |
335 | * @load_rsc_table: load resource table from firmware image | ||
336 | * @find_loaded_rsc_table: find the loaded resouce table | ||
337 | * @load: load firmeware to memory, where the remote processor | ||
338 | * expects to find it | ||
339 | * @sanity_check: sanity check the fw image | ||
340 | * @get_boot_addr: get boot address to entry point specified in firmware | ||
334 | */ | 341 | */ |
335 | struct rproc_ops { | 342 | struct rproc_ops { |
336 | int (*start)(struct rproc *rproc); | 343 | int (*start)(struct rproc *rproc); |
337 | int (*stop)(struct rproc *rproc); | 344 | int (*stop)(struct rproc *rproc); |
338 | void (*kick)(struct rproc *rproc, int vqid); | 345 | void (*kick)(struct rproc *rproc, int vqid); |
339 | void * (*da_to_va)(struct rproc *rproc, u64 da, int len); | 346 | void * (*da_to_va)(struct rproc *rproc, u64 da, int len); |
347 | int (*load_rsc_table)(struct rproc *rproc, const struct firmware *fw); | ||
348 | struct resource_table *(*find_loaded_rsc_table)( | ||
349 | struct rproc *rproc, const struct firmware *fw); | ||
350 | int (*load)(struct rproc *rproc, const struct firmware *fw); | ||
351 | int (*sanity_check)(struct rproc *rproc, const struct firmware *fw); | ||
352 | u32 (*get_boot_addr)(struct rproc *rproc, const struct firmware *fw); | ||
340 | }; | 353 | }; |
341 | 354 | ||
342 | /** | 355 | /** |
@@ -390,7 +403,6 @@ enum rproc_crash_type { | |||
390 | * @priv: private data which belongs to the platform-specific rproc module | 403 | * @priv: private data which belongs to the platform-specific rproc module |
391 | * @ops: platform-specific start/stop rproc handlers | 404 | * @ops: platform-specific start/stop rproc handlers |
392 | * @dev: virtual device for refcounting and common remoteproc behavior | 405 | * @dev: virtual device for refcounting and common remoteproc behavior |
393 | * @fw_ops: firmware-specific handlers | ||
394 | * @power: refcount of users who need this rproc powered up | 406 | * @power: refcount of users who need this rproc powered up |
395 | * @state: state of the device | 407 | * @state: state of the device |
396 | * @lock: lock which protects concurrent manipulations of the rproc | 408 | * @lock: lock which protects concurrent manipulations of the rproc |
@@ -406,11 +418,11 @@ enum rproc_crash_type { | |||
406 | * @index: index of this rproc device | 418 | * @index: index of this rproc device |
407 | * @crash_handler: workqueue for handling a crash | 419 | * @crash_handler: workqueue for handling a crash |
408 | * @crash_cnt: crash counter | 420 | * @crash_cnt: crash counter |
409 | * @crash_comp: completion used to sync crash handler and the rproc reload | ||
410 | * @recovery_disabled: flag that state if recovery was disabled | 421 | * @recovery_disabled: flag that state if recovery was disabled |
411 | * @max_notifyid: largest allocated notify id. | 422 | * @max_notifyid: largest allocated notify id. |
412 | * @table_ptr: pointer to the resource table in effect | 423 | * @table_ptr: pointer to the resource table in effect |
413 | * @cached_table: copy of the resource table | 424 | * @cached_table: copy of the resource table |
425 | * @table_sz: size of @cached_table | ||
414 | * @has_iommu: flag to indicate if remote processor is behind an MMU | 426 | * @has_iommu: flag to indicate if remote processor is behind an MMU |
415 | */ | 427 | */ |
416 | struct rproc { | 428 | struct rproc { |
@@ -419,9 +431,8 @@ struct rproc { | |||
419 | const char *name; | 431 | const char *name; |
420 | char *firmware; | 432 | char *firmware; |
421 | void *priv; | 433 | void *priv; |
422 | const struct rproc_ops *ops; | 434 | struct rproc_ops *ops; |
423 | struct device dev; | 435 | struct device dev; |
424 | const struct rproc_fw_ops *fw_ops; | ||
425 | atomic_t power; | 436 | atomic_t power; |
426 | unsigned int state; | 437 | unsigned int state; |
427 | struct mutex lock; | 438 | struct mutex lock; |
@@ -437,11 +448,11 @@ struct rproc { | |||
437 | int index; | 448 | int index; |
438 | struct work_struct crash_handler; | 449 | struct work_struct crash_handler; |
439 | unsigned int crash_cnt; | 450 | unsigned int crash_cnt; |
440 | struct completion crash_comp; | ||
441 | bool recovery_disabled; | 451 | bool recovery_disabled; |
442 | int max_notifyid; | 452 | int max_notifyid; |
443 | struct resource_table *table_ptr; | 453 | struct resource_table *table_ptr; |
444 | struct resource_table *cached_table; | 454 | struct resource_table *cached_table; |
455 | size_t table_sz; | ||
445 | bool has_iommu; | 456 | bool has_iommu; |
446 | bool auto_boot; | 457 | bool auto_boot; |
447 | }; | 458 | }; |
diff --git a/include/linux/reservation.h b/include/linux/reservation.h index 21fc84d82d41..02166e815afb 100644 --- a/include/linux/reservation.h +++ b/include/linux/reservation.h | |||
@@ -167,6 +167,29 @@ reservation_object_lock(struct reservation_object *obj, | |||
167 | } | 167 | } |
168 | 168 | ||
169 | /** | 169 | /** |
170 | * reservation_object_lock_interruptible - lock the reservation object | ||
171 | * @obj: the reservation object | ||
172 | * @ctx: the locking context | ||
173 | * | ||
174 | * Locks the reservation object interruptible for exclusive access and | ||
175 | * modification. Note, that the lock is only against other writers, readers | ||
176 | * will run concurrently with a writer under RCU. The seqlock is used to | ||
177 | * notify readers if they overlap with a writer. | ||
178 | * | ||
179 | * As the reservation object may be locked by multiple parties in an | ||
180 | * undefined order, a #ww_acquire_ctx is passed to unwind if a cycle | ||
181 | * is detected. See ww_mutex_lock() and ww_acquire_init(). A reservation | ||
182 | * object may be locked by itself by passing NULL as @ctx. | ||
183 | */ | ||
184 | static inline int | ||
185 | reservation_object_lock_interruptible(struct reservation_object *obj, | ||
186 | struct ww_acquire_ctx *ctx) | ||
187 | { | ||
188 | return ww_mutex_lock_interruptible(&obj->lock, ctx); | ||
189 | } | ||
190 | |||
191 | |||
192 | /** | ||
170 | * reservation_object_trylock - trylock the reservation object | 193 | * reservation_object_trylock - trylock the reservation object |
171 | * @obj: the reservation object | 194 | * @obj: the reservation object |
172 | * | 195 | * |
diff --git a/include/linux/reset.h b/include/linux/reset.h index 4c7871ddf3c6..09732c36f351 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h | |||
@@ -2,8 +2,10 @@ | |||
2 | #ifndef _LINUX_RESET_H_ | 2 | #ifndef _LINUX_RESET_H_ |
3 | #define _LINUX_RESET_H_ | 3 | #define _LINUX_RESET_H_ |
4 | 4 | ||
5 | #include <linux/device.h> | 5 | #include <linux/types.h> |
6 | 6 | ||
7 | struct device; | ||
8 | struct device_node; | ||
7 | struct reset_control; | 9 | struct reset_control; |
8 | 10 | ||
9 | #ifdef CONFIG_RESET_CONTROLLER | 11 | #ifdef CONFIG_RESET_CONTROLLER |
@@ -20,22 +22,16 @@ struct reset_control *__reset_control_get(struct device *dev, const char *id, | |||
20 | int index, bool shared, | 22 | int index, bool shared, |
21 | bool optional); | 23 | bool optional); |
22 | void reset_control_put(struct reset_control *rstc); | 24 | void reset_control_put(struct reset_control *rstc); |
25 | int __device_reset(struct device *dev, bool optional); | ||
23 | struct reset_control *__devm_reset_control_get(struct device *dev, | 26 | struct reset_control *__devm_reset_control_get(struct device *dev, |
24 | const char *id, int index, bool shared, | 27 | const char *id, int index, bool shared, |
25 | bool optional); | 28 | bool optional); |
26 | 29 | ||
27 | int __must_check device_reset(struct device *dev); | ||
28 | |||
29 | struct reset_control *devm_reset_control_array_get(struct device *dev, | 30 | struct reset_control *devm_reset_control_array_get(struct device *dev, |
30 | bool shared, bool optional); | 31 | bool shared, bool optional); |
31 | struct reset_control *of_reset_control_array_get(struct device_node *np, | 32 | struct reset_control *of_reset_control_array_get(struct device_node *np, |
32 | bool shared, bool optional); | 33 | bool shared, bool optional); |
33 | 34 | ||
34 | static inline int device_reset_optional(struct device *dev) | ||
35 | { | ||
36 | return device_reset(dev); | ||
37 | } | ||
38 | |||
39 | #else | 35 | #else |
40 | 36 | ||
41 | static inline int reset_control_reset(struct reset_control *rstc) | 37 | static inline int reset_control_reset(struct reset_control *rstc) |
@@ -62,15 +58,9 @@ static inline void reset_control_put(struct reset_control *rstc) | |||
62 | { | 58 | { |
63 | } | 59 | } |
64 | 60 | ||
65 | static inline int __must_check device_reset(struct device *dev) | 61 | static inline int __device_reset(struct device *dev, bool optional) |
66 | { | ||
67 | WARN_ON(1); | ||
68 | return -ENOTSUPP; | ||
69 | } | ||
70 | |||
71 | static inline int device_reset_optional(struct device *dev) | ||
72 | { | 62 | { |
73 | return -ENOTSUPP; | 63 | return optional ? 0 : -ENOTSUPP; |
74 | } | 64 | } |
75 | 65 | ||
76 | static inline struct reset_control *__of_reset_control_get( | 66 | static inline struct reset_control *__of_reset_control_get( |
@@ -109,6 +99,16 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) | |||
109 | 99 | ||
110 | #endif /* CONFIG_RESET_CONTROLLER */ | 100 | #endif /* CONFIG_RESET_CONTROLLER */ |
111 | 101 | ||
102 | static inline int __must_check device_reset(struct device *dev) | ||
103 | { | ||
104 | return __device_reset(dev, false); | ||
105 | } | ||
106 | |||
107 | static inline int device_reset_optional(struct device *dev) | ||
108 | { | ||
109 | return __device_reset(dev, true); | ||
110 | } | ||
111 | |||
112 | /** | 112 | /** |
113 | * reset_control_get_exclusive - Lookup and obtain an exclusive reference | 113 | * reset_control_get_exclusive - Lookup and obtain an exclusive reference |
114 | * to a reset controller. | 114 | * to a reset controller. |
@@ -127,9 +127,6 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) | |||
127 | static inline struct reset_control * | 127 | static inline struct reset_control * |
128 | __must_check reset_control_get_exclusive(struct device *dev, const char *id) | 128 | __must_check reset_control_get_exclusive(struct device *dev, const char *id) |
129 | { | 129 | { |
130 | #ifndef CONFIG_RESET_CONTROLLER | ||
131 | WARN_ON(1); | ||
132 | #endif | ||
133 | return __reset_control_get(dev, id, 0, false, false); | 130 | return __reset_control_get(dev, id, 0, false, false); |
134 | } | 131 | } |
135 | 132 | ||
@@ -275,9 +272,6 @@ static inline struct reset_control * | |||
275 | __must_check devm_reset_control_get_exclusive(struct device *dev, | 272 | __must_check devm_reset_control_get_exclusive(struct device *dev, |
276 | const char *id) | 273 | const char *id) |
277 | { | 274 | { |
278 | #ifndef CONFIG_RESET_CONTROLLER | ||
279 | WARN_ON(1); | ||
280 | #endif | ||
281 | return __devm_reset_control_get(dev, id, 0, false, false); | 275 | return __devm_reset_control_get(dev, id, 0, false, false); |
282 | } | 276 | } |
283 | 277 | ||
@@ -350,18 +344,6 @@ devm_reset_control_get_shared_by_index(struct device *dev, int index) | |||
350 | * These inline function calls will be removed once all consumers | 344 | * These inline function calls will be removed once all consumers |
351 | * have been moved over to the new explicit API. | 345 | * have been moved over to the new explicit API. |
352 | */ | 346 | */ |
353 | static inline struct reset_control *reset_control_get( | ||
354 | struct device *dev, const char *id) | ||
355 | { | ||
356 | return reset_control_get_exclusive(dev, id); | ||
357 | } | ||
358 | |||
359 | static inline struct reset_control *reset_control_get_optional( | ||
360 | struct device *dev, const char *id) | ||
361 | { | ||
362 | return reset_control_get_optional_exclusive(dev, id); | ||
363 | } | ||
364 | |||
365 | static inline struct reset_control *of_reset_control_get( | 347 | static inline struct reset_control *of_reset_control_get( |
366 | struct device_node *node, const char *id) | 348 | struct device_node *node, const char *id) |
367 | { | 349 | { |
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 361c08e35dbc..c9df2527e0cd 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
@@ -207,6 +207,7 @@ struct rhashtable_iter { | |||
207 | struct rhashtable_walker walker; | 207 | struct rhashtable_walker walker; |
208 | unsigned int slot; | 208 | unsigned int slot; |
209 | unsigned int skip; | 209 | unsigned int skip; |
210 | bool end_of_table; | ||
210 | }; | 211 | }; |
211 | 212 | ||
212 | static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) | 213 | static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash) |
@@ -239,34 +240,42 @@ static inline unsigned int rht_bucket_index(const struct bucket_table *tbl, | |||
239 | return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); | 240 | return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1); |
240 | } | 241 | } |
241 | 242 | ||
242 | static inline unsigned int rht_key_hashfn( | 243 | static inline unsigned int rht_key_get_hash(struct rhashtable *ht, |
243 | struct rhashtable *ht, const struct bucket_table *tbl, | 244 | const void *key, const struct rhashtable_params params, |
244 | const void *key, const struct rhashtable_params params) | 245 | unsigned int hash_rnd) |
245 | { | 246 | { |
246 | unsigned int hash; | 247 | unsigned int hash; |
247 | 248 | ||
248 | /* params must be equal to ht->p if it isn't constant. */ | 249 | /* params must be equal to ht->p if it isn't constant. */ |
249 | if (!__builtin_constant_p(params.key_len)) | 250 | if (!__builtin_constant_p(params.key_len)) |
250 | hash = ht->p.hashfn(key, ht->key_len, tbl->hash_rnd); | 251 | hash = ht->p.hashfn(key, ht->key_len, hash_rnd); |
251 | else if (params.key_len) { | 252 | else if (params.key_len) { |
252 | unsigned int key_len = params.key_len; | 253 | unsigned int key_len = params.key_len; |
253 | 254 | ||
254 | if (params.hashfn) | 255 | if (params.hashfn) |
255 | hash = params.hashfn(key, key_len, tbl->hash_rnd); | 256 | hash = params.hashfn(key, key_len, hash_rnd); |
256 | else if (key_len & (sizeof(u32) - 1)) | 257 | else if (key_len & (sizeof(u32) - 1)) |
257 | hash = jhash(key, key_len, tbl->hash_rnd); | 258 | hash = jhash(key, key_len, hash_rnd); |
258 | else | 259 | else |
259 | hash = jhash2(key, key_len / sizeof(u32), | 260 | hash = jhash2(key, key_len / sizeof(u32), hash_rnd); |
260 | tbl->hash_rnd); | ||
261 | } else { | 261 | } else { |
262 | unsigned int key_len = ht->p.key_len; | 262 | unsigned int key_len = ht->p.key_len; |
263 | 263 | ||
264 | if (params.hashfn) | 264 | if (params.hashfn) |
265 | hash = params.hashfn(key, key_len, tbl->hash_rnd); | 265 | hash = params.hashfn(key, key_len, hash_rnd); |
266 | else | 266 | else |
267 | hash = jhash(key, key_len, tbl->hash_rnd); | 267 | hash = jhash(key, key_len, hash_rnd); |
268 | } | 268 | } |
269 | 269 | ||
270 | return hash; | ||
271 | } | ||
272 | |||
273 | static inline unsigned int rht_key_hashfn( | ||
274 | struct rhashtable *ht, const struct bucket_table *tbl, | ||
275 | const void *key, const struct rhashtable_params params) | ||
276 | { | ||
277 | unsigned int hash = rht_key_get_hash(ht, key, params, tbl->hash_rnd); | ||
278 | |||
270 | return rht_bucket_index(tbl, hash); | 279 | return rht_bucket_index(tbl, hash); |
271 | } | 280 | } |
272 | 281 | ||
@@ -378,8 +387,15 @@ void *rhashtable_insert_slow(struct rhashtable *ht, const void *key, | |||
378 | void rhashtable_walk_enter(struct rhashtable *ht, | 387 | void rhashtable_walk_enter(struct rhashtable *ht, |
379 | struct rhashtable_iter *iter); | 388 | struct rhashtable_iter *iter); |
380 | void rhashtable_walk_exit(struct rhashtable_iter *iter); | 389 | void rhashtable_walk_exit(struct rhashtable_iter *iter); |
381 | int rhashtable_walk_start(struct rhashtable_iter *iter) __acquires(RCU); | 390 | int rhashtable_walk_start_check(struct rhashtable_iter *iter) __acquires(RCU); |
391 | |||
392 | static inline void rhashtable_walk_start(struct rhashtable_iter *iter) | ||
393 | { | ||
394 | (void)rhashtable_walk_start_check(iter); | ||
395 | } | ||
396 | |||
382 | void *rhashtable_walk_next(struct rhashtable_iter *iter); | 397 | void *rhashtable_walk_next(struct rhashtable_iter *iter); |
398 | void *rhashtable_walk_peek(struct rhashtable_iter *iter); | ||
383 | void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); | 399 | void rhashtable_walk_stop(struct rhashtable_iter *iter) __releases(RCU); |
384 | 400 | ||
385 | void rhashtable_free_and_destroy(struct rhashtable *ht, | 401 | void rhashtable_free_and_destroy(struct rhashtable *ht, |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 289e4d54e3e0..7d9eb39fa76a 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
@@ -96,7 +96,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k | |||
96 | }) | 96 | }) |
97 | 97 | ||
98 | int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); | 98 | int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full); |
99 | int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, | 99 | __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, |
100 | struct file *filp, poll_table *poll_table); | 100 | struct file *filp, poll_table *poll_table); |
101 | 101 | ||
102 | 102 | ||
diff --git a/include/linux/rpmsg.h b/include/linux/rpmsg.h index 10d6ae8bbb7d..ca07366c4c33 100644 --- a/include/linux/rpmsg.h +++ b/include/linux/rpmsg.h | |||
@@ -157,7 +157,7 @@ int rpmsg_trysendto(struct rpmsg_endpoint *ept, void *data, int len, u32 dst); | |||
157 | int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, | 157 | int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, u32 dst, |
158 | void *data, int len); | 158 | void *data, int len); |
159 | 159 | ||
160 | unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, | 160 | __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, struct file *filp, |
161 | poll_table *wait); | 161 | poll_table *wait); |
162 | 162 | ||
163 | #else | 163 | #else |
@@ -258,7 +258,7 @@ static inline int rpmsg_trysend_offchannel(struct rpmsg_endpoint *ept, u32 src, | |||
258 | return -ENXIO; | 258 | return -ENXIO; |
259 | } | 259 | } |
260 | 260 | ||
261 | static inline unsigned int rpmsg_poll(struct rpmsg_endpoint *ept, | 261 | static inline __poll_t rpmsg_poll(struct rpmsg_endpoint *ept, |
262 | struct file *filp, poll_table *wait) | 262 | struct file *filp, poll_table *wait) |
263 | { | 263 | { |
264 | /* This shouldn't be possible */ | 264 | /* This shouldn't be possible */ |
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 41319a2e409b..fc6c90b57be0 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -87,7 +87,6 @@ struct rtc_class_ops { | |||
87 | int (*set_offset)(struct device *, long offset); | 87 | int (*set_offset)(struct device *, long offset); |
88 | }; | 88 | }; |
89 | 89 | ||
90 | #define RTC_DEVICE_NAME_SIZE 20 | ||
91 | typedef struct rtc_task { | 90 | typedef struct rtc_task { |
92 | void (*func)(void *private_data); | 91 | void (*func)(void *private_data); |
93 | void *private_data; | 92 | void *private_data; |
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h index 2032ce2eb20b..1fdcde96eb65 100644 --- a/include/linux/rtnetlink.h +++ b/include/linux/rtnetlink.h | |||
@@ -19,10 +19,11 @@ extern int rtnl_put_cacheinfo(struct sk_buff *skb, struct dst_entry *dst, | |||
19 | 19 | ||
20 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); | 20 | void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change, gfp_t flags); |
21 | void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, | 21 | void rtmsg_ifinfo_newnet(int type, struct net_device *dev, unsigned int change, |
22 | gfp_t flags, int *new_nsid); | 22 | gfp_t flags, int *new_nsid, int new_ifindex); |
23 | struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, | 23 | struct sk_buff *rtmsg_ifinfo_build_skb(int type, struct net_device *dev, |
24 | unsigned change, u32 event, | 24 | unsigned change, u32 event, |
25 | gfp_t flags, int *new_nsid); | 25 | gfp_t flags, int *new_nsid, |
26 | int new_ifindex); | ||
26 | void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, | 27 | void rtmsg_ifinfo_send(struct sk_buff *skb, struct net_device *dev, |
27 | gfp_t flags); | 28 | gfp_t flags); |
28 | 29 | ||
@@ -70,8 +71,7 @@ static inline bool lockdep_rtnl_is_held(void) | |||
70 | * @p: The pointer to read, prior to dereferencing | 71 | * @p: The pointer to read, prior to dereferencing |
71 | * | 72 | * |
72 | * Return the value of the specified RCU-protected pointer, but omit | 73 | * Return the value of the specified RCU-protected pointer, but omit |
73 | * both the smp_read_barrier_depends() and the READ_ONCE(), because | 74 | * the READ_ONCE(), because caller holds RTNL. |
74 | * caller holds RTNL. | ||
75 | */ | 75 | */ |
76 | #define rtnl_dereference(p) \ | 76 | #define rtnl_dereference(p) \ |
77 | rcu_dereference_protected(p, lockdep_rtnl_is_held()) | 77 | rcu_dereference_protected(p, lockdep_rtnl_is_held()) |
@@ -97,13 +97,9 @@ void rtnetlink_init(void); | |||
97 | void __rtnl_unlock(void); | 97 | void __rtnl_unlock(void); |
98 | void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); | 98 | void rtnl_kfree_skbs(struct sk_buff *head, struct sk_buff *tail); |
99 | 99 | ||
100 | #define ASSERT_RTNL() do { \ | 100 | #define ASSERT_RTNL() \ |
101 | if (unlikely(!rtnl_is_locked())) { \ | 101 | WARN_ONCE(!rtnl_is_locked(), \ |
102 | printk(KERN_ERR "RTNL: assertion failed at %s (%d)\n", \ | 102 | "RTNL: assertion failed at %s (%d)\n", __FILE__, __LINE__) |
103 | __FILE__, __LINE__); \ | ||
104 | dump_stack(); \ | ||
105 | } \ | ||
106 | } while(0) | ||
107 | 103 | ||
108 | extern int ndo_dflt_fdb_dump(struct sk_buff *skb, | 104 | extern int ndo_dflt_fdb_dump(struct sk_buff *skb, |
109 | struct netlink_callback *cb, | 105 | struct netlink_callback *cb, |
diff --git a/include/linux/mfd/rtsx_common.h b/include/linux/rtsx_common.h index 443176ee1ab0..443176ee1ab0 100644 --- a/include/linux/mfd/rtsx_common.h +++ b/include/linux/rtsx_common.h | |||
diff --git a/include/linux/mfd/rtsx_pci.h b/include/linux/rtsx_pci.h index c3d3f04d8cc6..478acf6efac6 100644 --- a/include/linux/mfd/rtsx_pci.h +++ b/include/linux/rtsx_pci.h | |||
@@ -24,7 +24,7 @@ | |||
24 | 24 | ||
25 | #include <linux/sched.h> | 25 | #include <linux/sched.h> |
26 | #include <linux/pci.h> | 26 | #include <linux/pci.h> |
27 | #include <linux/mfd/rtsx_common.h> | 27 | #include <linux/rtsx_common.h> |
28 | 28 | ||
29 | #define MAX_RW_REG_CNT 1024 | 29 | #define MAX_RW_REG_CNT 1024 |
30 | 30 | ||
@@ -203,6 +203,7 @@ | |||
203 | #define SD_DDR_MODE 0x04 | 203 | #define SD_DDR_MODE 0x04 |
204 | #define SD_30_MODE 0x08 | 204 | #define SD_30_MODE 0x08 |
205 | #define SD_CLK_DIVIDE_MASK 0xC0 | 205 | #define SD_CLK_DIVIDE_MASK 0xC0 |
206 | #define SD_MODE_SELECT_MASK 0x0C | ||
206 | #define SD_CFG2 0xFDA1 | 207 | #define SD_CFG2 0xFDA1 |
207 | #define SD_CALCULATE_CRC7 0x00 | 208 | #define SD_CALCULATE_CRC7 0x00 |
208 | #define SD_NO_CALCULATE_CRC7 0x80 | 209 | #define SD_NO_CALCULATE_CRC7 0x80 |
@@ -226,6 +227,7 @@ | |||
226 | #define SD_RSP_TYPE_R6 0x01 | 227 | #define SD_RSP_TYPE_R6 0x01 |
227 | #define SD_RSP_TYPE_R7 0x01 | 228 | #define SD_RSP_TYPE_R7 0x01 |
228 | #define SD_CFG3 0xFDA2 | 229 | #define SD_CFG3 0xFDA2 |
230 | #define SD30_CLK_END_EN 0x10 | ||
229 | #define SD_RSP_80CLK_TIMEOUT_EN 0x01 | 231 | #define SD_RSP_80CLK_TIMEOUT_EN 0x01 |
230 | 232 | ||
231 | #define SD_STAT1 0xFDA3 | 233 | #define SD_STAT1 0xFDA3 |
@@ -309,6 +311,12 @@ | |||
309 | 311 | ||
310 | #define SD_DATA_STATE 0xFDB6 | 312 | #define SD_DATA_STATE 0xFDB6 |
311 | #define SD_DATA_IDLE 0x80 | 313 | #define SD_DATA_IDLE 0x80 |
314 | #define REG_SD_STOP_SDCLK_CFG 0xFDB8 | ||
315 | #define SD30_CLK_STOP_CFG_EN 0x04 | ||
316 | #define SD30_CLK_STOP_CFG1 0x02 | ||
317 | #define SD30_CLK_STOP_CFG0 0x01 | ||
318 | #define REG_PRE_RW_MODE 0xFD70 | ||
319 | #define EN_INFINITE_MODE 0x01 | ||
312 | 320 | ||
313 | #define SRCTL 0xFC13 | 321 | #define SRCTL 0xFC13 |
314 | 322 | ||
@@ -434,6 +442,7 @@ | |||
434 | #define CARD_CLK_EN 0xFD69 | 442 | #define CARD_CLK_EN 0xFD69 |
435 | #define SD_CLK_EN 0x04 | 443 | #define SD_CLK_EN 0x04 |
436 | #define MS_CLK_EN 0x08 | 444 | #define MS_CLK_EN 0x08 |
445 | #define SD40_CLK_EN 0x10 | ||
437 | #define SDIO_CTRL 0xFD6B | 446 | #define SDIO_CTRL 0xFD6B |
438 | #define CD_PAD_CTL 0xFD73 | 447 | #define CD_PAD_CTL 0xFD73 |
439 | #define CD_DISABLE_MASK 0x07 | 448 | #define CD_DISABLE_MASK 0x07 |
@@ -453,8 +462,8 @@ | |||
453 | #define FPDCTL 0xFC00 | 462 | #define FPDCTL 0xFC00 |
454 | #define SSC_POWER_DOWN 0x01 | 463 | #define SSC_POWER_DOWN 0x01 |
455 | #define SD_OC_POWER_DOWN 0x02 | 464 | #define SD_OC_POWER_DOWN 0x02 |
456 | #define ALL_POWER_DOWN 0x07 | 465 | #define ALL_POWER_DOWN 0x03 |
457 | #define OC_POWER_DOWN 0x06 | 466 | #define OC_POWER_DOWN 0x02 |
458 | #define PDINFO 0xFC01 | 467 | #define PDINFO 0xFC01 |
459 | 468 | ||
460 | #define CLK_CTL 0xFC02 | 469 | #define CLK_CTL 0xFC02 |
@@ -490,6 +499,9 @@ | |||
490 | 499 | ||
491 | #define FPGA_PULL_CTL 0xFC1D | 500 | #define FPGA_PULL_CTL 0xFC1D |
492 | #define OLT_LED_CTL 0xFC1E | 501 | #define OLT_LED_CTL 0xFC1E |
502 | #define LED_SHINE_MASK 0x08 | ||
503 | #define LED_SHINE_EN 0x08 | ||
504 | #define LED_SHINE_DISABLE 0x00 | ||
493 | #define GPIO_CTL 0xFC1F | 505 | #define GPIO_CTL 0xFC1F |
494 | 506 | ||
495 | #define LDO_CTL 0xFC1E | 507 | #define LDO_CTL 0xFC1E |
@@ -511,7 +523,11 @@ | |||
511 | #define BPP_LDO_ON 0x00 | 523 | #define BPP_LDO_ON 0x00 |
512 | #define BPP_LDO_SUSPEND 0x02 | 524 | #define BPP_LDO_SUSPEND 0x02 |
513 | #define BPP_LDO_OFF 0x03 | 525 | #define BPP_LDO_OFF 0x03 |
526 | #define EFUSE_CTL 0xFC30 | ||
527 | #define EFUSE_ADD 0xFC31 | ||
514 | #define SYS_VER 0xFC32 | 528 | #define SYS_VER 0xFC32 |
529 | #define EFUSE_DATAL 0xFC34 | ||
530 | #define EFUSE_DATAH 0xFC35 | ||
515 | 531 | ||
516 | #define CARD_PULL_CTL1 0xFD60 | 532 | #define CARD_PULL_CTL1 0xFD60 |
517 | #define CARD_PULL_CTL2 0xFD61 | 533 | #define CARD_PULL_CTL2 0xFD61 |
@@ -553,6 +569,9 @@ | |||
553 | #define RBBC1 0xFE2F | 569 | #define RBBC1 0xFE2F |
554 | #define RBDAT 0xFE30 | 570 | #define RBDAT 0xFE30 |
555 | #define RBCTL 0xFE34 | 571 | #define RBCTL 0xFE34 |
572 | #define U_AUTO_DMA_EN_MASK 0x20 | ||
573 | #define U_AUTO_DMA_DISABLE 0x00 | ||
574 | #define RB_FLUSH 0x80 | ||
556 | #define CFGADDR0 0xFE35 | 575 | #define CFGADDR0 0xFE35 |
557 | #define CFGADDR1 0xFE36 | 576 | #define CFGADDR1 0xFE36 |
558 | #define CFGDATA0 0xFE37 | 577 | #define CFGDATA0 0xFE37 |
@@ -581,6 +600,8 @@ | |||
581 | #define LTR_LATENCY_MODE_HW 0 | 600 | #define LTR_LATENCY_MODE_HW 0 |
582 | #define LTR_LATENCY_MODE_SW BIT(6) | 601 | #define LTR_LATENCY_MODE_SW BIT(6) |
583 | #define OBFF_CFG 0xFE4C | 602 | #define OBFF_CFG 0xFE4C |
603 | #define OBFF_EN_MASK 0x03 | ||
604 | #define OBFF_DISABLE 0x00 | ||
584 | 605 | ||
585 | #define CDRESUMECTL 0xFE52 | 606 | #define CDRESUMECTL 0xFE52 |
586 | #define WAKE_SEL_CTL 0xFE54 | 607 | #define WAKE_SEL_CTL 0xFE54 |
@@ -595,6 +616,7 @@ | |||
595 | #define FORCE_ASPM_L0_EN 0x01 | 616 | #define FORCE_ASPM_L0_EN 0x01 |
596 | #define FORCE_ASPM_NO_ASPM 0x00 | 617 | #define FORCE_ASPM_NO_ASPM 0x00 |
597 | #define PM_CLK_FORCE_CTL 0xFE58 | 618 | #define PM_CLK_FORCE_CTL 0xFE58 |
619 | #define CLK_PM_EN 0x01 | ||
598 | #define FUNC_FORCE_CTL 0xFE59 | 620 | #define FUNC_FORCE_CTL 0xFE59 |
599 | #define FUNC_FORCE_UPME_XMT_DBG 0x02 | 621 | #define FUNC_FORCE_UPME_XMT_DBG 0x02 |
600 | #define PERST_GLITCH_WIDTH 0xFE5C | 622 | #define PERST_GLITCH_WIDTH 0xFE5C |
@@ -620,14 +642,23 @@ | |||
620 | #define LDO_PWR_SEL 0xFE78 | 642 | #define LDO_PWR_SEL 0xFE78 |
621 | 643 | ||
622 | #define L1SUB_CONFIG1 0xFE8D | 644 | #define L1SUB_CONFIG1 0xFE8D |
645 | #define AUX_CLK_ACTIVE_SEL_MASK 0x01 | ||
646 | #define MAC_CKSW_DONE 0x00 | ||
623 | #define L1SUB_CONFIG2 0xFE8E | 647 | #define L1SUB_CONFIG2 0xFE8E |
624 | #define L1SUB_AUTO_CFG 0x02 | 648 | #define L1SUB_AUTO_CFG 0x02 |
625 | #define L1SUB_CONFIG3 0xFE8F | 649 | #define L1SUB_CONFIG3 0xFE8F |
626 | #define L1OFF_MBIAS2_EN_5250 BIT(7) | 650 | #define L1OFF_MBIAS2_EN_5250 BIT(7) |
627 | 651 | ||
628 | #define DUMMY_REG_RESET_0 0xFE90 | 652 | #define DUMMY_REG_RESET_0 0xFE90 |
653 | #define IC_VERSION_MASK 0x0F | ||
629 | 654 | ||
655 | #define REG_VREF 0xFE97 | ||
656 | #define PWD_SUSPND_EN 0x10 | ||
657 | #define RTS5260_DMA_RST_CTL_0 0xFEBF | ||
658 | #define RTS5260_DMA_RST 0x80 | ||
659 | #define RTS5260_ADMA3_RST 0x40 | ||
630 | #define AUTOLOAD_CFG_BASE 0xFF00 | 660 | #define AUTOLOAD_CFG_BASE 0xFF00 |
661 | #define RELINK_TIME_MASK 0x01 | ||
631 | #define PETXCFG 0xFF03 | 662 | #define PETXCFG 0xFF03 |
632 | #define FORCE_CLKREQ_DELINK_MASK BIT(7) | 663 | #define FORCE_CLKREQ_DELINK_MASK BIT(7) |
633 | #define FORCE_CLKREQ_LOW 0x80 | 664 | #define FORCE_CLKREQ_LOW 0x80 |
@@ -667,15 +698,24 @@ | |||
667 | #define LDO_DV18_CFG 0xFF70 | 698 | #define LDO_DV18_CFG 0xFF70 |
668 | #define LDO_DV18_SR_MASK 0xC0 | 699 | #define LDO_DV18_SR_MASK 0xC0 |
669 | #define LDO_DV18_SR_DF 0x40 | 700 | #define LDO_DV18_SR_DF 0x40 |
701 | #define DV331812_MASK 0x70 | ||
702 | #define DV331812_33 0x70 | ||
703 | #define DV331812_17 0x30 | ||
670 | 704 | ||
671 | #define LDO_CONFIG2 0xFF71 | 705 | #define LDO_CONFIG2 0xFF71 |
672 | #define LDO_D3318_MASK 0x07 | 706 | #define LDO_D3318_MASK 0x07 |
673 | #define LDO_D3318_33V 0x07 | 707 | #define LDO_D3318_33V 0x07 |
674 | #define LDO_D3318_18V 0x02 | 708 | #define LDO_D3318_18V 0x02 |
709 | #define DV331812_VDD1 0x04 | ||
710 | #define DV331812_POWERON 0x08 | ||
711 | #define DV331812_POWEROFF 0x00 | ||
675 | 712 | ||
676 | #define LDO_VCC_CFG0 0xFF72 | 713 | #define LDO_VCC_CFG0 0xFF72 |
677 | #define LDO_VCC_LMTVTH_MASK 0x30 | 714 | #define LDO_VCC_LMTVTH_MASK 0x30 |
678 | #define LDO_VCC_LMTVTH_2A 0x10 | 715 | #define LDO_VCC_LMTVTH_2A 0x10 |
716 | /*RTS5260*/ | ||
717 | #define RTS5260_DVCC_TUNE_MASK 0x70 | ||
718 | #define RTS5260_DVCC_33 0x70 | ||
679 | 719 | ||
680 | #define LDO_VCC_CFG1 0xFF73 | 720 | #define LDO_VCC_CFG1 0xFF73 |
681 | #define LDO_VCC_REF_TUNE_MASK 0x30 | 721 | #define LDO_VCC_REF_TUNE_MASK 0x30 |
@@ -684,6 +724,10 @@ | |||
684 | #define LDO_VCC_1V8 0x04 | 724 | #define LDO_VCC_1V8 0x04 |
685 | #define LDO_VCC_3V3 0x07 | 725 | #define LDO_VCC_3V3 0x07 |
686 | #define LDO_VCC_LMT_EN 0x08 | 726 | #define LDO_VCC_LMT_EN 0x08 |
727 | /*RTS5260*/ | ||
728 | #define LDO_POW_SDVDD1_MASK 0x08 | ||
729 | #define LDO_POW_SDVDD1_ON 0x08 | ||
730 | #define LDO_POW_SDVDD1_OFF 0x00 | ||
687 | 731 | ||
688 | #define LDO_VIO_CFG 0xFF75 | 732 | #define LDO_VIO_CFG 0xFF75 |
689 | #define LDO_VIO_SR_MASK 0xC0 | 733 | #define LDO_VIO_SR_MASK 0xC0 |
@@ -711,6 +755,160 @@ | |||
711 | #define SD_VIO_LDO_1V8 0x40 | 755 | #define SD_VIO_LDO_1V8 0x40 |
712 | #define SD_VIO_LDO_3V3 0x70 | 756 | #define SD_VIO_LDO_3V3 0x70 |
713 | 757 | ||
758 | #define RTS5260_AUTOLOAD_CFG4 0xFF7F | ||
759 | #define RTS5260_MIMO_DISABLE 0x8A | ||
760 | |||
761 | #define RTS5260_REG_GPIO_CTL0 0xFC1A | ||
762 | #define RTS5260_REG_GPIO_MASK 0x01 | ||
763 | #define RTS5260_REG_GPIO_ON 0x01 | ||
764 | #define RTS5260_REG_GPIO_OFF 0x00 | ||
765 | |||
766 | #define PWR_GLOBAL_CTRL 0xF200 | ||
767 | #define PCIE_L1_2_EN 0x0C | ||
768 | #define PCIE_L1_1_EN 0x0A | ||
769 | #define PCIE_L1_0_EN 0x09 | ||
770 | #define PWR_FE_CTL 0xF201 | ||
771 | #define PCIE_L1_2_PD_FE_EN 0x0C | ||
772 | #define PCIE_L1_1_PD_FE_EN 0x0A | ||
773 | #define PCIE_L1_0_PD_FE_EN 0x09 | ||
774 | #define CFG_PCIE_APHY_OFF_0 0xF204 | ||
775 | #define CFG_PCIE_APHY_OFF_0_DEFAULT 0xBF | ||
776 | #define CFG_PCIE_APHY_OFF_1 0xF205 | ||
777 | #define CFG_PCIE_APHY_OFF_1_DEFAULT 0xFF | ||
778 | #define CFG_PCIE_APHY_OFF_2 0xF206 | ||
779 | #define CFG_PCIE_APHY_OFF_2_DEFAULT 0x01 | ||
780 | #define CFG_PCIE_APHY_OFF_3 0xF207 | ||
781 | #define CFG_PCIE_APHY_OFF_3_DEFAULT 0x00 | ||
782 | #define CFG_L1_0_PCIE_MAC_RET_VALUE 0xF20C | ||
783 | #define CFG_L1_0_PCIE_DPHY_RET_VALUE 0xF20E | ||
784 | #define CFG_L1_0_SYS_RET_VALUE 0xF210 | ||
785 | #define CFG_L1_0_CRC_MISC_RET_VALUE 0xF212 | ||
786 | #define CFG_L1_0_CRC_SD30_RET_VALUE 0xF214 | ||
787 | #define CFG_L1_0_CRC_SD40_RET_VALUE 0xF216 | ||
788 | #define CFG_LP_FPWM_VALUE 0xF219 | ||
789 | #define CFG_LP_FPWM_VALUE_DEFAULT 0x18 | ||
790 | #define PWC_CDR 0xF253 | ||
791 | #define PWC_CDR_DEFAULT 0x03 | ||
792 | #define CFG_L1_0_RET_VALUE_DEFAULT 0x1B | ||
793 | #define CFG_L1_0_CRC_MISC_RET_VALUE_DEFAULT 0x0C | ||
794 | |||
795 | /* OCPCTL */ | ||
796 | #define SD_DETECT_EN 0x08 | ||
797 | #define SD_OCP_INT_EN 0x04 | ||
798 | #define SD_OCP_INT_CLR 0x02 | ||
799 | #define SD_OC_CLR 0x01 | ||
800 | |||
801 | #define SDVIO_DETECT_EN (1 << 7) | ||
802 | #define SDVIO_OCP_INT_EN (1 << 6) | ||
803 | #define SDVIO_OCP_INT_CLR (1 << 5) | ||
804 | #define SDVIO_OC_CLR (1 << 4) | ||
805 | |||
806 | /* OCPSTAT */ | ||
807 | #define SD_OCP_DETECT 0x08 | ||
808 | #define SD_OC_NOW 0x04 | ||
809 | #define SD_OC_EVER 0x02 | ||
810 | |||
811 | #define SDVIO_OC_NOW (1 << 6) | ||
812 | #define SDVIO_OC_EVER (1 << 5) | ||
813 | |||
814 | #define REG_OCPCTL 0xFD6A | ||
815 | #define REG_OCPSTAT 0xFD6E | ||
816 | #define REG_OCPGLITCH 0xFD6C | ||
817 | #define REG_OCPPARA1 0xFD6B | ||
818 | #define REG_OCPPARA2 0xFD6D | ||
819 | |||
820 | /* rts5260 DV3318 OCP-related registers */ | ||
821 | #define REG_DV3318_OCPCTL 0xFD89 | ||
822 | #define DV3318_OCP_TIME_MASK 0xF0 | ||
823 | #define DV3318_DETECT_EN 0x08 | ||
824 | #define DV3318_OCP_INT_EN 0x04 | ||
825 | #define DV3318_OCP_INT_CLR 0x02 | ||
826 | #define DV3318_OCP_CLR 0x01 | ||
827 | |||
828 | #define REG_DV3318_OCPSTAT 0xFD8A | ||
829 | #define DV3318_OCP_GlITCH_TIME_MASK 0xF0 | ||
830 | #define DV3318_OCP_DETECT 0x08 | ||
831 | #define DV3318_OCP_NOW 0x04 | ||
832 | #define DV3318_OCP_EVER 0x02 | ||
833 | |||
834 | #define SD_OCP_GLITCH_MASK 0x0F | ||
835 | |||
836 | /* OCPPARA1 */ | ||
837 | #define SDVIO_OCP_TIME_60 0x00 | ||
838 | #define SDVIO_OCP_TIME_100 0x10 | ||
839 | #define SDVIO_OCP_TIME_200 0x20 | ||
840 | #define SDVIO_OCP_TIME_400 0x30 | ||
841 | #define SDVIO_OCP_TIME_600 0x40 | ||
842 | #define SDVIO_OCP_TIME_800 0x50 | ||
843 | #define SDVIO_OCP_TIME_1100 0x60 | ||
844 | #define SDVIO_OCP_TIME_MASK 0x70 | ||
845 | |||
846 | #define SD_OCP_TIME_60 0x00 | ||
847 | #define SD_OCP_TIME_100 0x01 | ||
848 | #define SD_OCP_TIME_200 0x02 | ||
849 | #define SD_OCP_TIME_400 0x03 | ||
850 | #define SD_OCP_TIME_600 0x04 | ||
851 | #define SD_OCP_TIME_800 0x05 | ||
852 | #define SD_OCP_TIME_1100 0x06 | ||
853 | #define SD_OCP_TIME_MASK 0x07 | ||
854 | |||
855 | /* OCPPARA2 */ | ||
856 | #define SDVIO_OCP_THD_190 0x00 | ||
857 | #define SDVIO_OCP_THD_250 0x10 | ||
858 | #define SDVIO_OCP_THD_320 0x20 | ||
859 | #define SDVIO_OCP_THD_380 0x30 | ||
860 | #define SDVIO_OCP_THD_440 0x40 | ||
861 | #define SDVIO_OCP_THD_500 0x50 | ||
862 | #define SDVIO_OCP_THD_570 0x60 | ||
863 | #define SDVIO_OCP_THD_630 0x70 | ||
864 | #define SDVIO_OCP_THD_MASK 0x70 | ||
865 | |||
866 | #define SD_OCP_THD_450 0x00 | ||
867 | #define SD_OCP_THD_550 0x01 | ||
868 | #define SD_OCP_THD_650 0x02 | ||
869 | #define SD_OCP_THD_750 0x03 | ||
870 | #define SD_OCP_THD_850 0x04 | ||
871 | #define SD_OCP_THD_950 0x05 | ||
872 | #define SD_OCP_THD_1050 0x06 | ||
873 | #define SD_OCP_THD_1150 0x07 | ||
874 | #define SD_OCP_THD_MASK 0x07 | ||
875 | |||
876 | #define SDVIO_OCP_GLITCH_MASK 0xF0 | ||
877 | #define SDVIO_OCP_GLITCH_NONE 0x00 | ||
878 | #define SDVIO_OCP_GLITCH_50U 0x10 | ||
879 | #define SDVIO_OCP_GLITCH_100U 0x20 | ||
880 | #define SDVIO_OCP_GLITCH_200U 0x30 | ||
881 | #define SDVIO_OCP_GLITCH_600U 0x40 | ||
882 | #define SDVIO_OCP_GLITCH_800U 0x50 | ||
883 | #define SDVIO_OCP_GLITCH_1M 0x60 | ||
884 | #define SDVIO_OCP_GLITCH_2M 0x70 | ||
885 | #define SDVIO_OCP_GLITCH_3M 0x80 | ||
886 | #define SDVIO_OCP_GLITCH_4M 0x90 | ||
887 | #define SDVIO_OCP_GLIVCH_5M 0xA0 | ||
888 | #define SDVIO_OCP_GLITCH_6M 0xB0 | ||
889 | #define SDVIO_OCP_GLITCH_7M 0xC0 | ||
890 | #define SDVIO_OCP_GLITCH_8M 0xD0 | ||
891 | #define SDVIO_OCP_GLITCH_9M 0xE0 | ||
892 | #define SDVIO_OCP_GLITCH_10M 0xF0 | ||
893 | |||
894 | #define SD_OCP_GLITCH_MASK 0x0F | ||
895 | #define SD_OCP_GLITCH_NONE 0x00 | ||
896 | #define SD_OCP_GLITCH_50U 0x01 | ||
897 | #define SD_OCP_GLITCH_100U 0x02 | ||
898 | #define SD_OCP_GLITCH_200U 0x03 | ||
899 | #define SD_OCP_GLITCH_600U 0x04 | ||
900 | #define SD_OCP_GLITCH_800U 0x05 | ||
901 | #define SD_OCP_GLITCH_1M 0x06 | ||
902 | #define SD_OCP_GLITCH_2M 0x07 | ||
903 | #define SD_OCP_GLITCH_3M 0x08 | ||
904 | #define SD_OCP_GLITCH_4M 0x09 | ||
905 | #define SD_OCP_GLIVCH_5M 0x0A | ||
906 | #define SD_OCP_GLITCH_6M 0x0B | ||
907 | #define SD_OCP_GLITCH_7M 0x0C | ||
908 | #define SD_OCP_GLITCH_8M 0x0D | ||
909 | #define SD_OCP_GLITCH_9M 0x0E | ||
910 | #define SD_OCP_GLITCH_10M 0x0F | ||
911 | |||
714 | /* Phy register */ | 912 | /* Phy register */ |
715 | #define PHY_PCR 0x00 | 913 | #define PHY_PCR 0x00 |
716 | #define PHY_PCR_FORCE_CODE 0xB000 | 914 | #define PHY_PCR_FORCE_CODE 0xB000 |
@@ -857,6 +1055,7 @@ | |||
857 | 1055 | ||
858 | #define PCR_ASPM_SETTING_REG1 0x160 | 1056 | #define PCR_ASPM_SETTING_REG1 0x160 |
859 | #define PCR_ASPM_SETTING_REG2 0x168 | 1057 | #define PCR_ASPM_SETTING_REG2 0x168 |
1058 | #define PCR_ASPM_SETTING_5260 0x178 | ||
860 | 1059 | ||
861 | #define PCR_SETTING_REG1 0x724 | 1060 | #define PCR_SETTING_REG1 0x724 |
862 | #define PCR_SETTING_REG2 0x814 | 1061 | #define PCR_SETTING_REG2 0x814 |
@@ -890,6 +1089,7 @@ struct pcr_ops { | |||
890 | int (*conv_clk_and_div_n)(int clk, int dir); | 1089 | int (*conv_clk_and_div_n)(int clk, int dir); |
891 | void (*fetch_vendor_settings)(struct rtsx_pcr *pcr); | 1090 | void (*fetch_vendor_settings)(struct rtsx_pcr *pcr); |
892 | void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state); | 1091 | void (*force_power_down)(struct rtsx_pcr *pcr, u8 pm_state); |
1092 | void (*stop_cmd)(struct rtsx_pcr *pcr); | ||
893 | 1093 | ||
894 | void (*set_aspm)(struct rtsx_pcr *pcr, bool enable); | 1094 | void (*set_aspm)(struct rtsx_pcr *pcr, bool enable); |
895 | int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency); | 1095 | int (*set_ltr_latency)(struct rtsx_pcr *pcr, u32 latency); |
@@ -897,6 +1097,12 @@ struct pcr_ops { | |||
897 | void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active); | 1097 | void (*set_l1off_cfg_sub_d0)(struct rtsx_pcr *pcr, int active); |
898 | void (*full_on)(struct rtsx_pcr *pcr); | 1098 | void (*full_on)(struct rtsx_pcr *pcr); |
899 | void (*power_saving)(struct rtsx_pcr *pcr); | 1099 | void (*power_saving)(struct rtsx_pcr *pcr); |
1100 | void (*enable_ocp)(struct rtsx_pcr *pcr); | ||
1101 | void (*disable_ocp)(struct rtsx_pcr *pcr); | ||
1102 | void (*init_ocp)(struct rtsx_pcr *pcr); | ||
1103 | void (*process_ocp)(struct rtsx_pcr *pcr); | ||
1104 | int (*get_ocpstat)(struct rtsx_pcr *pcr, u8 *val); | ||
1105 | void (*clear_ocpstat)(struct rtsx_pcr *pcr); | ||
900 | }; | 1106 | }; |
901 | 1107 | ||
902 | enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; | 1108 | enum PDEV_STAT {PDEV_STAT_IDLE, PDEV_STAT_RUN}; |
@@ -935,6 +1141,9 @@ enum dev_aspm_mode { | |||
935 | * @l1_snooze_delay: l1 snooze delay | 1141 | * @l1_snooze_delay: l1 snooze delay |
936 | * @ltr_l1off_sspwrgate: ltr l1off sspwrgate | 1142 | * @ltr_l1off_sspwrgate: ltr l1off sspwrgate |
937 | * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate | 1143 | * @ltr_l1off_snooze_sspwrgate: ltr l1off snooze sspwrgate |
1144 | * @ocp_en: enable ocp flag | ||
1145 | * @sd_400mA_ocp_thd: 400mA ocp thd | ||
1146 | * @sd_800mA_ocp_thd: 800mA ocp thd | ||
938 | */ | 1147 | */ |
939 | struct rtsx_cr_option { | 1148 | struct rtsx_cr_option { |
940 | u32 dev_flags; | 1149 | u32 dev_flags; |
@@ -949,6 +1158,19 @@ struct rtsx_cr_option { | |||
949 | u32 l1_snooze_delay; | 1158 | u32 l1_snooze_delay; |
950 | u8 ltr_l1off_sspwrgate; | 1159 | u8 ltr_l1off_sspwrgate; |
951 | u8 ltr_l1off_snooze_sspwrgate; | 1160 | u8 ltr_l1off_snooze_sspwrgate; |
1161 | bool ocp_en; | ||
1162 | u8 sd_400mA_ocp_thd; | ||
1163 | u8 sd_800mA_ocp_thd; | ||
1164 | }; | ||
1165 | |||
1166 | /* | ||
1167 | * struct rtsx_hw_param - card reader hardware param | ||
1168 | * @interrupt_en: indicate which interrutp enable | ||
1169 | * @ocp_glitch: ocp glitch time | ||
1170 | */ | ||
1171 | struct rtsx_hw_param { | ||
1172 | u32 interrupt_en; | ||
1173 | u8 ocp_glitch; | ||
952 | }; | 1174 | }; |
953 | 1175 | ||
954 | #define rtsx_set_dev_flag(cr, flag) \ | 1176 | #define rtsx_set_dev_flag(cr, flag) \ |
@@ -963,6 +1185,7 @@ struct rtsx_pcr { | |||
963 | unsigned int id; | 1185 | unsigned int id; |
964 | int pcie_cap; | 1186 | int pcie_cap; |
965 | struct rtsx_cr_option option; | 1187 | struct rtsx_cr_option option; |
1188 | struct rtsx_hw_param hw_param; | ||
966 | 1189 | ||
967 | /* pci resources */ | 1190 | /* pci resources */ |
968 | unsigned long addr; | 1191 | unsigned long addr; |
@@ -1042,12 +1265,15 @@ struct rtsx_pcr { | |||
1042 | struct rtsx_slot *slots; | 1265 | struct rtsx_slot *slots; |
1043 | 1266 | ||
1044 | u8 dma_error_count; | 1267 | u8 dma_error_count; |
1268 | u8 ocp_stat; | ||
1269 | u8 ocp_stat2; | ||
1045 | }; | 1270 | }; |
1046 | 1271 | ||
1047 | #define PID_524A 0x524A | 1272 | #define PID_524A 0x524A |
1048 | #define PID_5249 0x5249 | 1273 | #define PID_5249 0x5249 |
1049 | #define PID_5250 0x5250 | 1274 | #define PID_5250 0x5250 |
1050 | #define PID_525A 0x525A | 1275 | #define PID_525A 0x525A |
1276 | #define PID_5260 0x5260 | ||
1051 | 1277 | ||
1052 | #define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) | 1278 | #define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid)) |
1053 | #define PCI_VID(pcr) ((pcr)->pci->vendor) | 1279 | #define PCI_VID(pcr) ((pcr)->pci->vendor) |
diff --git a/include/linux/mfd/rtsx_usb.h b/include/linux/rtsx_usb.h index c446e4fd6b5c..c446e4fd6b5c 100644 --- a/include/linux/mfd/rtsx_usb.h +++ b/include/linux/rtsx_usb.h | |||
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index b7c83254c566..22b2131bcdcd 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
@@ -276,6 +276,17 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, | |||
276 | unsigned int n_pages, unsigned int offset, | 276 | unsigned int n_pages, unsigned int offset, |
277 | unsigned long size, gfp_t gfp_mask); | 277 | unsigned long size, gfp_t gfp_mask); |
278 | 278 | ||
279 | #ifdef CONFIG_SGL_ALLOC | ||
280 | struct scatterlist *sgl_alloc_order(unsigned long long length, | ||
281 | unsigned int order, bool chainable, | ||
282 | gfp_t gfp, unsigned int *nent_p); | ||
283 | struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp, | ||
284 | unsigned int *nent_p); | ||
285 | void sgl_free_n_order(struct scatterlist *sgl, int nents, int order); | ||
286 | void sgl_free_order(struct scatterlist *sgl, int order); | ||
287 | void sgl_free(struct scatterlist *sgl); | ||
288 | #endif /* CONFIG_SGL_ALLOC */ | ||
289 | |||
279 | size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, | 290 | size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf, |
280 | size_t buflen, off_t skip, bool to_buffer); | 291 | size_t buflen, off_t skip, bool to_buffer); |
281 | 292 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index 68a504f6e474..b161ef8a902e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -472,11 +472,15 @@ struct sched_dl_entity { | |||
472 | * has not been executed yet. This flag is useful to avoid race | 472 | * has not been executed yet. This flag is useful to avoid race |
473 | * conditions between the inactive timer handler and the wakeup | 473 | * conditions between the inactive timer handler and the wakeup |
474 | * code. | 474 | * code. |
475 | * | ||
476 | * @dl_overrun tells if the task asked to be informed about runtime | ||
477 | * overruns. | ||
475 | */ | 478 | */ |
476 | unsigned int dl_throttled : 1; | 479 | unsigned int dl_throttled : 1; |
477 | unsigned int dl_boosted : 1; | 480 | unsigned int dl_boosted : 1; |
478 | unsigned int dl_yielded : 1; | 481 | unsigned int dl_yielded : 1; |
479 | unsigned int dl_non_contending : 1; | 482 | unsigned int dl_non_contending : 1; |
483 | unsigned int dl_overrun : 1; | ||
480 | 484 | ||
481 | /* | 485 | /* |
482 | * Bandwidth enforcement timer. Each -deadline task has its | 486 | * Bandwidth enforcement timer. Each -deadline task has its |
@@ -551,6 +555,14 @@ struct task_struct { | |||
551 | unsigned long wakee_flip_decay_ts; | 555 | unsigned long wakee_flip_decay_ts; |
552 | struct task_struct *last_wakee; | 556 | struct task_struct *last_wakee; |
553 | 557 | ||
558 | /* | ||
559 | * recent_used_cpu is initially set as the last CPU used by a task | ||
560 | * that wakes affine another task. Waker/wakee relationships can | ||
561 | * push tasks around a CPU where each wakeup moves to the next one. | ||
562 | * Tracking a recently used CPU allows a quick search for a recently | ||
563 | * used CPU that may be idle. | ||
564 | */ | ||
565 | int recent_used_cpu; | ||
554 | int wake_cpu; | 566 | int wake_cpu; |
555 | #endif | 567 | #endif |
556 | int on_rq; | 568 | int on_rq; |
@@ -1427,6 +1439,7 @@ extern int idle_cpu(int cpu); | |||
1427 | extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); | 1439 | extern int sched_setscheduler(struct task_struct *, int, const struct sched_param *); |
1428 | extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); | 1440 | extern int sched_setscheduler_nocheck(struct task_struct *, int, const struct sched_param *); |
1429 | extern int sched_setattr(struct task_struct *, const struct sched_attr *); | 1441 | extern int sched_setattr(struct task_struct *, const struct sched_attr *); |
1442 | extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *); | ||
1430 | extern struct task_struct *idle_task(int cpu); | 1443 | extern struct task_struct *idle_task(int cpu); |
1431 | 1444 | ||
1432 | /** | 1445 | /** |
@@ -1484,6 +1497,11 @@ static inline struct thread_info *task_thread_info(struct task_struct *task) | |||
1484 | extern struct task_struct *find_task_by_vpid(pid_t nr); | 1497 | extern struct task_struct *find_task_by_vpid(pid_t nr); |
1485 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); | 1498 | extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); |
1486 | 1499 | ||
1500 | /* | ||
1501 | * find a task by its virtual pid and get the task struct | ||
1502 | */ | ||
1503 | extern struct task_struct *find_get_task_by_vpid(pid_t nr); | ||
1504 | |||
1487 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); | 1505 | extern int wake_up_state(struct task_struct *tsk, unsigned int state); |
1488 | extern int wake_up_process(struct task_struct *tsk); | 1506 | extern int wake_up_process(struct task_struct *tsk); |
1489 | extern void wake_up_new_task(struct task_struct *tsk); | 1507 | extern void wake_up_new_task(struct task_struct *tsk); |
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h index d1ad3d825561..0b55834efd46 100644 --- a/include/linux/sched/cpufreq.h +++ b/include/linux/sched/cpufreq.h | |||
@@ -12,8 +12,6 @@ | |||
12 | #define SCHED_CPUFREQ_DL (1U << 1) | 12 | #define SCHED_CPUFREQ_DL (1U << 1) |
13 | #define SCHED_CPUFREQ_IOWAIT (1U << 2) | 13 | #define SCHED_CPUFREQ_IOWAIT (1U << 2) |
14 | 14 | ||
15 | #define SCHED_CPUFREQ_RT_DL (SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL) | ||
16 | |||
17 | #ifdef CONFIG_CPU_FREQ | 15 | #ifdef CONFIG_CPU_FREQ |
18 | struct update_util_data { | 16 | struct update_util_data { |
19 | void (*func)(struct update_util_data *data, u64 time, unsigned int flags); | 17 | void (*func)(struct update_util_data *data, u64 time, unsigned int flags); |
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 3d49b91b674d..1149533aa2fa 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
@@ -7,11 +7,12 @@ | |||
7 | #include <linux/sched.h> | 7 | #include <linux/sched.h> |
8 | #include <linux/mm_types.h> | 8 | #include <linux/mm_types.h> |
9 | #include <linux/gfp.h> | 9 | #include <linux/gfp.h> |
10 | #include <linux/sync_core.h> | ||
10 | 11 | ||
11 | /* | 12 | /* |
12 | * Routines for handling mm_structs | 13 | * Routines for handling mm_structs |
13 | */ | 14 | */ |
14 | extern struct mm_struct * mm_alloc(void); | 15 | extern struct mm_struct *mm_alloc(void); |
15 | 16 | ||
16 | /** | 17 | /** |
17 | * mmgrab() - Pin a &struct mm_struct. | 18 | * mmgrab() - Pin a &struct mm_struct. |
@@ -35,27 +36,7 @@ static inline void mmgrab(struct mm_struct *mm) | |||
35 | atomic_inc(&mm->mm_count); | 36 | atomic_inc(&mm->mm_count); |
36 | } | 37 | } |
37 | 38 | ||
38 | /* mmdrop drops the mm and the page tables */ | 39 | extern void mmdrop(struct mm_struct *mm); |
39 | extern void __mmdrop(struct mm_struct *); | ||
40 | static inline void mmdrop(struct mm_struct *mm) | ||
41 | { | ||
42 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) | ||
43 | __mmdrop(mm); | ||
44 | } | ||
45 | |||
46 | static inline void mmdrop_async_fn(struct work_struct *work) | ||
47 | { | ||
48 | struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work); | ||
49 | __mmdrop(mm); | ||
50 | } | ||
51 | |||
52 | static inline void mmdrop_async(struct mm_struct *mm) | ||
53 | { | ||
54 | if (unlikely(atomic_dec_and_test(&mm->mm_count))) { | ||
55 | INIT_WORK(&mm->async_put_work, mmdrop_async_fn); | ||
56 | schedule_work(&mm->async_put_work); | ||
57 | } | ||
58 | } | ||
59 | 40 | ||
60 | /** | 41 | /** |
61 | * mmget() - Pin the address space associated with a &struct mm_struct. | 42 | * mmget() - Pin the address space associated with a &struct mm_struct. |
@@ -214,18 +195,48 @@ static inline void memalloc_noreclaim_restore(unsigned int flags) | |||
214 | 195 | ||
215 | #ifdef CONFIG_MEMBARRIER | 196 | #ifdef CONFIG_MEMBARRIER |
216 | enum { | 197 | enum { |
217 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), | 198 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0), |
218 | MEMBARRIER_STATE_SWITCH_MM = (1U << 1), | 199 | MEMBARRIER_STATE_PRIVATE_EXPEDITED = (1U << 1), |
200 | MEMBARRIER_STATE_GLOBAL_EXPEDITED_READY = (1U << 2), | ||
201 | MEMBARRIER_STATE_GLOBAL_EXPEDITED = (1U << 3), | ||
202 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE_READY = (1U << 4), | ||
203 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE = (1U << 5), | ||
204 | }; | ||
205 | |||
206 | enum { | ||
207 | MEMBARRIER_FLAG_SYNC_CORE = (1U << 0), | ||
219 | }; | 208 | }; |
220 | 209 | ||
210 | #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS | ||
211 | #include <asm/membarrier.h> | ||
212 | #endif | ||
213 | |||
214 | static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) | ||
215 | { | ||
216 | if (likely(!(atomic_read(&mm->membarrier_state) & | ||
217 | MEMBARRIER_STATE_PRIVATE_EXPEDITED_SYNC_CORE))) | ||
218 | return; | ||
219 | sync_core_before_usermode(); | ||
220 | } | ||
221 | |||
221 | static inline void membarrier_execve(struct task_struct *t) | 222 | static inline void membarrier_execve(struct task_struct *t) |
222 | { | 223 | { |
223 | atomic_set(&t->mm->membarrier_state, 0); | 224 | atomic_set(&t->mm->membarrier_state, 0); |
224 | } | 225 | } |
225 | #else | 226 | #else |
227 | #ifdef CONFIG_ARCH_HAS_MEMBARRIER_CALLBACKS | ||
228 | static inline void membarrier_arch_switch_mm(struct mm_struct *prev, | ||
229 | struct mm_struct *next, | ||
230 | struct task_struct *tsk) | ||
231 | { | ||
232 | } | ||
233 | #endif | ||
226 | static inline void membarrier_execve(struct task_struct *t) | 234 | static inline void membarrier_execve(struct task_struct *t) |
227 | { | 235 | { |
228 | } | 236 | } |
237 | static inline void membarrier_mm_sync_core_before_usermode(struct mm_struct *mm) | ||
238 | { | ||
239 | } | ||
229 | #endif | 240 | #endif |
230 | 241 | ||
231 | #endif /* _LINUX_SCHED_MM_H */ | 242 | #endif /* _LINUX_SCHED_MM_H */ |
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 0aa4548fb492..23b4f9cb82db 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h | |||
@@ -285,6 +285,34 @@ static inline void kernel_signal_stop(void) | |||
285 | 285 | ||
286 | schedule(); | 286 | schedule(); |
287 | } | 287 | } |
288 | #ifdef __ARCH_SI_TRAPNO | ||
289 | # define ___ARCH_SI_TRAPNO(_a1) , _a1 | ||
290 | #else | ||
291 | # define ___ARCH_SI_TRAPNO(_a1) | ||
292 | #endif | ||
293 | #ifdef __ia64__ | ||
294 | # define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3 | ||
295 | #else | ||
296 | # define ___ARCH_SI_IA64(_a1, _a2, _a3) | ||
297 | #endif | ||
298 | |||
299 | int force_sig_fault(int sig, int code, void __user *addr | ||
300 | ___ARCH_SI_TRAPNO(int trapno) | ||
301 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) | ||
302 | , struct task_struct *t); | ||
303 | int send_sig_fault(int sig, int code, void __user *addr | ||
304 | ___ARCH_SI_TRAPNO(int trapno) | ||
305 | ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr) | ||
306 | , struct task_struct *t); | ||
307 | |||
308 | int force_sig_mceerr(int code, void __user *, short, struct task_struct *); | ||
309 | int send_sig_mceerr(int code, void __user *, short, struct task_struct *); | ||
310 | |||
311 | int force_sig_bnderr(void __user *addr, void __user *lower, void __user *upper); | ||
312 | int force_sig_pkuerr(void __user *addr, u32 pkey); | ||
313 | |||
314 | int force_sig_ptrace_errno_trap(int errno, void __user *addr); | ||
315 | |||
288 | extern int send_sig_info(int, struct siginfo *, struct task_struct *); | 316 | extern int send_sig_info(int, struct siginfo *, struct task_struct *); |
289 | extern int force_sigsegv(int, struct task_struct *); | 317 | extern int force_sigsegv(int, struct task_struct *); |
290 | extern int force_sig_info(int, struct siginfo *, struct task_struct *); | 318 | extern int force_sig_info(int, struct siginfo *, struct task_struct *); |
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 05b8650f06f5..5be31eb7b266 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h | |||
@@ -104,6 +104,20 @@ extern int arch_task_struct_size __read_mostly; | |||
104 | # define arch_task_struct_size (sizeof(struct task_struct)) | 104 | # define arch_task_struct_size (sizeof(struct task_struct)) |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | #ifndef CONFIG_HAVE_ARCH_THREAD_STRUCT_WHITELIST | ||
108 | /* | ||
109 | * If an architecture has not declared a thread_struct whitelist we | ||
110 | * must assume something there may need to be copied to userspace. | ||
111 | */ | ||
112 | static inline void arch_thread_struct_whitelist(unsigned long *offset, | ||
113 | unsigned long *size) | ||
114 | { | ||
115 | *offset = 0; | ||
116 | /* Handle dynamically sized thread_struct. */ | ||
117 | *size = arch_task_struct_size - offsetof(struct task_struct, thread); | ||
118 | } | ||
119 | #endif | ||
120 | |||
107 | #ifdef CONFIG_VMAP_STACK | 121 | #ifdef CONFIG_VMAP_STACK |
108 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) | 122 | static inline struct vm_struct *task_stack_vm_area(const struct task_struct *t) |
109 | { | 123 | { |
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index cb4828aaa34f..6a841929073f 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h | |||
@@ -78,7 +78,7 @@ static inline void put_task_stack(struct task_struct *tsk) {} | |||
78 | #define task_stack_end_corrupted(task) \ | 78 | #define task_stack_end_corrupted(task) \ |
79 | (*(end_of_stack(task)) != STACK_END_MAGIC) | 79 | (*(end_of_stack(task)) != STACK_END_MAGIC) |
80 | 80 | ||
81 | static inline int object_is_on_stack(void *obj) | 81 | static inline int object_is_on_stack(const void *obj) |
82 | { | 82 | { |
83 | void *stack = task_stack_page(current); | 83 | void *stack = task_stack_page(current); |
84 | 84 | ||
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index cf257c2e728d..26347741ba50 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h | |||
@@ -7,6 +7,12 @@ | |||
7 | #include <linux/sched/idle.h> | 7 | #include <linux/sched/idle.h> |
8 | 8 | ||
9 | /* | 9 | /* |
10 | * Increase resolution of cpu_capacity calculations | ||
11 | */ | ||
12 | #define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT | ||
13 | #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) | ||
14 | |||
15 | /* | ||
10 | * sched-domains (multiprocessor balancing) declarations: | 16 | * sched-domains (multiprocessor balancing) declarations: |
11 | */ | 17 | */ |
12 | #ifdef CONFIG_SMP | 18 | #ifdef CONFIG_SMP |
@@ -27,12 +33,6 @@ | |||
27 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ | 33 | #define SD_OVERLAP 0x2000 /* sched_domains of this level overlap */ |
28 | #define SD_NUMA 0x4000 /* cross-node balancing */ | 34 | #define SD_NUMA 0x4000 /* cross-node balancing */ |
29 | 35 | ||
30 | /* | ||
31 | * Increase resolution of cpu_capacity calculations | ||
32 | */ | ||
33 | #define SCHED_CAPACITY_SHIFT SCHED_FIXEDPOINT_SHIFT | ||
34 | #define SCHED_CAPACITY_SCALE (1L << SCHED_CAPACITY_SHIFT) | ||
35 | |||
36 | #ifdef CONFIG_SCHED_SMT | 36 | #ifdef CONFIG_SCHED_SMT |
37 | static inline int cpu_smt_flags(void) | 37 | static inline int cpu_smt_flags(void) |
38 | { | 38 | { |
diff --git a/include/linux/scif.h b/include/linux/scif.h index 49a35d6edc94..7046111b8d0a 100644 --- a/include/linux/scif.h +++ b/include/linux/scif.h | |||
@@ -123,8 +123,8 @@ struct scif_range { | |||
123 | */ | 123 | */ |
124 | struct scif_pollepd { | 124 | struct scif_pollepd { |
125 | scif_epd_t epd; | 125 | scif_epd_t epd; |
126 | short events; | 126 | __poll_t events; |
127 | short revents; | 127 | __poll_t revents; |
128 | }; | 128 | }; |
129 | 129 | ||
130 | /** | 130 | /** |
diff --git a/include/linux/sctp.h b/include/linux/sctp.h index da803dfc7a39..b36c76635f18 100644 --- a/include/linux/sctp.h +++ b/include/linux/sctp.h | |||
@@ -102,11 +102,15 @@ enum sctp_cid { | |||
102 | /* AUTH Extension Section 4.1 */ | 102 | /* AUTH Extension Section 4.1 */ |
103 | SCTP_CID_AUTH = 0x0F, | 103 | SCTP_CID_AUTH = 0x0F, |
104 | 104 | ||
105 | /* sctp ndata 5.1. I-DATA */ | ||
106 | SCTP_CID_I_DATA = 0x40, | ||
107 | |||
105 | /* PR-SCTP Sec 3.2 */ | 108 | /* PR-SCTP Sec 3.2 */ |
106 | SCTP_CID_FWD_TSN = 0xC0, | 109 | SCTP_CID_FWD_TSN = 0xC0, |
107 | 110 | ||
108 | /* Use hex, as defined in ADDIP sec. 3.1 */ | 111 | /* Use hex, as defined in ADDIP sec. 3.1 */ |
109 | SCTP_CID_ASCONF = 0xC1, | 112 | SCTP_CID_ASCONF = 0xC1, |
113 | SCTP_CID_I_FWD_TSN = 0xC2, | ||
110 | SCTP_CID_ASCONF_ACK = 0x80, | 114 | SCTP_CID_ASCONF_ACK = 0x80, |
111 | SCTP_CID_RECONF = 0x82, | 115 | SCTP_CID_RECONF = 0x82, |
112 | }; /* enum */ | 116 | }; /* enum */ |
@@ -240,6 +244,23 @@ struct sctp_data_chunk { | |||
240 | struct sctp_datahdr data_hdr; | 244 | struct sctp_datahdr data_hdr; |
241 | }; | 245 | }; |
242 | 246 | ||
247 | struct sctp_idatahdr { | ||
248 | __be32 tsn; | ||
249 | __be16 stream; | ||
250 | __be16 reserved; | ||
251 | __be32 mid; | ||
252 | union { | ||
253 | __u32 ppid; | ||
254 | __be32 fsn; | ||
255 | }; | ||
256 | __u8 payload[0]; | ||
257 | }; | ||
258 | |||
259 | struct sctp_idata_chunk { | ||
260 | struct sctp_chunkhdr chunk_hdr; | ||
261 | struct sctp_idatahdr data_hdr; | ||
262 | }; | ||
263 | |||
243 | /* DATA Chuck Specific Flags */ | 264 | /* DATA Chuck Specific Flags */ |
244 | enum { | 265 | enum { |
245 | SCTP_DATA_MIDDLE_FRAG = 0x00, | 266 | SCTP_DATA_MIDDLE_FRAG = 0x00, |
@@ -596,6 +617,22 @@ struct sctp_fwdtsn_chunk { | |||
596 | struct sctp_fwdtsn_hdr fwdtsn_hdr; | 617 | struct sctp_fwdtsn_hdr fwdtsn_hdr; |
597 | }; | 618 | }; |
598 | 619 | ||
620 | struct sctp_ifwdtsn_skip { | ||
621 | __be16 stream; | ||
622 | __u8 reserved; | ||
623 | __u8 flags; | ||
624 | __be32 mid; | ||
625 | }; | ||
626 | |||
627 | struct sctp_ifwdtsn_hdr { | ||
628 | __be32 new_cum_tsn; | ||
629 | struct sctp_ifwdtsn_skip skip[0]; | ||
630 | }; | ||
631 | |||
632 | struct sctp_ifwdtsn_chunk { | ||
633 | struct sctp_chunkhdr chunk_hdr; | ||
634 | struct sctp_ifwdtsn_hdr fwdtsn_hdr; | ||
635 | }; | ||
599 | 636 | ||
600 | /* ADDIP | 637 | /* ADDIP |
601 | * Section 3.1.1 Address Configuration Change Chunk (ASCONF) | 638 | * Section 3.1.1 Address Configuration Change Chunk (ASCONF) |
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h index 10f25f7e4304..c723a5c4e3ff 100644 --- a/include/linux/seccomp.h +++ b/include/linux/seccomp.h | |||
@@ -95,11 +95,19 @@ static inline void get_seccomp_filter(struct task_struct *tsk) | |||
95 | #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) | 95 | #if defined(CONFIG_SECCOMP_FILTER) && defined(CONFIG_CHECKPOINT_RESTORE) |
96 | extern long seccomp_get_filter(struct task_struct *task, | 96 | extern long seccomp_get_filter(struct task_struct *task, |
97 | unsigned long filter_off, void __user *data); | 97 | unsigned long filter_off, void __user *data); |
98 | extern long seccomp_get_metadata(struct task_struct *task, | ||
99 | unsigned long filter_off, void __user *data); | ||
98 | #else | 100 | #else |
99 | static inline long seccomp_get_filter(struct task_struct *task, | 101 | static inline long seccomp_get_filter(struct task_struct *task, |
100 | unsigned long n, void __user *data) | 102 | unsigned long n, void __user *data) |
101 | { | 103 | { |
102 | return -EINVAL; | 104 | return -EINVAL; |
103 | } | 105 | } |
106 | static inline long seccomp_get_metadata(struct task_struct *task, | ||
107 | unsigned long filter_off, | ||
108 | void __user *data) | ||
109 | { | ||
110 | return -EINVAL; | ||
111 | } | ||
104 | #endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ | 112 | #endif /* CONFIG_SECCOMP_FILTER && CONFIG_CHECKPOINT_RESTORE */ |
105 | #endif /* _LINUX_SECCOMP_H */ | 113 | #endif /* _LINUX_SECCOMP_H */ |
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h index 09c6e28746f9..ab437dd2e3b9 100644 --- a/include/linux/seq_file.h +++ b/include/linux/seq_file.h | |||
@@ -140,6 +140,20 @@ void *__seq_open_private(struct file *, const struct seq_operations *, int); | |||
140 | int seq_open_private(struct file *, const struct seq_operations *, int); | 140 | int seq_open_private(struct file *, const struct seq_operations *, int); |
141 | int seq_release_private(struct inode *, struct file *); | 141 | int seq_release_private(struct inode *, struct file *); |
142 | 142 | ||
143 | #define DEFINE_SHOW_ATTRIBUTE(__name) \ | ||
144 | static int __name ## _open(struct inode *inode, struct file *file) \ | ||
145 | { \ | ||
146 | return single_open(file, __name ## _show, inode->i_private); \ | ||
147 | } \ | ||
148 | \ | ||
149 | static const struct file_operations __name ## _fops = { \ | ||
150 | .owner = THIS_MODULE, \ | ||
151 | .open = __name ## _open, \ | ||
152 | .read = seq_read, \ | ||
153 | .llseek = seq_lseek, \ | ||
154 | .release = single_release, \ | ||
155 | } | ||
156 | |||
143 | static inline struct user_namespace *seq_user_ns(struct seq_file *seq) | 157 | static inline struct user_namespace *seq_user_ns(struct seq_file *seq) |
144 | { | 158 | { |
145 | #ifdef CONFIG_USER_NS | 159 | #ifdef CONFIG_USER_NS |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index f189a8a3bbb8..bcf4cf26b8c8 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
@@ -278,9 +278,8 @@ static inline void raw_write_seqcount_barrier(seqcount_t *s) | |||
278 | 278 | ||
279 | static inline int raw_read_seqcount_latch(seqcount_t *s) | 279 | static inline int raw_read_seqcount_latch(seqcount_t *s) |
280 | { | 280 | { |
281 | int seq = READ_ONCE(s->sequence); | ||
282 | /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ | 281 | /* Pairs with the first smp_wmb() in raw_write_seqcount_latch() */ |
283 | smp_read_barrier_depends(); | 282 | int seq = READ_ONCE(s->sequence); /* ^^^ */ |
284 | return seq; | 283 | return seq; |
285 | } | 284 | } |
286 | 285 | ||
diff --git a/include/linux/serdev.h b/include/linux/serdev.h index d609e6dc5bad..f153b2c7f0cd 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h | |||
@@ -27,8 +27,10 @@ struct serdev_device; | |||
27 | 27 | ||
28 | /** | 28 | /** |
29 | * struct serdev_device_ops - Callback operations for a serdev device | 29 | * struct serdev_device_ops - Callback operations for a serdev device |
30 | * @receive_buf: Function called with data received from device. | 30 | * @receive_buf: Function called with data received from device; |
31 | * @write_wakeup: Function called when ready to transmit more data. | 31 | * returns number of bytes accepted; may sleep. |
32 | * @write_wakeup: Function called when ready to transmit more data; must | ||
33 | * not sleep. | ||
32 | */ | 34 | */ |
33 | struct serdev_device_ops { | 35 | struct serdev_device_ops { |
34 | int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); | 36 | int (*receive_buf)(struct serdev_device *, const unsigned char *, size_t); |
@@ -76,6 +78,12 @@ static inline struct serdev_device_driver *to_serdev_device_driver(struct device | |||
76 | return container_of(d, struct serdev_device_driver, driver); | 78 | return container_of(d, struct serdev_device_driver, driver); |
77 | } | 79 | } |
78 | 80 | ||
81 | enum serdev_parity { | ||
82 | SERDEV_PARITY_NONE, | ||
83 | SERDEV_PARITY_EVEN, | ||
84 | SERDEV_PARITY_ODD, | ||
85 | }; | ||
86 | |||
79 | /* | 87 | /* |
80 | * serdev controller structures | 88 | * serdev controller structures |
81 | */ | 89 | */ |
@@ -86,6 +94,7 @@ struct serdev_controller_ops { | |||
86 | int (*open)(struct serdev_controller *); | 94 | int (*open)(struct serdev_controller *); |
87 | void (*close)(struct serdev_controller *); | 95 | void (*close)(struct serdev_controller *); |
88 | void (*set_flow_control)(struct serdev_controller *, bool); | 96 | void (*set_flow_control)(struct serdev_controller *, bool); |
97 | int (*set_parity)(struct serdev_controller *, enum serdev_parity); | ||
89 | unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); | 98 | unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int); |
90 | void (*wait_until_sent)(struct serdev_controller *, long); | 99 | void (*wait_until_sent)(struct serdev_controller *, long); |
91 | int (*get_tiocm)(struct serdev_controller *); | 100 | int (*get_tiocm)(struct serdev_controller *); |
@@ -193,6 +202,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl, | |||
193 | 202 | ||
194 | int serdev_device_open(struct serdev_device *); | 203 | int serdev_device_open(struct serdev_device *); |
195 | void serdev_device_close(struct serdev_device *); | 204 | void serdev_device_close(struct serdev_device *); |
205 | int devm_serdev_device_open(struct device *, struct serdev_device *); | ||
196 | unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); | 206 | unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int); |
197 | void serdev_device_set_flow_control(struct serdev_device *, bool); | 207 | void serdev_device_set_flow_control(struct serdev_device *, bool); |
198 | int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); | 208 | int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t); |
@@ -298,6 +308,9 @@ static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enabl | |||
298 | return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); | 308 | return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS); |
299 | } | 309 | } |
300 | 310 | ||
311 | int serdev_device_set_parity(struct serdev_device *serdev, | ||
312 | enum serdev_parity parity); | ||
313 | |||
301 | /* | 314 | /* |
302 | * serdev hooks into TTY core | 315 | * serdev hooks into TTY core |
303 | */ | 316 | */ |
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h index aefd0e5115da..b32df49a3bd5 100644 --- a/include/linux/serial_core.h +++ b/include/linux/serial_core.h | |||
@@ -387,7 +387,7 @@ struct uart_port *uart_get_console(struct uart_port *ports, int nr, | |||
387 | struct console *c); | 387 | struct console *c); |
388 | int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, | 388 | int uart_parse_earlycon(char *p, unsigned char *iotype, resource_size_t *addr, |
389 | char **options); | 389 | char **options); |
390 | void uart_parse_options(char *options, int *baud, int *parity, int *bits, | 390 | void uart_parse_options(const char *options, int *baud, int *parity, int *bits, |
391 | int *flow); | 391 | int *flow); |
392 | int uart_set_options(struct uart_port *port, struct console *co, int baud, | 392 | int uart_set_options(struct uart_port *port, struct console *co, int baud, |
393 | int parity, int bits, int flow); | 393 | int parity, int bits, int flow); |
@@ -501,9 +501,5 @@ static inline int uart_handle_break(struct uart_port *port) | |||
501 | (cflag) & CRTSCTS || \ | 501 | (cflag) & CRTSCTS || \ |
502 | !((cflag) & CLOCAL)) | 502 | !((cflag) & CLOCAL)) |
503 | 503 | ||
504 | /* | 504 | void uart_get_rs485_mode(struct device *dev, struct serial_rs485 *rs485conf); |
505 | * Common device tree parsing helpers | ||
506 | */ | ||
507 | void of_get_rs485_mode(struct device_node *np, struct serial_rs485 *rs485conf); | ||
508 | |||
509 | #endif /* LINUX_SERIAL_CORE_H */ | 505 | #endif /* LINUX_SERIAL_CORE_H */ |
diff --git a/include/linux/sfp.h b/include/linux/sfp.h index 4a906f560817..e724d5a3dd80 100644 --- a/include/linux/sfp.h +++ b/include/linux/sfp.h | |||
@@ -3,7 +3,7 @@ | |||
3 | 3 | ||
4 | #include <linux/phy.h> | 4 | #include <linux/phy.h> |
5 | 5 | ||
6 | struct __packed sfp_eeprom_base { | 6 | struct sfp_eeprom_base { |
7 | u8 phys_id; | 7 | u8 phys_id; |
8 | u8 phys_ext_id; | 8 | u8 phys_ext_id; |
9 | u8 connector; | 9 | u8 connector; |
@@ -165,13 +165,47 @@ struct __packed sfp_eeprom_base { | |||
165 | char vendor_rev[4]; | 165 | char vendor_rev[4]; |
166 | union { | 166 | union { |
167 | __be16 optical_wavelength; | 167 | __be16 optical_wavelength; |
168 | u8 cable_spec; | 168 | __be16 cable_compliance; |
169 | }; | 169 | struct { |
170 | #if defined __BIG_ENDIAN_BITFIELD | ||
171 | u8 reserved60_2:6; | ||
172 | u8 fc_pi_4_app_h:1; | ||
173 | u8 sff8431_app_e:1; | ||
174 | u8 reserved61:8; | ||
175 | #elif defined __LITTLE_ENDIAN_BITFIELD | ||
176 | u8 sff8431_app_e:1; | ||
177 | u8 fc_pi_4_app_h:1; | ||
178 | u8 reserved60_2:6; | ||
179 | u8 reserved61:8; | ||
180 | #else | ||
181 | #error Unknown Endian | ||
182 | #endif | ||
183 | } __packed passive; | ||
184 | struct { | ||
185 | #if defined __BIG_ENDIAN_BITFIELD | ||
186 | u8 reserved60_4:4; | ||
187 | u8 fc_pi_4_lim:1; | ||
188 | u8 sff8431_lim:1; | ||
189 | u8 fc_pi_4_app_h:1; | ||
190 | u8 sff8431_app_e:1; | ||
191 | u8 reserved61:8; | ||
192 | #elif defined __LITTLE_ENDIAN_BITFIELD | ||
193 | u8 sff8431_app_e:1; | ||
194 | u8 fc_pi_4_app_h:1; | ||
195 | u8 sff8431_lim:1; | ||
196 | u8 fc_pi_4_lim:1; | ||
197 | u8 reserved60_4:4; | ||
198 | u8 reserved61:8; | ||
199 | #else | ||
200 | #error Unknown Endian | ||
201 | #endif | ||
202 | } __packed active; | ||
203 | } __packed; | ||
170 | u8 reserved62; | 204 | u8 reserved62; |
171 | u8 cc_base; | 205 | u8 cc_base; |
172 | }; | 206 | } __packed; |
173 | 207 | ||
174 | struct __packed sfp_eeprom_ext { | 208 | struct sfp_eeprom_ext { |
175 | __be16 options; | 209 | __be16 options; |
176 | u8 br_max; | 210 | u8 br_max; |
177 | u8 br_min; | 211 | u8 br_min; |
@@ -181,12 +215,21 @@ struct __packed sfp_eeprom_ext { | |||
181 | u8 enhopts; | 215 | u8 enhopts; |
182 | u8 sff8472_compliance; | 216 | u8 sff8472_compliance; |
183 | u8 cc_ext; | 217 | u8 cc_ext; |
184 | }; | 218 | } __packed; |
185 | 219 | ||
186 | struct __packed sfp_eeprom_id { | 220 | /** |
221 | * struct sfp_eeprom_id - raw SFP module identification information | ||
222 | * @base: base SFP module identification structure | ||
223 | * @ext: extended SFP module identification structure | ||
224 | * | ||
225 | * See the SFF-8472 specification and related documents for the definition | ||
226 | * of these structure members. This can be obtained from | ||
227 | * ftp://ftp.seagate.com/sff | ||
228 | */ | ||
229 | struct sfp_eeprom_id { | ||
187 | struct sfp_eeprom_base base; | 230 | struct sfp_eeprom_base base; |
188 | struct sfp_eeprom_ext ext; | 231 | struct sfp_eeprom_ext ext; |
189 | }; | 232 | } __packed; |
190 | 233 | ||
191 | /* SFP EEPROM registers */ | 234 | /* SFP EEPROM registers */ |
192 | enum { | 235 | enum { |
@@ -222,6 +265,7 @@ enum { | |||
222 | SFP_SFF8472_COMPLIANCE = 0x5e, | 265 | SFP_SFF8472_COMPLIANCE = 0x5e, |
223 | SFP_CC_EXT = 0x5f, | 266 | SFP_CC_EXT = 0x5f, |
224 | 267 | ||
268 | SFP_PHYS_ID_SFF = 0x02, | ||
225 | SFP_PHYS_ID_SFP = 0x03, | 269 | SFP_PHYS_ID_SFP = 0x03, |
226 | SFP_PHYS_EXT_ID_SFP = 0x04, | 270 | SFP_PHYS_EXT_ID_SFP = 0x04, |
227 | SFP_CONNECTOR_UNSPEC = 0x00, | 271 | SFP_CONNECTOR_UNSPEC = 0x00, |
@@ -347,19 +391,32 @@ enum { | |||
347 | SFP_PAGE = 0x7f, | 391 | SFP_PAGE = 0x7f, |
348 | }; | 392 | }; |
349 | 393 | ||
350 | struct device_node; | 394 | struct fwnode_handle; |
351 | struct ethtool_eeprom; | 395 | struct ethtool_eeprom; |
352 | struct ethtool_modinfo; | 396 | struct ethtool_modinfo; |
353 | struct net_device; | 397 | struct net_device; |
354 | struct sfp_bus; | 398 | struct sfp_bus; |
355 | 399 | ||
400 | /** | ||
401 | * struct sfp_upstream_ops - upstream operations structure | ||
402 | * @module_insert: called after a module has been detected to determine | ||
403 | * whether the module is supported for the upstream device. | ||
404 | * @module_remove: called after the module has been removed. | ||
405 | * @link_down: called when the link is non-operational for whatever | ||
406 | * reason. | ||
407 | * @link_up: called when the link is operational. | ||
408 | * @connect_phy: called when an I2C accessible PHY has been detected | ||
409 | * on the module. | ||
410 | * @disconnect_phy: called when a module with an I2C accessible PHY has | ||
411 | * been removed. | ||
412 | */ | ||
356 | struct sfp_upstream_ops { | 413 | struct sfp_upstream_ops { |
357 | int (*module_insert)(void *, const struct sfp_eeprom_id *id); | 414 | int (*module_insert)(void *priv, const struct sfp_eeprom_id *id); |
358 | void (*module_remove)(void *); | 415 | void (*module_remove)(void *priv); |
359 | void (*link_down)(void *); | 416 | void (*link_down)(void *priv); |
360 | void (*link_up)(void *); | 417 | void (*link_up)(void *priv); |
361 | int (*connect_phy)(void *, struct phy_device *); | 418 | int (*connect_phy)(void *priv, struct phy_device *); |
362 | void (*disconnect_phy)(void *); | 419 | void (*disconnect_phy)(void *priv); |
363 | }; | 420 | }; |
364 | 421 | ||
365 | #if IS_ENABLED(CONFIG_SFP) | 422 | #if IS_ENABLED(CONFIG_SFP) |
@@ -375,7 +432,7 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee, | |||
375 | u8 *data); | 432 | u8 *data); |
376 | void sfp_upstream_start(struct sfp_bus *bus); | 433 | void sfp_upstream_start(struct sfp_bus *bus); |
377 | void sfp_upstream_stop(struct sfp_bus *bus); | 434 | void sfp_upstream_stop(struct sfp_bus *bus); |
378 | struct sfp_bus *sfp_register_upstream(struct device_node *np, | 435 | struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode, |
379 | struct net_device *ndev, void *upstream, | 436 | struct net_device *ndev, void *upstream, |
380 | const struct sfp_upstream_ops *ops); | 437 | const struct sfp_upstream_ops *ops); |
381 | void sfp_unregister_upstream(struct sfp_bus *bus); | 438 | void sfp_unregister_upstream(struct sfp_bus *bus); |
@@ -419,7 +476,8 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus) | |||
419 | { | 476 | { |
420 | } | 477 | } |
421 | 478 | ||
422 | static inline struct sfp_bus *sfp_register_upstream(struct device_node *np, | 479 | static inline struct sfp_bus *sfp_register_upstream( |
480 | struct fwnode_handle *fwnode, | ||
423 | struct net_device *ndev, void *upstream, | 481 | struct net_device *ndev, void *upstream, |
424 | const struct sfp_upstream_ops *ops) | 482 | const struct sfp_upstream_ops *ops) |
425 | { | 483 | { |
diff --git a/include/linux/sh_eth.h b/include/linux/sh_eth.h index 94081e9a5010..6dfda97a6c1a 100644 --- a/include/linux/sh_eth.h +++ b/include/linux/sh_eth.h | |||
@@ -5,12 +5,9 @@ | |||
5 | #include <linux/phy.h> | 5 | #include <linux/phy.h> |
6 | #include <linux/if_ether.h> | 6 | #include <linux/if_ether.h> |
7 | 7 | ||
8 | enum {EDMAC_LITTLE_ENDIAN}; | ||
9 | |||
10 | struct sh_eth_plat_data { | 8 | struct sh_eth_plat_data { |
11 | int phy; | 9 | int phy; |
12 | int phy_irq; | 10 | int phy_irq; |
13 | int edmac_endian; | ||
14 | phy_interface_t phy_interface; | 11 | phy_interface_t phy_interface; |
15 | void (*set_mdio_gate)(void *addr); | 12 | void (*set_mdio_gate)(void *addr); |
16 | 13 | ||
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index 06b295bec00d..73b5e655a76e 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
@@ -112,13 +112,11 @@ extern void shmem_uncharge(struct inode *inode, long pages); | |||
112 | 112 | ||
113 | #ifdef CONFIG_TMPFS | 113 | #ifdef CONFIG_TMPFS |
114 | 114 | ||
115 | extern int shmem_add_seals(struct file *file, unsigned int seals); | 115 | extern long memfd_fcntl(struct file *file, unsigned int cmd, unsigned long arg); |
116 | extern int shmem_get_seals(struct file *file); | ||
117 | extern long shmem_fcntl(struct file *file, unsigned int cmd, unsigned long arg); | ||
118 | 116 | ||
119 | #else | 117 | #else |
120 | 118 | ||
121 | static inline long shmem_fcntl(struct file *f, unsigned int c, unsigned long a) | 119 | static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned long a) |
122 | { | 120 | { |
123 | return -EINVAL; | 121 | return -EINVAL; |
124 | } | 122 | } |
diff --git a/include/linux/signal.h b/include/linux/signal.h index 042968dd98f0..a9bc7e1b077e 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
@@ -11,13 +11,14 @@ struct task_struct; | |||
11 | /* for sysctl */ | 11 | /* for sysctl */ |
12 | extern int print_fatal_signals; | 12 | extern int print_fatal_signals; |
13 | 13 | ||
14 | static inline void copy_siginfo(struct siginfo *to, struct siginfo *from) | 14 | static inline void copy_siginfo(struct siginfo *to, const struct siginfo *from) |
15 | { | 15 | { |
16 | if (from->si_code < 0) | 16 | memcpy(to, from, sizeof(*to)); |
17 | memcpy(to, from, sizeof(*to)); | 17 | } |
18 | else | 18 | |
19 | /* _sigchld is currently the largest know union member */ | 19 | static inline void clear_siginfo(struct siginfo *info) |
20 | memcpy(to, from, __ARCH_SI_PREAMBLE_SIZE + sizeof(from->_sifields._sigchld)); | 20 | { |
21 | memset(info, 0, sizeof(*info)); | ||
21 | } | 22 | } |
22 | 23 | ||
23 | int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); | 24 | int copy_siginfo_to_user(struct siginfo __user *to, const struct siginfo *from); |
@@ -29,9 +30,7 @@ enum siginfo_layout { | |||
29 | SIL_FAULT, | 30 | SIL_FAULT, |
30 | SIL_CHLD, | 31 | SIL_CHLD, |
31 | SIL_RT, | 32 | SIL_RT, |
32 | #ifdef __ARCH_SIGSYS | ||
33 | SIL_SYS, | 33 | SIL_SYS, |
34 | #endif | ||
35 | }; | 34 | }; |
36 | 35 | ||
37 | enum siginfo_layout siginfo_layout(int sig, int si_code); | 36 | enum siginfo_layout siginfo_layout(int sig, int si_code); |
diff --git a/include/linux/siox.h b/include/linux/siox.h new file mode 100644 index 000000000000..d79624e83134 --- /dev/null +++ b/include/linux/siox.h | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2015 Pengutronix, Uwe Kleine-König <kernel@pengutronix.de> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify it under | ||
5 | * the terms of the GNU General Public License version 2 as published by the | ||
6 | * Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/device.h> | ||
10 | |||
11 | #define to_siox_device(_dev) container_of((_dev), struct siox_device, dev) | ||
12 | struct siox_device { | ||
13 | struct list_head node; /* node in smaster->devices */ | ||
14 | struct siox_master *smaster; | ||
15 | struct device dev; | ||
16 | |||
17 | const char *type; | ||
18 | size_t inbytes; | ||
19 | size_t outbytes; | ||
20 | u8 statustype; | ||
21 | |||
22 | u8 status_read_clean; | ||
23 | u8 status_written; | ||
24 | u8 status_written_lastcycle; | ||
25 | bool connected; | ||
26 | |||
27 | /* statistics */ | ||
28 | unsigned int watchdog_errors; | ||
29 | unsigned int status_errors; | ||
30 | |||
31 | struct kernfs_node *status_errors_kn; | ||
32 | struct kernfs_node *watchdog_kn; | ||
33 | struct kernfs_node *watchdog_errors_kn; | ||
34 | struct kernfs_node *connected_kn; | ||
35 | }; | ||
36 | |||
37 | bool siox_device_synced(struct siox_device *sdevice); | ||
38 | bool siox_device_connected(struct siox_device *sdevice); | ||
39 | |||
40 | struct siox_driver { | ||
41 | int (*probe)(struct siox_device *sdevice); | ||
42 | int (*remove)(struct siox_device *sdevice); | ||
43 | void (*shutdown)(struct siox_device *sdevice); | ||
44 | |||
45 | /* | ||
46 | * buf is big enough to hold sdev->inbytes - 1 bytes, the status byte | ||
47 | * is in the scope of the framework. | ||
48 | */ | ||
49 | int (*set_data)(struct siox_device *sdevice, u8 status, u8 buf[]); | ||
50 | /* | ||
51 | * buf is big enough to hold sdev->outbytes - 1 bytes, the status byte | ||
52 | * is in the scope of the framework | ||
53 | */ | ||
54 | int (*get_data)(struct siox_device *sdevice, const u8 buf[]); | ||
55 | |||
56 | struct device_driver driver; | ||
57 | }; | ||
58 | |||
59 | static inline struct siox_driver *to_siox_driver(struct device_driver *driver) | ||
60 | { | ||
61 | if (driver) | ||
62 | return container_of(driver, struct siox_driver, driver); | ||
63 | else | ||
64 | return NULL; | ||
65 | } | ||
66 | |||
67 | int __siox_driver_register(struct siox_driver *sdriver, struct module *owner); | ||
68 | |||
69 | static inline int siox_driver_register(struct siox_driver *sdriver) | ||
70 | { | ||
71 | return __siox_driver_register(sdriver, THIS_MODULE); | ||
72 | } | ||
73 | |||
74 | static inline void siox_driver_unregister(struct siox_driver *sdriver) | ||
75 | { | ||
76 | return driver_unregister(&sdriver->driver); | ||
77 | } | ||
diff --git a/include/linux/skb_array.h b/include/linux/skb_array.h index 8621ffdeecbf..a6b6e8bb3d7b 100644 --- a/include/linux/skb_array.h +++ b/include/linux/skb_array.h | |||
@@ -69,7 +69,12 @@ static inline int skb_array_produce_any(struct skb_array *a, struct sk_buff *skb | |||
69 | */ | 69 | */ |
70 | static inline bool __skb_array_empty(struct skb_array *a) | 70 | static inline bool __skb_array_empty(struct skb_array *a) |
71 | { | 71 | { |
72 | return !__ptr_ring_peek(&a->ring); | 72 | return __ptr_ring_empty(&a->ring); |
73 | } | ||
74 | |||
75 | static inline struct sk_buff *__skb_array_peek(struct skb_array *a) | ||
76 | { | ||
77 | return __ptr_ring_peek(&a->ring); | ||
73 | } | 78 | } |
74 | 79 | ||
75 | static inline bool skb_array_empty(struct skb_array *a) | 80 | static inline bool skb_array_empty(struct skb_array *a) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index a38c80e9f91e..5ebc0f869720 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
@@ -1211,6 +1211,11 @@ static inline bool skb_flow_dissect_flow_keys_buf(struct flow_keys *flow, | |||
1211 | data, proto, nhoff, hlen, flags); | 1211 | data, proto, nhoff, hlen, flags); |
1212 | } | 1212 | } |
1213 | 1213 | ||
1214 | void | ||
1215 | skb_flow_dissect_tunnel_info(const struct sk_buff *skb, | ||
1216 | struct flow_dissector *flow_dissector, | ||
1217 | void *target_container); | ||
1218 | |||
1214 | static inline __u32 skb_get_hash(struct sk_buff *skb) | 1219 | static inline __u32 skb_get_hash(struct sk_buff *skb) |
1215 | { | 1220 | { |
1216 | if (!skb->l4_hash && !skb->sw_hash) | 1221 | if (!skb->l4_hash && !skb->sw_hash) |
@@ -3241,7 +3246,7 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags, | |||
3241 | int *peeked, int *off, int *err); | 3246 | int *peeked, int *off, int *err); |
3242 | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, | 3247 | struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock, |
3243 | int *err); | 3248 | int *err); |
3244 | unsigned int datagram_poll(struct file *file, struct socket *sock, | 3249 | __poll_t datagram_poll(struct file *file, struct socket *sock, |
3245 | struct poll_table_struct *wait); | 3250 | struct poll_table_struct *wait); |
3246 | int skb_copy_datagram_iter(const struct sk_buff *from, int offset, | 3251 | int skb_copy_datagram_iter(const struct sk_buff *from, int offset, |
3247 | struct iov_iter *to, int size); | 3252 | struct iov_iter *to, int size); |
@@ -3282,6 +3287,7 @@ int skb_shift(struct sk_buff *tgt, struct sk_buff *skb, int shiftlen); | |||
3282 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); | 3287 | void skb_scrub_packet(struct sk_buff *skb, bool xnet); |
3283 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); | 3288 | unsigned int skb_gso_transport_seglen(const struct sk_buff *skb); |
3284 | bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); | 3289 | bool skb_gso_validate_mtu(const struct sk_buff *skb, unsigned int mtu); |
3290 | bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len); | ||
3285 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); | 3291 | struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features); |
3286 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); | 3292 | struct sk_buff *skb_vlan_untag(struct sk_buff *skb); |
3287 | int skb_ensure_writable(struct sk_buff *skb, int write_len); | 3293 | int skb_ensure_writable(struct sk_buff *skb, int write_len); |
@@ -4115,6 +4121,21 @@ static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb) | |||
4115 | return hdr_len + skb_gso_transport_seglen(skb); | 4121 | return hdr_len + skb_gso_transport_seglen(skb); |
4116 | } | 4122 | } |
4117 | 4123 | ||
4124 | /** | ||
4125 | * skb_gso_mac_seglen - Return length of individual segments of a gso packet | ||
4126 | * | ||
4127 | * @skb: GSO skb | ||
4128 | * | ||
4129 | * skb_gso_mac_seglen is used to determine the real size of the | ||
4130 | * individual segments, including MAC/L2, Layer3 (IP, IPv6) and L4 | ||
4131 | * headers (TCP/UDP). | ||
4132 | */ | ||
4133 | static inline unsigned int skb_gso_mac_seglen(const struct sk_buff *skb) | ||
4134 | { | ||
4135 | unsigned int hdr_len = skb_transport_header(skb) - skb_mac_header(skb); | ||
4136 | return hdr_len + skb_gso_transport_seglen(skb); | ||
4137 | } | ||
4138 | |||
4118 | /* Local Checksum Offload. | 4139 | /* Local Checksum Offload. |
4119 | * Compute outer checksum based on the assumption that the | 4140 | * Compute outer checksum based on the assumption that the |
4120 | * inner checksum will be offloaded later. | 4141 | * inner checksum will be offloaded later. |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 50697a1d6621..231abc8976c5 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
@@ -135,9 +135,15 @@ struct mem_cgroup; | |||
135 | void __init kmem_cache_init(void); | 135 | void __init kmem_cache_init(void); |
136 | bool slab_is_available(void); | 136 | bool slab_is_available(void); |
137 | 137 | ||
138 | struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, | 138 | extern bool usercopy_fallback; |
139 | slab_flags_t, | 139 | |
140 | void (*)(void *)); | 140 | struct kmem_cache *kmem_cache_create(const char *name, size_t size, |
141 | size_t align, slab_flags_t flags, | ||
142 | void (*ctor)(void *)); | ||
143 | struct kmem_cache *kmem_cache_create_usercopy(const char *name, | ||
144 | size_t size, size_t align, slab_flags_t flags, | ||
145 | size_t useroffset, size_t usersize, | ||
146 | void (*ctor)(void *)); | ||
141 | void kmem_cache_destroy(struct kmem_cache *); | 147 | void kmem_cache_destroy(struct kmem_cache *); |
142 | int kmem_cache_shrink(struct kmem_cache *); | 148 | int kmem_cache_shrink(struct kmem_cache *); |
143 | 149 | ||
@@ -153,9 +159,20 @@ void memcg_destroy_kmem_caches(struct mem_cgroup *); | |||
153 | * f.e. add ____cacheline_aligned_in_smp to the struct declaration | 159 | * f.e. add ____cacheline_aligned_in_smp to the struct declaration |
154 | * then the objects will be properly aligned in SMP configurations. | 160 | * then the objects will be properly aligned in SMP configurations. |
155 | */ | 161 | */ |
156 | #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\ | 162 | #define KMEM_CACHE(__struct, __flags) \ |
157 | sizeof(struct __struct), __alignof__(struct __struct),\ | 163 | kmem_cache_create(#__struct, sizeof(struct __struct), \ |
158 | (__flags), NULL) | 164 | __alignof__(struct __struct), (__flags), NULL) |
165 | |||
166 | /* | ||
167 | * To whitelist a single field for copying to/from usercopy, use this | ||
168 | * macro instead for KMEM_CACHE() above. | ||
169 | */ | ||
170 | #define KMEM_CACHE_USERCOPY(__struct, __flags, __field) \ | ||
171 | kmem_cache_create_usercopy(#__struct, \ | ||
172 | sizeof(struct __struct), \ | ||
173 | __alignof__(struct __struct), (__flags), \ | ||
174 | offsetof(struct __struct, __field), \ | ||
175 | sizeof_field(struct __struct, __field), NULL) | ||
159 | 176 | ||
160 | /* | 177 | /* |
161 | * Common kmalloc functions provided by all allocators | 178 | * Common kmalloc functions provided by all allocators |
@@ -167,15 +184,11 @@ void kzfree(const void *); | |||
167 | size_t ksize(const void *); | 184 | size_t ksize(const void *); |
168 | 185 | ||
169 | #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR | 186 | #ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR |
170 | const char *__check_heap_object(const void *ptr, unsigned long n, | 187 | void __check_heap_object(const void *ptr, unsigned long n, struct page *page, |
171 | struct page *page); | 188 | bool to_user); |
172 | #else | 189 | #else |
173 | static inline const char *__check_heap_object(const void *ptr, | 190 | static inline void __check_heap_object(const void *ptr, unsigned long n, |
174 | unsigned long n, | 191 | struct page *page, bool to_user) { } |
175 | struct page *page) | ||
176 | { | ||
177 | return NULL; | ||
178 | } | ||
179 | #endif | 192 | #endif |
180 | 193 | ||
181 | /* | 194 | /* |
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h index 072e46e9e1d5..7385547c04b1 100644 --- a/include/linux/slab_def.h +++ b/include/linux/slab_def.h | |||
@@ -85,6 +85,9 @@ struct kmem_cache { | |||
85 | unsigned int *random_seq; | 85 | unsigned int *random_seq; |
86 | #endif | 86 | #endif |
87 | 87 | ||
88 | size_t useroffset; /* Usercopy region offset */ | ||
89 | size_t usersize; /* Usercopy region size */ | ||
90 | |||
88 | struct kmem_cache_node *node[MAX_NUMNODES]; | 91 | struct kmem_cache_node *node[MAX_NUMNODES]; |
89 | }; | 92 | }; |
90 | 93 | ||
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h new file mode 100644 index 000000000000..c36cf121d2cd --- /dev/null +++ b/include/linux/slimbus.h | |||
@@ -0,0 +1,164 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2011-2017, The Linux Foundation | ||
4 | */ | ||
5 | |||
6 | #ifndef _LINUX_SLIMBUS_H | ||
7 | #define _LINUX_SLIMBUS_H | ||
8 | #include <linux/device.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/completion.h> | ||
11 | #include <linux/mod_devicetable.h> | ||
12 | |||
13 | extern struct bus_type slimbus_bus; | ||
14 | |||
15 | /** | ||
16 | * struct slim_eaddr - Enumeration address for a SLIMbus device | ||
17 | * @manf_id: Manufacturer Id for the device | ||
18 | * @prod_code: Product code | ||
19 | * @dev_index: Device index | ||
20 | * @instance: Instance value | ||
21 | */ | ||
22 | struct slim_eaddr { | ||
23 | u16 manf_id; | ||
24 | u16 prod_code; | ||
25 | u8 dev_index; | ||
26 | u8 instance; | ||
27 | } __packed; | ||
28 | |||
29 | /** | ||
30 | * enum slim_device_status - slim device status | ||
31 | * @SLIM_DEVICE_STATUS_DOWN: Slim device is absent or not reported yet. | ||
32 | * @SLIM_DEVICE_STATUS_UP: Slim device is announced on the bus. | ||
33 | * @SLIM_DEVICE_STATUS_RESERVED: Reserved for future use. | ||
34 | */ | ||
35 | enum slim_device_status { | ||
36 | SLIM_DEVICE_STATUS_DOWN = 0, | ||
37 | SLIM_DEVICE_STATUS_UP, | ||
38 | SLIM_DEVICE_STATUS_RESERVED, | ||
39 | }; | ||
40 | |||
41 | struct slim_controller; | ||
42 | |||
43 | /** | ||
44 | * struct slim_device - Slim device handle. | ||
45 | * @dev: Driver model representation of the device. | ||
46 | * @e_addr: Enumeration address of this device. | ||
47 | * @status: slim device status | ||
48 | * @ctrl: slim controller instance. | ||
49 | * @laddr: 1-byte Logical address of this device. | ||
50 | * @is_laddr_valid: indicates if the laddr is valid or not | ||
51 | * | ||
52 | * This is the client/device handle returned when a SLIMbus | ||
53 | * device is registered with a controller. | ||
54 | * Pointer to this structure is used by client-driver as a handle. | ||
55 | */ | ||
56 | struct slim_device { | ||
57 | struct device dev; | ||
58 | struct slim_eaddr e_addr; | ||
59 | struct slim_controller *ctrl; | ||
60 | enum slim_device_status status; | ||
61 | u8 laddr; | ||
62 | bool is_laddr_valid; | ||
63 | }; | ||
64 | |||
65 | #define to_slim_device(d) container_of(d, struct slim_device, dev) | ||
66 | |||
67 | /** | ||
68 | * struct slim_driver - SLIMbus 'generic device' (slave) device driver | ||
69 | * (similar to 'spi_device' on SPI) | ||
70 | * @probe: Binds this driver to a SLIMbus device. | ||
71 | * @remove: Unbinds this driver from the SLIMbus device. | ||
72 | * @shutdown: Standard shutdown callback used during powerdown/halt. | ||
73 | * @device_status: This callback is called when | ||
74 | * - The device reports present and gets a laddr assigned | ||
75 | * - The device reports absent, or the bus goes down. | ||
76 | * @driver: SLIMbus device drivers should initialize name and owner field of | ||
77 | * this structure | ||
78 | * @id_table: List of SLIMbus devices supported by this driver | ||
79 | */ | ||
80 | |||
81 | struct slim_driver { | ||
82 | int (*probe)(struct slim_device *sl); | ||
83 | void (*remove)(struct slim_device *sl); | ||
84 | void (*shutdown)(struct slim_device *sl); | ||
85 | int (*device_status)(struct slim_device *sl, | ||
86 | enum slim_device_status s); | ||
87 | struct device_driver driver; | ||
88 | const struct slim_device_id *id_table; | ||
89 | }; | ||
90 | #define to_slim_driver(d) container_of(d, struct slim_driver, driver) | ||
91 | |||
92 | /** | ||
93 | * struct slim_val_inf - Slimbus value or information element | ||
94 | * @start_offset: Specifies starting offset in information/value element map | ||
95 | * @rbuf: buffer to read the values | ||
96 | * @wbuf: buffer to write | ||
97 | * @num_bytes: upto 16. This ensures that the message will fit the slicesize | ||
98 | * per SLIMbus spec | ||
99 | * @comp: completion for asynchronous operations, valid only if TID is | ||
100 | * required for transaction, like REQUEST operations. | ||
101 | * Rest of the transactions are synchronous anyway. | ||
102 | */ | ||
103 | struct slim_val_inf { | ||
104 | u16 start_offset; | ||
105 | u8 num_bytes; | ||
106 | u8 *rbuf; | ||
107 | const u8 *wbuf; | ||
108 | struct completion *comp; | ||
109 | }; | ||
110 | |||
111 | /* | ||
112 | * use a macro to avoid include chaining to get THIS_MODULE | ||
113 | */ | ||
114 | #define slim_driver_register(drv) \ | ||
115 | __slim_driver_register(drv, THIS_MODULE) | ||
116 | int __slim_driver_register(struct slim_driver *drv, struct module *owner); | ||
117 | void slim_driver_unregister(struct slim_driver *drv); | ||
118 | |||
119 | /** | ||
120 | * module_slim_driver() - Helper macro for registering a SLIMbus driver | ||
121 | * @__slim_driver: slimbus_driver struct | ||
122 | * | ||
123 | * Helper macro for SLIMbus drivers which do not do anything special in module | ||
124 | * init/exit. This eliminates a lot of boilerplate. Each module may only | ||
125 | * use this macro once, and calling it replaces module_init() and module_exit() | ||
126 | */ | ||
127 | #define module_slim_driver(__slim_driver) \ | ||
128 | module_driver(__slim_driver, slim_driver_register, \ | ||
129 | slim_driver_unregister) | ||
130 | |||
131 | static inline void *slim_get_devicedata(const struct slim_device *dev) | ||
132 | { | ||
133 | return dev_get_drvdata(&dev->dev); | ||
134 | } | ||
135 | |||
136 | static inline void slim_set_devicedata(struct slim_device *dev, void *data) | ||
137 | { | ||
138 | dev_set_drvdata(&dev->dev, data); | ||
139 | } | ||
140 | |||
141 | struct slim_device *slim_get_device(struct slim_controller *ctrl, | ||
142 | struct slim_eaddr *e_addr); | ||
143 | int slim_get_logical_addr(struct slim_device *sbdev); | ||
144 | |||
145 | /* Information Element management messages */ | ||
146 | #define SLIM_MSG_MC_REQUEST_INFORMATION 0x20 | ||
147 | #define SLIM_MSG_MC_REQUEST_CLEAR_INFORMATION 0x21 | ||
148 | #define SLIM_MSG_MC_REPLY_INFORMATION 0x24 | ||
149 | #define SLIM_MSG_MC_CLEAR_INFORMATION 0x28 | ||
150 | #define SLIM_MSG_MC_REPORT_INFORMATION 0x29 | ||
151 | |||
152 | /* Value Element management messages */ | ||
153 | #define SLIM_MSG_MC_REQUEST_VALUE 0x60 | ||
154 | #define SLIM_MSG_MC_REQUEST_CHANGE_VALUE 0x61 | ||
155 | #define SLIM_MSG_MC_REPLY_VALUE 0x64 | ||
156 | #define SLIM_MSG_MC_CHANGE_VALUE 0x68 | ||
157 | |||
158 | int slim_xfer_msg(struct slim_device *sbdev, struct slim_val_inf *msg, | ||
159 | u8 mc); | ||
160 | int slim_readb(struct slim_device *sdev, u32 addr); | ||
161 | int slim_writeb(struct slim_device *sdev, u32 addr, u8 value); | ||
162 | int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val); | ||
163 | int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val); | ||
164 | #endif /* _LINUX_SLIMBUS_H */ | ||
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 0adae162dc8f..8ad99c47b19c 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
@@ -135,6 +135,9 @@ struct kmem_cache { | |||
135 | struct kasan_cache kasan_info; | 135 | struct kasan_cache kasan_info; |
136 | #endif | 136 | #endif |
137 | 137 | ||
138 | size_t useroffset; /* Usercopy region offset */ | ||
139 | size_t usersize; /* Usercopy region size */ | ||
140 | |||
138 | struct kmem_cache_node *node[MAX_NUMNODES]; | 141 | struct kmem_cache_node *node[MAX_NUMNODES]; |
139 | }; | 142 | }; |
140 | 143 | ||
diff --git a/include/linux/soc/brcmstb/brcmstb.h b/include/linux/soc/brcmstb/brcmstb.h index 12e548938bbb..8e884e0dda0a 100644 --- a/include/linux/soc/brcmstb/brcmstb.h +++ b/include/linux/soc/brcmstb/brcmstb.h | |||
@@ -13,12 +13,6 @@ static inline u32 BRCM_REV(u32 reg) | |||
13 | } | 13 | } |
14 | 14 | ||
15 | /* | 15 | /* |
16 | * Bus Interface Unit control register setup, must happen early during boot, | ||
17 | * before SMP is brought up, called by machine entry point. | ||
18 | */ | ||
19 | void brcmstb_biuctrl_init(void); | ||
20 | |||
21 | /* | ||
22 | * Helper functions for getting family or product id from the | 16 | * Helper functions for getting family or product id from the |
23 | * SoC driver. | 17 | * SoC driver. |
24 | */ | 18 | */ |
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h index e8d9f0d52933..b0a507d356ef 100644 --- a/include/linux/soc/mediatek/infracfg.h +++ b/include/linux/soc/mediatek/infracfg.h | |||
@@ -28,7 +28,8 @@ | |||
28 | #define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ | 28 | #define MT7622_TOP_AXI_PROT_EN_WB (BIT(2) | BIT(6) | \ |
29 | BIT(7) | BIT(8)) | 29 | BIT(7) | BIT(8)) |
30 | 30 | ||
31 | int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask); | 31 | int mtk_infracfg_set_bus_protection(struct regmap *infracfg, u32 mask, |
32 | int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask); | 32 | bool reg_update); |
33 | 33 | int mtk_infracfg_clear_bus_protection(struct regmap *infracfg, u32 mask, | |
34 | bool reg_update); | ||
34 | #endif /* __SOC_MEDIATEK_INFRACFG_H */ | 35 | #endif /* __SOC_MEDIATEK_INFRACFG_H */ |
diff --git a/include/linux/soc/qcom/qmi.h b/include/linux/soc/qcom/qmi.h new file mode 100644 index 000000000000..f4de33654a60 --- /dev/null +++ b/include/linux/soc/qcom/qmi.h | |||
@@ -0,0 +1,271 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved. | ||
4 | * Copyright (c) 2017, Linaro Ltd. | ||
5 | */ | ||
6 | #ifndef __QMI_HELPERS_H__ | ||
7 | #define __QMI_HELPERS_H__ | ||
8 | |||
9 | #include <linux/completion.h> | ||
10 | #include <linux/idr.h> | ||
11 | #include <linux/list.h> | ||
12 | #include <linux/qrtr.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/workqueue.h> | ||
15 | |||
16 | struct socket; | ||
17 | |||
18 | /** | ||
19 | * qmi_header - wireformat header of QMI messages | ||
20 | * @type: type of message | ||
21 | * @txn_id: transaction id | ||
22 | * @msg_id: message id | ||
23 | * @msg_len: length of message payload following header | ||
24 | */ | ||
25 | struct qmi_header { | ||
26 | u8 type; | ||
27 | u16 txn_id; | ||
28 | u16 msg_id; | ||
29 | u16 msg_len; | ||
30 | } __packed; | ||
31 | |||
32 | #define QMI_REQUEST 0 | ||
33 | #define QMI_RESPONSE 2 | ||
34 | #define QMI_INDICATION 4 | ||
35 | |||
36 | #define QMI_COMMON_TLV_TYPE 0 | ||
37 | |||
38 | enum qmi_elem_type { | ||
39 | QMI_EOTI, | ||
40 | QMI_OPT_FLAG, | ||
41 | QMI_DATA_LEN, | ||
42 | QMI_UNSIGNED_1_BYTE, | ||
43 | QMI_UNSIGNED_2_BYTE, | ||
44 | QMI_UNSIGNED_4_BYTE, | ||
45 | QMI_UNSIGNED_8_BYTE, | ||
46 | QMI_SIGNED_2_BYTE_ENUM, | ||
47 | QMI_SIGNED_4_BYTE_ENUM, | ||
48 | QMI_STRUCT, | ||
49 | QMI_STRING, | ||
50 | }; | ||
51 | |||
52 | enum qmi_array_type { | ||
53 | NO_ARRAY, | ||
54 | STATIC_ARRAY, | ||
55 | VAR_LEN_ARRAY, | ||
56 | }; | ||
57 | |||
58 | /** | ||
59 | * struct qmi_elem_info - describes how to encode a single QMI element | ||
60 | * @data_type: Data type of this element. | ||
61 | * @elem_len: Array length of this element, if an array. | ||
62 | * @elem_size: Size of a single instance of this data type. | ||
63 | * @array_type: Array type of this element. | ||
64 | * @tlv_type: QMI message specific type to identify which element | ||
65 | * is present in an incoming message. | ||
66 | * @offset: Specifies the offset of the first instance of this | ||
67 | * element in the data structure. | ||
68 | * @ei_array: Null-terminated array of @qmi_elem_info to describe nested | ||
69 | * structures. | ||
70 | */ | ||
71 | struct qmi_elem_info { | ||
72 | enum qmi_elem_type data_type; | ||
73 | u32 elem_len; | ||
74 | u32 elem_size; | ||
75 | enum qmi_array_type array_type; | ||
76 | u8 tlv_type; | ||
77 | u32 offset; | ||
78 | struct qmi_elem_info *ei_array; | ||
79 | }; | ||
80 | |||
81 | #define QMI_RESULT_SUCCESS_V01 0 | ||
82 | #define QMI_RESULT_FAILURE_V01 1 | ||
83 | |||
84 | #define QMI_ERR_NONE_V01 0 | ||
85 | #define QMI_ERR_MALFORMED_MSG_V01 1 | ||
86 | #define QMI_ERR_NO_MEMORY_V01 2 | ||
87 | #define QMI_ERR_INTERNAL_V01 3 | ||
88 | #define QMI_ERR_CLIENT_IDS_EXHAUSTED_V01 5 | ||
89 | #define QMI_ERR_INVALID_ID_V01 41 | ||
90 | #define QMI_ERR_ENCODING_V01 58 | ||
91 | #define QMI_ERR_INCOMPATIBLE_STATE_V01 90 | ||
92 | #define QMI_ERR_NOT_SUPPORTED_V01 94 | ||
93 | |||
94 | /** | ||
95 | * qmi_response_type_v01 - common response header (decoded) | ||
96 | * @result: result of the transaction | ||
97 | * @error: error value, when @result is QMI_RESULT_FAILURE_V01 | ||
98 | */ | ||
99 | struct qmi_response_type_v01 { | ||
100 | u16 result; | ||
101 | u16 error; | ||
102 | }; | ||
103 | |||
104 | extern struct qmi_elem_info qmi_response_type_v01_ei[]; | ||
105 | |||
106 | /** | ||
107 | * struct qmi_service - context to track lookup-results | ||
108 | * @service: service type | ||
109 | * @version: version of the @service | ||
110 | * @instance: instance id of the @service | ||
111 | * @node: node of the service | ||
112 | * @port: port of the service | ||
113 | * @priv: handle for client's use | ||
114 | * @list_node: list_head for house keeping | ||
115 | */ | ||
116 | struct qmi_service { | ||
117 | unsigned int service; | ||
118 | unsigned int version; | ||
119 | unsigned int instance; | ||
120 | |||
121 | unsigned int node; | ||
122 | unsigned int port; | ||
123 | |||
124 | void *priv; | ||
125 | struct list_head list_node; | ||
126 | }; | ||
127 | |||
128 | struct qmi_handle; | ||
129 | |||
130 | /** | ||
131 | * struct qmi_ops - callbacks for qmi_handle | ||
132 | * @new_server: inform client of a new_server lookup-result, returning | ||
133 | * successfully from this call causes the library to call | ||
134 | * @del_server as the service is removed from the | ||
135 | * lookup-result. @priv of the qmi_service can be used by | ||
136 | * the client | ||
137 | * @del_server: inform client of a del_server lookup-result | ||
138 | * @net_reset: inform client that the name service was restarted and | ||
139 | * that and any state needs to be released | ||
140 | * @msg_handler: invoked for incoming messages, allows a client to | ||
141 | * override the usual QMI message handler | ||
142 | * @bye: inform a client that all clients from a node are gone | ||
143 | * @del_client: inform a client that a particular client is gone | ||
144 | */ | ||
145 | struct qmi_ops { | ||
146 | int (*new_server)(struct qmi_handle *qmi, struct qmi_service *svc); | ||
147 | void (*del_server)(struct qmi_handle *qmi, struct qmi_service *svc); | ||
148 | void (*net_reset)(struct qmi_handle *qmi); | ||
149 | void (*msg_handler)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, | ||
150 | const void *data, size_t count); | ||
151 | void (*bye)(struct qmi_handle *qmi, unsigned int node); | ||
152 | void (*del_client)(struct qmi_handle *qmi, | ||
153 | unsigned int node, unsigned int port); | ||
154 | }; | ||
155 | |||
156 | /** | ||
157 | * struct qmi_txn - transaction context | ||
158 | * @qmi: QMI handle this transaction is associated with | ||
159 | * @id: transaction id | ||
160 | * @lock: for synchronization between handler and waiter of messages | ||
161 | * @completion: completion object as the transaction receives a response | ||
162 | * @result: result code for the completed transaction | ||
163 | * @ei: description of the QMI encoded response (optional) | ||
164 | * @dest: destination buffer to decode message into (optional) | ||
165 | */ | ||
166 | struct qmi_txn { | ||
167 | struct qmi_handle *qmi; | ||
168 | |||
169 | int id; | ||
170 | |||
171 | struct mutex lock; | ||
172 | struct completion completion; | ||
173 | int result; | ||
174 | |||
175 | struct qmi_elem_info *ei; | ||
176 | void *dest; | ||
177 | }; | ||
178 | |||
179 | /** | ||
180 | * struct qmi_msg_handler - description of QMI message handler | ||
181 | * @type: type of message | ||
182 | * @msg_id: message id | ||
183 | * @ei: description of the QMI encoded message | ||
184 | * @decoded_size: size of the decoded object | ||
185 | * @fn: function to invoke as the message is decoded | ||
186 | */ | ||
187 | struct qmi_msg_handler { | ||
188 | unsigned int type; | ||
189 | unsigned int msg_id; | ||
190 | |||
191 | struct qmi_elem_info *ei; | ||
192 | |||
193 | size_t decoded_size; | ||
194 | void (*fn)(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, | ||
195 | struct qmi_txn *txn, const void *decoded); | ||
196 | }; | ||
197 | |||
198 | /** | ||
199 | * struct qmi_handle - QMI context | ||
200 | * @sock: socket handle | ||
201 | * @sock_lock: synchronization of @sock modifications | ||
202 | * @sq: sockaddr of @sock | ||
203 | * @work: work for handling incoming messages | ||
204 | * @wq: workqueue to post @work on | ||
205 | * @recv_buf: scratch buffer for handling incoming messages | ||
206 | * @recv_buf_size: size of @recv_buf | ||
207 | * @lookups: list of registered lookup requests | ||
208 | * @lookup_results: list of lookup-results advertised to the client | ||
209 | * @services: list of registered services (by this client) | ||
210 | * @ops: reference to callbacks | ||
211 | * @txns: outstanding transactions | ||
212 | * @txn_lock: lock for modifications of @txns | ||
213 | * @handlers: list of handlers for incoming messages | ||
214 | */ | ||
215 | struct qmi_handle { | ||
216 | struct socket *sock; | ||
217 | struct mutex sock_lock; | ||
218 | |||
219 | struct sockaddr_qrtr sq; | ||
220 | |||
221 | struct work_struct work; | ||
222 | struct workqueue_struct *wq; | ||
223 | |||
224 | void *recv_buf; | ||
225 | size_t recv_buf_size; | ||
226 | |||
227 | struct list_head lookups; | ||
228 | struct list_head lookup_results; | ||
229 | struct list_head services; | ||
230 | |||
231 | struct qmi_ops ops; | ||
232 | |||
233 | struct idr txns; | ||
234 | struct mutex txn_lock; | ||
235 | |||
236 | const struct qmi_msg_handler *handlers; | ||
237 | }; | ||
238 | |||
239 | int qmi_add_lookup(struct qmi_handle *qmi, unsigned int service, | ||
240 | unsigned int version, unsigned int instance); | ||
241 | int qmi_add_server(struct qmi_handle *qmi, unsigned int service, | ||
242 | unsigned int version, unsigned int instance); | ||
243 | |||
244 | int qmi_handle_init(struct qmi_handle *qmi, size_t max_msg_len, | ||
245 | const struct qmi_ops *ops, | ||
246 | const struct qmi_msg_handler *handlers); | ||
247 | void qmi_handle_release(struct qmi_handle *qmi); | ||
248 | |||
249 | ssize_t qmi_send_request(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, | ||
250 | struct qmi_txn *txn, int msg_id, size_t len, | ||
251 | struct qmi_elem_info *ei, const void *c_struct); | ||
252 | ssize_t qmi_send_response(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, | ||
253 | struct qmi_txn *txn, int msg_id, size_t len, | ||
254 | struct qmi_elem_info *ei, const void *c_struct); | ||
255 | ssize_t qmi_send_indication(struct qmi_handle *qmi, struct sockaddr_qrtr *sq, | ||
256 | int msg_id, size_t len, struct qmi_elem_info *ei, | ||
257 | const void *c_struct); | ||
258 | |||
259 | void *qmi_encode_message(int type, unsigned int msg_id, size_t *len, | ||
260 | unsigned int txn_id, struct qmi_elem_info *ei, | ||
261 | const void *c_struct); | ||
262 | |||
263 | int qmi_decode_message(const void *buf, size_t len, | ||
264 | struct qmi_elem_info *ei, void *c_struct); | ||
265 | |||
266 | int qmi_txn_init(struct qmi_handle *qmi, struct qmi_txn *txn, | ||
267 | struct qmi_elem_info *ei, void *c_struct); | ||
268 | int qmi_txn_wait(struct qmi_txn *txn, unsigned long timeout); | ||
269 | void qmi_txn_cancel(struct qmi_txn *txn); | ||
270 | |||
271 | #endif | ||
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h new file mode 100644 index 000000000000..e91fdcf41049 --- /dev/null +++ b/include/linux/soundwire/sdw.h | |||
@@ -0,0 +1,479 @@ | |||
1 | // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) | ||
2 | // Copyright(c) 2015-17 Intel Corporation. | ||
3 | |||
4 | #ifndef __SOUNDWIRE_H | ||
5 | #define __SOUNDWIRE_H | ||
6 | |||
7 | struct sdw_bus; | ||
8 | struct sdw_slave; | ||
9 | |||
10 | /* SDW spec defines and enums, as defined by MIPI 1.1. Spec */ | ||
11 | |||
12 | /* SDW Broadcast Device Number */ | ||
13 | #define SDW_BROADCAST_DEV_NUM 15 | ||
14 | |||
15 | /* SDW Enumeration Device Number */ | ||
16 | #define SDW_ENUM_DEV_NUM 0 | ||
17 | |||
18 | /* SDW Group Device Numbers */ | ||
19 | #define SDW_GROUP12_DEV_NUM 12 | ||
20 | #define SDW_GROUP13_DEV_NUM 13 | ||
21 | |||
22 | /* SDW Master Device Number, not supported yet */ | ||
23 | #define SDW_MASTER_DEV_NUM 14 | ||
24 | |||
25 | #define SDW_NUM_DEV_ID_REGISTERS 6 | ||
26 | |||
27 | #define SDW_MAX_DEVICES 11 | ||
28 | |||
29 | /** | ||
30 | * enum sdw_slave_status - Slave status | ||
31 | * @SDW_SLAVE_UNATTACHED: Slave is not attached with the bus. | ||
32 | * @SDW_SLAVE_ATTACHED: Slave is attached with bus. | ||
33 | * @SDW_SLAVE_ALERT: Some alert condition on the Slave | ||
34 | * @SDW_SLAVE_RESERVED: Reserved for future use | ||
35 | */ | ||
36 | enum sdw_slave_status { | ||
37 | SDW_SLAVE_UNATTACHED = 0, | ||
38 | SDW_SLAVE_ATTACHED = 1, | ||
39 | SDW_SLAVE_ALERT = 2, | ||
40 | SDW_SLAVE_RESERVED = 3, | ||
41 | }; | ||
42 | |||
43 | /** | ||
44 | * enum sdw_command_response - Command response as defined by SDW spec | ||
45 | * @SDW_CMD_OK: cmd was successful | ||
46 | * @SDW_CMD_IGNORED: cmd was ignored | ||
47 | * @SDW_CMD_FAIL: cmd was NACKed | ||
48 | * @SDW_CMD_TIMEOUT: cmd timedout | ||
49 | * @SDW_CMD_FAIL_OTHER: cmd failed due to other reason than above | ||
50 | * | ||
51 | * NOTE: The enum is different than actual Spec as response in the Spec is | ||
52 | * combination of ACK/NAK bits | ||
53 | * | ||
54 | * SDW_CMD_TIMEOUT/FAIL_OTHER is defined for SW use, not in spec | ||
55 | */ | ||
56 | enum sdw_command_response { | ||
57 | SDW_CMD_OK = 0, | ||
58 | SDW_CMD_IGNORED = 1, | ||
59 | SDW_CMD_FAIL = 2, | ||
60 | SDW_CMD_TIMEOUT = 3, | ||
61 | SDW_CMD_FAIL_OTHER = 4, | ||
62 | }; | ||
63 | |||
64 | /* | ||
65 | * SDW properties, defined in MIPI DisCo spec v1.0 | ||
66 | */ | ||
67 | enum sdw_clk_stop_reset_behave { | ||
68 | SDW_CLK_STOP_KEEP_STATUS = 1, | ||
69 | }; | ||
70 | |||
71 | /** | ||
72 | * enum sdw_p15_behave - Slave Port 15 behaviour when the Master attempts a | ||
73 | * read | ||
74 | * @SDW_P15_READ_IGNORED: Read is ignored | ||
75 | * @SDW_P15_CMD_OK: Command is ok | ||
76 | */ | ||
77 | enum sdw_p15_behave { | ||
78 | SDW_P15_READ_IGNORED = 0, | ||
79 | SDW_P15_CMD_OK = 1, | ||
80 | }; | ||
81 | |||
82 | /** | ||
83 | * enum sdw_dpn_type - Data port types | ||
84 | * @SDW_DPN_FULL: Full Data Port is supported | ||
85 | * @SDW_DPN_SIMPLE: Simplified Data Port as defined in spec. | ||
86 | * DPN_SampleCtrl2, DPN_OffsetCtrl2, DPN_HCtrl and DPN_BlockCtrl3 | ||
87 | * are not implemented. | ||
88 | * @SDW_DPN_REDUCED: Reduced Data Port as defined in spec. | ||
89 | * DPN_SampleCtrl2, DPN_HCtrl are not implemented. | ||
90 | */ | ||
91 | enum sdw_dpn_type { | ||
92 | SDW_DPN_FULL = 0, | ||
93 | SDW_DPN_SIMPLE = 1, | ||
94 | SDW_DPN_REDUCED = 2, | ||
95 | }; | ||
96 | |||
97 | /** | ||
98 | * enum sdw_clk_stop_mode - Clock Stop modes | ||
99 | * @SDW_CLK_STOP_MODE0: Slave can continue operation seamlessly on clock | ||
100 | * restart | ||
101 | * @SDW_CLK_STOP_MODE1: Slave may have entered a deeper power-saving mode, | ||
102 | * not capable of continuing operation seamlessly when the clock restarts | ||
103 | */ | ||
104 | enum sdw_clk_stop_mode { | ||
105 | SDW_CLK_STOP_MODE0 = 0, | ||
106 | SDW_CLK_STOP_MODE1 = 1, | ||
107 | }; | ||
108 | |||
109 | /** | ||
110 | * struct sdw_dp0_prop - DP0 properties | ||
111 | * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 | ||
112 | * (inclusive) | ||
113 | * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 | ||
114 | * (inclusive) | ||
115 | * @num_words: number of wordlengths supported | ||
116 | * @words: wordlengths supported | ||
117 | * @flow_controlled: Slave implementation results in an OK_NotReady | ||
118 | * response | ||
119 | * @simple_ch_prep_sm: If channel prepare sequence is required | ||
120 | * @device_interrupts: If implementation-defined interrupts are supported | ||
121 | * | ||
122 | * The wordlengths are specified by Spec as max, min AND number of | ||
123 | * discrete values, implementation can define based on the wordlengths they | ||
124 | * support | ||
125 | */ | ||
126 | struct sdw_dp0_prop { | ||
127 | u32 max_word; | ||
128 | u32 min_word; | ||
129 | u32 num_words; | ||
130 | u32 *words; | ||
131 | bool flow_controlled; | ||
132 | bool simple_ch_prep_sm; | ||
133 | bool device_interrupts; | ||
134 | }; | ||
135 | |||
136 | /** | ||
137 | * struct sdw_dpn_audio_mode - Audio mode properties for DPn | ||
138 | * @bus_min_freq: Minimum bus frequency, in Hz | ||
139 | * @bus_max_freq: Maximum bus frequency, in Hz | ||
140 | * @bus_num_freq: Number of discrete frequencies supported | ||
141 | * @bus_freq: Discrete bus frequencies, in Hz | ||
142 | * @min_freq: Minimum sampling frequency, in Hz | ||
143 | * @max_freq: Maximum sampling bus frequency, in Hz | ||
144 | * @num_freq: Number of discrete sampling frequency supported | ||
145 | * @freq: Discrete sampling frequencies, in Hz | ||
146 | * @prep_ch_behave: Specifies the dependencies between Channel Prepare | ||
147 | * sequence and bus clock configuration | ||
148 | * If 0, Channel Prepare can happen at any Bus clock rate | ||
149 | * If 1, Channel Prepare sequence shall happen only after Bus clock is | ||
150 | * changed to a frequency supported by this mode or compatible modes | ||
151 | * described by the next field | ||
152 | * @glitchless: Bitmap describing possible glitchless transitions from this | ||
153 | * Audio Mode to other Audio Modes | ||
154 | */ | ||
155 | struct sdw_dpn_audio_mode { | ||
156 | u32 bus_min_freq; | ||
157 | u32 bus_max_freq; | ||
158 | u32 bus_num_freq; | ||
159 | u32 *bus_freq; | ||
160 | u32 max_freq; | ||
161 | u32 min_freq; | ||
162 | u32 num_freq; | ||
163 | u32 *freq; | ||
164 | u32 prep_ch_behave; | ||
165 | u32 glitchless; | ||
166 | }; | ||
167 | |||
168 | /** | ||
169 | * struct sdw_dpn_prop - Data Port DPn properties | ||
170 | * @num: port number | ||
171 | * @max_word: Maximum number of bits in a Payload Channel Sample, 1 to 64 | ||
172 | * (inclusive) | ||
173 | * @min_word: Minimum number of bits in a Payload Channel Sample, 1 to 64 | ||
174 | * (inclusive) | ||
175 | * @num_words: Number of discrete supported wordlengths | ||
176 | * @words: Discrete supported wordlength | ||
177 | * @type: Data port type. Full, Simplified or Reduced | ||
178 | * @max_grouping: Maximum number of samples that can be grouped together for | ||
179 | * a full data port | ||
180 | * @simple_ch_prep_sm: If the port supports simplified channel prepare state | ||
181 | * machine | ||
182 | * @ch_prep_timeout: Port-specific timeout value, in milliseconds | ||
183 | * @device_interrupts: If set, each bit corresponds to support for | ||
184 | * implementation-defined interrupts | ||
185 | * @max_ch: Maximum channels supported | ||
186 | * @min_ch: Minimum channels supported | ||
187 | * @num_ch: Number of discrete channels supported | ||
188 | * @ch: Discrete channels supported | ||
189 | * @num_ch_combinations: Number of channel combinations supported | ||
190 | * @ch_combinations: Channel combinations supported | ||
191 | * @modes: SDW mode supported | ||
192 | * @max_async_buffer: Number of samples that this port can buffer in | ||
193 | * asynchronous modes | ||
194 | * @block_pack_mode: Type of block port mode supported | ||
195 | * @port_encoding: Payload Channel Sample encoding schemes supported | ||
196 | * @audio_modes: Audio modes supported | ||
197 | */ | ||
198 | struct sdw_dpn_prop { | ||
199 | u32 num; | ||
200 | u32 max_word; | ||
201 | u32 min_word; | ||
202 | u32 num_words; | ||
203 | u32 *words; | ||
204 | enum sdw_dpn_type type; | ||
205 | u32 max_grouping; | ||
206 | bool simple_ch_prep_sm; | ||
207 | u32 ch_prep_timeout; | ||
208 | u32 device_interrupts; | ||
209 | u32 max_ch; | ||
210 | u32 min_ch; | ||
211 | u32 num_ch; | ||
212 | u32 *ch; | ||
213 | u32 num_ch_combinations; | ||
214 | u32 *ch_combinations; | ||
215 | u32 modes; | ||
216 | u32 max_async_buffer; | ||
217 | bool block_pack_mode; | ||
218 | u32 port_encoding; | ||
219 | struct sdw_dpn_audio_mode *audio_modes; | ||
220 | }; | ||
221 | |||
222 | /** | ||
223 | * struct sdw_slave_prop - SoundWire Slave properties | ||
224 | * @mipi_revision: Spec version of the implementation | ||
225 | * @wake_capable: Wake-up events are supported | ||
226 | * @test_mode_capable: If test mode is supported | ||
227 | * @clk_stop_mode1: Clock-Stop Mode 1 is supported | ||
228 | * @simple_clk_stop_capable: Simple clock mode is supported | ||
229 | * @clk_stop_timeout: Worst-case latency of the Clock Stop Prepare State | ||
230 | * Machine transitions, in milliseconds | ||
231 | * @ch_prep_timeout: Worst-case latency of the Channel Prepare State Machine | ||
232 | * transitions, in milliseconds | ||
233 | * @reset_behave: Slave keeps the status of the SlaveStopClockPrepare | ||
234 | * state machine (P=1 SCSP_SM) after exit from clock-stop mode1 | ||
235 | * @high_PHY_capable: Slave is HighPHY capable | ||
236 | * @paging_support: Slave implements paging registers SCP_AddrPage1 and | ||
237 | * SCP_AddrPage2 | ||
238 | * @bank_delay_support: Slave implements bank delay/bridge support registers | ||
239 | * SCP_BankDelay and SCP_NextFrame | ||
240 | * @p15_behave: Slave behavior when the Master attempts a read to the Port15 | ||
241 | * alias | ||
242 | * @lane_control_support: Slave supports lane control | ||
243 | * @master_count: Number of Masters present on this Slave | ||
244 | * @source_ports: Bitmap identifying source ports | ||
245 | * @sink_ports: Bitmap identifying sink ports | ||
246 | * @dp0_prop: Data Port 0 properties | ||
247 | * @src_dpn_prop: Source Data Port N properties | ||
248 | * @sink_dpn_prop: Sink Data Port N properties | ||
249 | */ | ||
250 | struct sdw_slave_prop { | ||
251 | u32 mipi_revision; | ||
252 | bool wake_capable; | ||
253 | bool test_mode_capable; | ||
254 | bool clk_stop_mode1; | ||
255 | bool simple_clk_stop_capable; | ||
256 | u32 clk_stop_timeout; | ||
257 | u32 ch_prep_timeout; | ||
258 | enum sdw_clk_stop_reset_behave reset_behave; | ||
259 | bool high_PHY_capable; | ||
260 | bool paging_support; | ||
261 | bool bank_delay_support; | ||
262 | enum sdw_p15_behave p15_behave; | ||
263 | bool lane_control_support; | ||
264 | u32 master_count; | ||
265 | u32 source_ports; | ||
266 | u32 sink_ports; | ||
267 | struct sdw_dp0_prop *dp0_prop; | ||
268 | struct sdw_dpn_prop *src_dpn_prop; | ||
269 | struct sdw_dpn_prop *sink_dpn_prop; | ||
270 | }; | ||
271 | |||
272 | /** | ||
273 | * struct sdw_master_prop - Master properties | ||
274 | * @revision: MIPI spec version of the implementation | ||
275 | * @master_count: Number of masters | ||
276 | * @clk_stop_mode: Bitmap for Clock Stop modes supported | ||
277 | * @max_freq: Maximum Bus clock frequency, in Hz | ||
278 | * @num_clk_gears: Number of clock gears supported | ||
279 | * @clk_gears: Clock gears supported | ||
280 | * @num_freq: Number of clock frequencies supported, in Hz | ||
281 | * @freq: Clock frequencies supported, in Hz | ||
282 | * @default_frame_rate: Controller default Frame rate, in Hz | ||
283 | * @default_row: Number of rows | ||
284 | * @default_col: Number of columns | ||
285 | * @dynamic_frame: Dynamic frame supported | ||
286 | * @err_threshold: Number of times that software may retry sending a single | ||
287 | * command | ||
288 | * @dpn_prop: Data Port N properties | ||
289 | */ | ||
290 | struct sdw_master_prop { | ||
291 | u32 revision; | ||
292 | u32 master_count; | ||
293 | enum sdw_clk_stop_mode clk_stop_mode; | ||
294 | u32 max_freq; | ||
295 | u32 num_clk_gears; | ||
296 | u32 *clk_gears; | ||
297 | u32 num_freq; | ||
298 | u32 *freq; | ||
299 | u32 default_frame_rate; | ||
300 | u32 default_row; | ||
301 | u32 default_col; | ||
302 | bool dynamic_frame; | ||
303 | u32 err_threshold; | ||
304 | struct sdw_dpn_prop *dpn_prop; | ||
305 | }; | ||
306 | |||
307 | int sdw_master_read_prop(struct sdw_bus *bus); | ||
308 | int sdw_slave_read_prop(struct sdw_slave *slave); | ||
309 | |||
310 | /* | ||
311 | * SDW Slave Structures and APIs | ||
312 | */ | ||
313 | |||
314 | /** | ||
315 | * struct sdw_slave_id - Slave ID | ||
316 | * @mfg_id: MIPI Manufacturer ID | ||
317 | * @part_id: Device Part ID | ||
318 | * @class_id: MIPI Class ID, unused now. | ||
319 | * Currently a placeholder in MIPI SoundWire Spec | ||
320 | * @unique_id: Device unique ID | ||
321 | * @sdw_version: SDW version implemented | ||
322 | * | ||
323 | * The order of the IDs here does not follow the DisCo spec definitions | ||
324 | */ | ||
325 | struct sdw_slave_id { | ||
326 | __u16 mfg_id; | ||
327 | __u16 part_id; | ||
328 | __u8 class_id; | ||
329 | __u8 unique_id:4; | ||
330 | __u8 sdw_version:4; | ||
331 | }; | ||
332 | |||
333 | /** | ||
334 | * struct sdw_slave_intr_status - Slave interrupt status | ||
335 | * @control_port: control port status | ||
336 | * @port: data port status | ||
337 | */ | ||
338 | struct sdw_slave_intr_status { | ||
339 | u8 control_port; | ||
340 | u8 port[15]; | ||
341 | }; | ||
342 | |||
343 | /** | ||
344 | * struct sdw_slave_ops - Slave driver callback ops | ||
345 | * @read_prop: Read Slave properties | ||
346 | * @interrupt_callback: Device interrupt notification (invoked in thread | ||
347 | * context) | ||
348 | * @update_status: Update Slave status | ||
349 | */ | ||
350 | struct sdw_slave_ops { | ||
351 | int (*read_prop)(struct sdw_slave *sdw); | ||
352 | int (*interrupt_callback)(struct sdw_slave *slave, | ||
353 | struct sdw_slave_intr_status *status); | ||
354 | int (*update_status)(struct sdw_slave *slave, | ||
355 | enum sdw_slave_status status); | ||
356 | }; | ||
357 | |||
358 | /** | ||
359 | * struct sdw_slave - SoundWire Slave | ||
360 | * @id: MIPI device ID | ||
361 | * @dev: Linux device | ||
362 | * @status: Status reported by the Slave | ||
363 | * @bus: Bus handle | ||
364 | * @ops: Slave callback ops | ||
365 | * @prop: Slave properties | ||
366 | * @node: node for bus list | ||
367 | * @port_ready: Port ready completion flag for each Slave port | ||
368 | * @dev_num: Device Number assigned by Bus | ||
369 | */ | ||
370 | struct sdw_slave { | ||
371 | struct sdw_slave_id id; | ||
372 | struct device dev; | ||
373 | enum sdw_slave_status status; | ||
374 | struct sdw_bus *bus; | ||
375 | const struct sdw_slave_ops *ops; | ||
376 | struct sdw_slave_prop prop; | ||
377 | struct list_head node; | ||
378 | struct completion *port_ready; | ||
379 | u16 dev_num; | ||
380 | }; | ||
381 | |||
382 | #define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev) | ||
383 | |||
384 | struct sdw_driver { | ||
385 | const char *name; | ||
386 | |||
387 | int (*probe)(struct sdw_slave *sdw, | ||
388 | const struct sdw_device_id *id); | ||
389 | int (*remove)(struct sdw_slave *sdw); | ||
390 | void (*shutdown)(struct sdw_slave *sdw); | ||
391 | |||
392 | const struct sdw_device_id *id_table; | ||
393 | const struct sdw_slave_ops *ops; | ||
394 | |||
395 | struct device_driver driver; | ||
396 | }; | ||
397 | |||
398 | #define SDW_SLAVE_ENTRY(_mfg_id, _part_id, _drv_data) \ | ||
399 | { .mfg_id = (_mfg_id), .part_id = (_part_id), \ | ||
400 | .driver_data = (unsigned long)(_drv_data) } | ||
401 | |||
402 | int sdw_handle_slave_status(struct sdw_bus *bus, | ||
403 | enum sdw_slave_status status[]); | ||
404 | |||
405 | /* | ||
406 | * SDW master structures and APIs | ||
407 | */ | ||
408 | |||
409 | struct sdw_msg; | ||
410 | |||
411 | /** | ||
412 | * struct sdw_defer - SDW deffered message | ||
413 | * @length: message length | ||
414 | * @complete: message completion | ||
415 | * @msg: SDW message | ||
416 | */ | ||
417 | struct sdw_defer { | ||
418 | int length; | ||
419 | struct completion complete; | ||
420 | struct sdw_msg *msg; | ||
421 | }; | ||
422 | |||
423 | /** | ||
424 | * struct sdw_master_ops - Master driver ops | ||
425 | * @read_prop: Read Master properties | ||
426 | * @xfer_msg: Transfer message callback | ||
427 | * @xfer_msg_defer: Defer version of transfer message callback | ||
428 | * @reset_page_addr: Reset the SCP page address registers | ||
429 | */ | ||
430 | struct sdw_master_ops { | ||
431 | int (*read_prop)(struct sdw_bus *bus); | ||
432 | |||
433 | enum sdw_command_response (*xfer_msg) | ||
434 | (struct sdw_bus *bus, struct sdw_msg *msg); | ||
435 | enum sdw_command_response (*xfer_msg_defer) | ||
436 | (struct sdw_bus *bus, struct sdw_msg *msg, | ||
437 | struct sdw_defer *defer); | ||
438 | enum sdw_command_response (*reset_page_addr) | ||
439 | (struct sdw_bus *bus, unsigned int dev_num); | ||
440 | }; | ||
441 | |||
442 | /** | ||
443 | * struct sdw_bus - SoundWire bus | ||
444 | * @dev: Master linux device | ||
445 | * @link_id: Link id number, can be 0 to N, unique for each Master | ||
446 | * @slaves: list of Slaves on this bus | ||
447 | * @assigned: Bitmap for Slave device numbers. | ||
448 | * Bit set implies used number, bit clear implies unused number. | ||
449 | * @bus_lock: bus lock | ||
450 | * @msg_lock: message lock | ||
451 | * @ops: Master callback ops | ||
452 | * @prop: Master properties | ||
453 | * @defer_msg: Defer message | ||
454 | * @clk_stop_timeout: Clock stop timeout computed | ||
455 | */ | ||
456 | struct sdw_bus { | ||
457 | struct device *dev; | ||
458 | unsigned int link_id; | ||
459 | struct list_head slaves; | ||
460 | DECLARE_BITMAP(assigned, SDW_MAX_DEVICES); | ||
461 | struct mutex bus_lock; | ||
462 | struct mutex msg_lock; | ||
463 | const struct sdw_master_ops *ops; | ||
464 | struct sdw_master_prop prop; | ||
465 | struct sdw_defer defer_msg; | ||
466 | unsigned int clk_stop_timeout; | ||
467 | }; | ||
468 | |||
469 | int sdw_add_bus_master(struct sdw_bus *bus); | ||
470 | void sdw_delete_bus_master(struct sdw_bus *bus); | ||
471 | |||
472 | /* messaging and data APIs */ | ||
473 | |||
474 | int sdw_read(struct sdw_slave *slave, u32 addr); | ||
475 | int sdw_write(struct sdw_slave *slave, u32 addr, u8 value); | ||
476 | int sdw_nread(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); | ||
477 | int sdw_nwrite(struct sdw_slave *slave, u32 addr, size_t count, u8 *val); | ||
478 | |||
479 | #endif /* __SOUNDWIRE_H */ | ||
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h new file mode 100644 index 000000000000..4b37528f592d --- /dev/null +++ b/include/linux/soundwire/sdw_intel.h | |||
@@ -0,0 +1,24 @@ | |||
1 | // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) | ||
2 | // Copyright(c) 2015-17 Intel Corporation. | ||
3 | |||
4 | #ifndef __SDW_INTEL_H | ||
5 | #define __SDW_INTEL_H | ||
6 | |||
7 | /** | ||
8 | * struct sdw_intel_res - Soundwire Intel resource structure | ||
9 | * @mmio_base: mmio base of SoundWire registers | ||
10 | * @irq: interrupt number | ||
11 | * @handle: ACPI parent handle | ||
12 | * @parent: parent device | ||
13 | */ | ||
14 | struct sdw_intel_res { | ||
15 | void __iomem *mmio_base; | ||
16 | int irq; | ||
17 | acpi_handle handle; | ||
18 | struct device *parent; | ||
19 | }; | ||
20 | |||
21 | void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res); | ||
22 | void sdw_intel_exit(void *arg); | ||
23 | |||
24 | #endif | ||
diff --git a/include/linux/soundwire/sdw_registers.h b/include/linux/soundwire/sdw_registers.h new file mode 100644 index 000000000000..df472b1ab410 --- /dev/null +++ b/include/linux/soundwire/sdw_registers.h | |||
@@ -0,0 +1,194 @@ | |||
1 | // SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) | ||
2 | // Copyright(c) 2015-17 Intel Corporation. | ||
3 | |||
4 | #ifndef __SDW_REGISTERS_H | ||
5 | #define __SDW_REGISTERS_H | ||
6 | |||
7 | /* | ||
8 | * typically we define register and shifts but if one observes carefully, | ||
9 | * the shift can be generated from MASKS using few bit primitaives like ffs | ||
10 | * etc, so we use that and avoid defining shifts | ||
11 | */ | ||
12 | #define SDW_REG_SHIFT(n) (ffs(n) - 1) | ||
13 | |||
14 | /* | ||
15 | * SDW registers as defined by MIPI 1.1 Spec | ||
16 | */ | ||
17 | #define SDW_REGADDR GENMASK(14, 0) | ||
18 | #define SDW_SCP_ADDRPAGE2_MASK GENMASK(22, 15) | ||
19 | #define SDW_SCP_ADDRPAGE1_MASK GENMASK(30, 23) | ||
20 | |||
21 | #define SDW_REG_NO_PAGE 0x00008000 | ||
22 | #define SDW_REG_OPTIONAL_PAGE 0x00010000 | ||
23 | #define SDW_REG_MAX 0x80000000 | ||
24 | |||
25 | #define SDW_DPN_SIZE 0x100 | ||
26 | #define SDW_BANK1_OFFSET 0x10 | ||
27 | |||
28 | /* | ||
29 | * DP0 Interrupt register & bits | ||
30 | * | ||
31 | * Spec treats Status (RO) and Clear (WC) as separate but they are same | ||
32 | * address, so treat as same register with WC. | ||
33 | */ | ||
34 | |||
35 | /* both INT and STATUS register are same */ | ||
36 | #define SDW_DP0_INT 0x0 | ||
37 | #define SDW_DP0_INTMASK 0x1 | ||
38 | #define SDW_DP0_PORTCTRL 0x2 | ||
39 | #define SDW_DP0_BLOCKCTRL1 0x3 | ||
40 | #define SDW_DP0_PREPARESTATUS 0x4 | ||
41 | #define SDW_DP0_PREPARECTRL 0x5 | ||
42 | |||
43 | #define SDW_DP0_INT_TEST_FAIL BIT(0) | ||
44 | #define SDW_DP0_INT_PORT_READY BIT(1) | ||
45 | #define SDW_DP0_INT_BRA_FAILURE BIT(2) | ||
46 | #define SDW_DP0_INT_IMPDEF1 BIT(5) | ||
47 | #define SDW_DP0_INT_IMPDEF2 BIT(6) | ||
48 | #define SDW_DP0_INT_IMPDEF3 BIT(7) | ||
49 | |||
50 | #define SDW_DP0_PORTCTRL_DATAMODE GENMASK(3, 2) | ||
51 | #define SDW_DP0_PORTCTRL_NXTINVBANK BIT(4) | ||
52 | #define SDW_DP0_PORTCTRL_BPT_PAYLD GENMASK(7, 6) | ||
53 | |||
54 | #define SDW_DP0_CHANNELEN 0x20 | ||
55 | #define SDW_DP0_SAMPLECTRL1 0x22 | ||
56 | #define SDW_DP0_SAMPLECTRL2 0x23 | ||
57 | #define SDW_DP0_OFFSETCTRL1 0x24 | ||
58 | #define SDW_DP0_OFFSETCTRL2 0x25 | ||
59 | #define SDW_DP0_HCTRL 0x26 | ||
60 | #define SDW_DP0_LANECTRL 0x28 | ||
61 | |||
62 | /* Both INT and STATUS register are same */ | ||
63 | #define SDW_SCP_INT1 0x40 | ||
64 | #define SDW_SCP_INTMASK1 0x41 | ||
65 | |||
66 | #define SDW_SCP_INT1_PARITY BIT(0) | ||
67 | #define SDW_SCP_INT1_BUS_CLASH BIT(1) | ||
68 | #define SDW_SCP_INT1_IMPL_DEF BIT(2) | ||
69 | #define SDW_SCP_INT1_SCP2_CASCADE BIT(7) | ||
70 | #define SDW_SCP_INT1_PORT0_3 GENMASK(6, 3) | ||
71 | |||
72 | #define SDW_SCP_INTSTAT2 0x42 | ||
73 | #define SDW_SCP_INTSTAT2_SCP3_CASCADE BIT(7) | ||
74 | #define SDW_SCP_INTSTAT2_PORT4_10 GENMASK(6, 0) | ||
75 | |||
76 | |||
77 | #define SDW_SCP_INTSTAT3 0x43 | ||
78 | #define SDW_SCP_INTSTAT3_PORT11_14 GENMASK(3, 0) | ||
79 | |||
80 | /* Number of interrupt status registers */ | ||
81 | #define SDW_NUM_INT_STAT_REGISTERS 3 | ||
82 | |||
83 | /* Number of interrupt clear registers */ | ||
84 | #define SDW_NUM_INT_CLEAR_REGISTERS 1 | ||
85 | |||
86 | #define SDW_SCP_CTRL 0x44 | ||
87 | #define SDW_SCP_CTRL_CLK_STP_NOW BIT(1) | ||
88 | #define SDW_SCP_CTRL_FORCE_RESET BIT(7) | ||
89 | |||
90 | #define SDW_SCP_STAT 0x44 | ||
91 | #define SDW_SCP_STAT_CLK_STP_NF BIT(0) | ||
92 | #define SDW_SCP_STAT_HPHY_NOK BIT(5) | ||
93 | #define SDW_SCP_STAT_CURR_BANK BIT(6) | ||
94 | |||
95 | #define SDW_SCP_SYSTEMCTRL 0x45 | ||
96 | #define SDW_SCP_SYSTEMCTRL_CLK_STP_PREP BIT(0) | ||
97 | #define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE BIT(2) | ||
98 | #define SDW_SCP_SYSTEMCTRL_WAKE_UP_EN BIT(3) | ||
99 | #define SDW_SCP_SYSTEMCTRL_HIGH_PHY BIT(4) | ||
100 | |||
101 | #define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE0 0 | ||
102 | #define SDW_SCP_SYSTEMCTRL_CLK_STP_MODE1 BIT(2) | ||
103 | |||
104 | #define SDW_SCP_DEVNUMBER 0x46 | ||
105 | #define SDW_SCP_HIGH_PHY_CHECK 0x47 | ||
106 | #define SDW_SCP_ADDRPAGE1 0x48 | ||
107 | #define SDW_SCP_ADDRPAGE2 0x49 | ||
108 | #define SDW_SCP_KEEPEREN 0x4A | ||
109 | #define SDW_SCP_BANKDELAY 0x4B | ||
110 | #define SDW_SCP_TESTMODE 0x4F | ||
111 | #define SDW_SCP_DEVID_0 0x50 | ||
112 | #define SDW_SCP_DEVID_1 0x51 | ||
113 | #define SDW_SCP_DEVID_2 0x52 | ||
114 | #define SDW_SCP_DEVID_3 0x53 | ||
115 | #define SDW_SCP_DEVID_4 0x54 | ||
116 | #define SDW_SCP_DEVID_5 0x55 | ||
117 | |||
118 | /* Banked Registers */ | ||
119 | #define SDW_SCP_FRAMECTRL_B0 0x60 | ||
120 | #define SDW_SCP_FRAMECTRL_B1 (0x60 + SDW_BANK1_OFFSET) | ||
121 | #define SDW_SCP_NEXTFRAME_B0 0x61 | ||
122 | #define SDW_SCP_NEXTFRAME_B1 (0x61 + SDW_BANK1_OFFSET) | ||
123 | |||
124 | /* Both INT and STATUS register is same */ | ||
125 | #define SDW_DPN_INT(n) (0x0 + SDW_DPN_SIZE * (n)) | ||
126 | #define SDW_DPN_INTMASK(n) (0x1 + SDW_DPN_SIZE * (n)) | ||
127 | #define SDW_DPN_PORTCTRL(n) (0x2 + SDW_DPN_SIZE * (n)) | ||
128 | #define SDW_DPN_BLOCKCTRL1(n) (0x3 + SDW_DPN_SIZE * (n)) | ||
129 | #define SDW_DPN_PREPARESTATUS(n) (0x4 + SDW_DPN_SIZE * (n)) | ||
130 | #define SDW_DPN_PREPARECTRL(n) (0x5 + SDW_DPN_SIZE * (n)) | ||
131 | |||
132 | #define SDW_DPN_INT_TEST_FAIL BIT(0) | ||
133 | #define SDW_DPN_INT_PORT_READY BIT(1) | ||
134 | #define SDW_DPN_INT_IMPDEF1 BIT(5) | ||
135 | #define SDW_DPN_INT_IMPDEF2 BIT(6) | ||
136 | #define SDW_DPN_INT_IMPDEF3 BIT(7) | ||
137 | |||
138 | #define SDW_DPN_PORTCTRL_FLOWMODE GENMASK(1, 0) | ||
139 | #define SDW_DPN_PORTCTRL_DATAMODE GENMASK(3, 2) | ||
140 | #define SDW_DPN_PORTCTRL_NXTINVBANK BIT(4) | ||
141 | |||
142 | #define SDW_DPN_BLOCKCTRL1_WDLEN GENMASK(5, 0) | ||
143 | |||
144 | #define SDW_DPN_PREPARECTRL_CH_PREP GENMASK(7, 0) | ||
145 | |||
146 | #define SDW_DPN_CHANNELEN_B0(n) (0x20 + SDW_DPN_SIZE * (n)) | ||
147 | #define SDW_DPN_CHANNELEN_B1(n) (0x30 + SDW_DPN_SIZE * (n)) | ||
148 | |||
149 | #define SDW_DPN_BLOCKCTRL2_B0(n) (0x21 + SDW_DPN_SIZE * (n)) | ||
150 | #define SDW_DPN_BLOCKCTRL2_B1(n) (0x31 + SDW_DPN_SIZE * (n)) | ||
151 | |||
152 | #define SDW_DPN_SAMPLECTRL1_B0(n) (0x22 + SDW_DPN_SIZE * (n)) | ||
153 | #define SDW_DPN_SAMPLECTRL1_B1(n) (0x32 + SDW_DPN_SIZE * (n)) | ||
154 | |||
155 | #define SDW_DPN_SAMPLECTRL2_B0(n) (0x23 + SDW_DPN_SIZE * (n)) | ||
156 | #define SDW_DPN_SAMPLECTRL2_B1(n) (0x33 + SDW_DPN_SIZE * (n)) | ||
157 | |||
158 | #define SDW_DPN_OFFSETCTRL1_B0(n) (0x24 + SDW_DPN_SIZE * (n)) | ||
159 | #define SDW_DPN_OFFSETCTRL1_B1(n) (0x34 + SDW_DPN_SIZE * (n)) | ||
160 | |||
161 | #define SDW_DPN_OFFSETCTRL2_B0(n) (0x25 + SDW_DPN_SIZE * (n)) | ||
162 | #define SDW_DPN_OFFSETCTRL2_B1(n) (0x35 + SDW_DPN_SIZE * (n)) | ||
163 | |||
164 | #define SDW_DPN_HCTRL_B0(n) (0x26 + SDW_DPN_SIZE * (n)) | ||
165 | #define SDW_DPN_HCTRL_B1(n) (0x36 + SDW_DPN_SIZE * (n)) | ||
166 | |||
167 | #define SDW_DPN_BLOCKCTRL3_B0(n) (0x27 + SDW_DPN_SIZE * (n)) | ||
168 | #define SDW_DPN_BLOCKCTRL3_B1(n) (0x37 + SDW_DPN_SIZE * (n)) | ||
169 | |||
170 | #define SDW_DPN_LANECTRL_B0(n) (0x28 + SDW_DPN_SIZE * (n)) | ||
171 | #define SDW_DPN_LANECTRL_B1(n) (0x38 + SDW_DPN_SIZE * (n)) | ||
172 | |||
173 | #define SDW_DPN_SAMPLECTRL_LOW GENMASK(7, 0) | ||
174 | #define SDW_DPN_SAMPLECTRL_HIGH GENMASK(15, 8) | ||
175 | |||
176 | #define SDW_DPN_HCTRL_HSTART GENMASK(7, 4) | ||
177 | #define SDW_DPN_HCTRL_HSTOP GENMASK(3, 0) | ||
178 | |||
179 | #define SDW_NUM_CASC_PORT_INTSTAT1 4 | ||
180 | #define SDW_CASC_PORT_START_INTSTAT1 0 | ||
181 | #define SDW_CASC_PORT_MASK_INTSTAT1 0x8 | ||
182 | #define SDW_CASC_PORT_REG_OFFSET_INTSTAT1 0x0 | ||
183 | |||
184 | #define SDW_NUM_CASC_PORT_INTSTAT2 7 | ||
185 | #define SDW_CASC_PORT_START_INTSTAT2 4 | ||
186 | #define SDW_CASC_PORT_MASK_INTSTAT2 1 | ||
187 | #define SDW_CASC_PORT_REG_OFFSET_INTSTAT2 1 | ||
188 | |||
189 | #define SDW_NUM_CASC_PORT_INTSTAT3 4 | ||
190 | #define SDW_CASC_PORT_START_INTSTAT3 11 | ||
191 | #define SDW_CASC_PORT_MASK_INTSTAT3 1 | ||
192 | #define SDW_CASC_PORT_REG_OFFSET_INTSTAT3 2 | ||
193 | |||
194 | #endif /* __SDW_REGISTERS_H */ | ||
diff --git a/include/linux/soundwire/sdw_type.h b/include/linux/soundwire/sdw_type.h new file mode 100644 index 000000000000..9fd553e553e9 --- /dev/null +++ b/include/linux/soundwire/sdw_type.h | |||
@@ -0,0 +1,19 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | // Copyright(c) 2015-17 Intel Corporation. | ||
3 | |||
4 | #ifndef __SOUNDWIRE_TYPES_H | ||
5 | #define __SOUNDWIRE_TYPES_H | ||
6 | |||
7 | extern struct bus_type sdw_bus_type; | ||
8 | |||
9 | #define drv_to_sdw_driver(_drv) container_of(_drv, struct sdw_driver, driver) | ||
10 | |||
11 | #define sdw_register_driver(drv) \ | ||
12 | __sdw_register_driver(drv, THIS_MODULE) | ||
13 | |||
14 | int __sdw_register_driver(struct sdw_driver *drv, struct module *); | ||
15 | void sdw_unregister_driver(struct sdw_driver *drv); | ||
16 | |||
17 | int sdw_slave_modalias(const struct sdw_slave *slave, char *buf, size_t size); | ||
18 | |||
19 | #endif /* __SOUNDWIRE_TYPES_H */ | ||
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 3bf273538840..4894d322d258 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
@@ -409,4 +409,10 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock); | |||
409 | #define atomic_dec_and_lock(atomic, lock) \ | 409 | #define atomic_dec_and_lock(atomic, lock) \ |
410 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) | 410 | __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) |
411 | 411 | ||
412 | int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask, | ||
413 | size_t max_size, unsigned int cpu_mult, | ||
414 | gfp_t gfp); | ||
415 | |||
416 | void free_bucket_spinlocks(spinlock_t *locks); | ||
417 | |||
412 | #endif /* __LINUX_SPINLOCK_H */ | 418 | #endif /* __LINUX_SPINLOCK_H */ |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index 62be8966e837..33c1c698df09 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
@@ -92,7 +92,7 @@ void synchronize_srcu(struct srcu_struct *sp); | |||
92 | * relies on normal RCU, it can be called from the CPU which | 92 | * relies on normal RCU, it can be called from the CPU which |
93 | * is in the idle loop from an RCU point of view or offline. | 93 | * is in the idle loop from an RCU point of view or offline. |
94 | */ | 94 | */ |
95 | static inline int srcu_read_lock_held(struct srcu_struct *sp) | 95 | static inline int srcu_read_lock_held(const struct srcu_struct *sp) |
96 | { | 96 | { |
97 | if (!debug_lockdep_rcu_enabled()) | 97 | if (!debug_lockdep_rcu_enabled()) |
98 | return 1; | 98 | return 1; |
@@ -101,7 +101,7 @@ static inline int srcu_read_lock_held(struct srcu_struct *sp) | |||
101 | 101 | ||
102 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 102 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
103 | 103 | ||
104 | static inline int srcu_read_lock_held(struct srcu_struct *sp) | 104 | static inline int srcu_read_lock_held(const struct srcu_struct *sp) |
105 | { | 105 | { |
106 | return 1; | 106 | return 1; |
107 | } | 107 | } |
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index a949f4f9e4d7..4eda108abee0 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h | |||
@@ -40,7 +40,7 @@ struct srcu_data { | |||
40 | unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */ | 40 | unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */ |
41 | 41 | ||
42 | /* Update-side state. */ | 42 | /* Update-side state. */ |
43 | raw_spinlock_t __private lock ____cacheline_internodealigned_in_smp; | 43 | spinlock_t __private lock ____cacheline_internodealigned_in_smp; |
44 | struct rcu_segcblist srcu_cblist; /* List of callbacks.*/ | 44 | struct rcu_segcblist srcu_cblist; /* List of callbacks.*/ |
45 | unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ | 45 | unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ |
46 | unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ | 46 | unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ |
@@ -58,7 +58,7 @@ struct srcu_data { | |||
58 | * Node in SRCU combining tree, similar in function to rcu_data. | 58 | * Node in SRCU combining tree, similar in function to rcu_data. |
59 | */ | 59 | */ |
60 | struct srcu_node { | 60 | struct srcu_node { |
61 | raw_spinlock_t __private lock; | 61 | spinlock_t __private lock; |
62 | unsigned long srcu_have_cbs[4]; /* GP seq for children */ | 62 | unsigned long srcu_have_cbs[4]; /* GP seq for children */ |
63 | /* having CBs, but only */ | 63 | /* having CBs, but only */ |
64 | /* is > ->srcu_gq_seq. */ | 64 | /* is > ->srcu_gq_seq. */ |
@@ -78,7 +78,7 @@ struct srcu_struct { | |||
78 | struct srcu_node *level[RCU_NUM_LVLS + 1]; | 78 | struct srcu_node *level[RCU_NUM_LVLS + 1]; |
79 | /* First node at each level. */ | 79 | /* First node at each level. */ |
80 | struct mutex srcu_cb_mutex; /* Serialize CB preparation. */ | 80 | struct mutex srcu_cb_mutex; /* Serialize CB preparation. */ |
81 | raw_spinlock_t __private lock; /* Protect counters */ | 81 | spinlock_t __private lock; /* Protect counters */ |
82 | struct mutex srcu_gp_mutex; /* Serialize GP work. */ | 82 | struct mutex srcu_gp_mutex; /* Serialize GP work. */ |
83 | unsigned int srcu_idx; /* Current rdr array element. */ | 83 | unsigned int srcu_idx; /* Current rdr array element. */ |
84 | unsigned long srcu_gp_seq; /* Grace-period seq #. */ | 84 | unsigned long srcu_gp_seq; /* Grace-period seq #. */ |
@@ -107,7 +107,7 @@ struct srcu_struct { | |||
107 | #define __SRCU_STRUCT_INIT(name) \ | 107 | #define __SRCU_STRUCT_INIT(name) \ |
108 | { \ | 108 | { \ |
109 | .sda = &name##_srcu_data, \ | 109 | .sda = &name##_srcu_data, \ |
110 | .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ | 110 | .lock = __SPIN_LOCK_UNLOCKED(name.lock), \ |
111 | .srcu_gp_seq_needed = 0 - 1, \ | 111 | .srcu_gp_seq_needed = 0 - 1, \ |
112 | __SRCU_DEP_MAP_INIT(name) \ | 112 | __SRCU_DEP_MAP_INIT(name) \ |
113 | } | 113 | } |
diff --git a/include/linux/stddef.h b/include/linux/stddef.h index 2181719fd907..998a4ba28eba 100644 --- a/include/linux/stddef.h +++ b/include/linux/stddef.h | |||
@@ -20,12 +20,20 @@ enum { | |||
20 | #endif | 20 | #endif |
21 | 21 | ||
22 | /** | 22 | /** |
23 | * sizeof_field(TYPE, MEMBER) | ||
24 | * | ||
25 | * @TYPE: The structure containing the field of interest | ||
26 | * @MEMBER: The field to return the size of | ||
27 | */ | ||
28 | #define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER)) | ||
29 | |||
30 | /** | ||
23 | * offsetofend(TYPE, MEMBER) | 31 | * offsetofend(TYPE, MEMBER) |
24 | * | 32 | * |
25 | * @TYPE: The type of the structure | 33 | * @TYPE: The type of the structure |
26 | * @MEMBER: The member within the structure to get the end offset of | 34 | * @MEMBER: The member within the structure to get the end offset of |
27 | */ | 35 | */ |
28 | #define offsetofend(TYPE, MEMBER) \ | 36 | #define offsetofend(TYPE, MEMBER) \ |
29 | (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER)) | 37 | (offsetof(TYPE, MEMBER) + sizeof_field(TYPE, MEMBER)) |
30 | 38 | ||
31 | #endif | 39 | #endif |
diff --git a/include/linux/string.h b/include/linux/string.h index cfd83eb2f926..dd39a690c841 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
@@ -11,6 +11,7 @@ | |||
11 | 11 | ||
12 | extern char *strndup_user(const char __user *, long); | 12 | extern char *strndup_user(const char __user *, long); |
13 | extern void *memdup_user(const void __user *, size_t); | 13 | extern void *memdup_user(const void __user *, size_t); |
14 | extern void *vmemdup_user(const void __user *, size_t); | ||
14 | extern void *memdup_user_nul(const void __user *, size_t); | 15 | extern void *memdup_user_nul(const void __user *, size_t); |
15 | 16 | ||
16 | /* | 17 | /* |
@@ -28,7 +29,7 @@ extern char * strncpy(char *,const char *, __kernel_size_t); | |||
28 | size_t strlcpy(char *, const char *, size_t); | 29 | size_t strlcpy(char *, const char *, size_t); |
29 | #endif | 30 | #endif |
30 | #ifndef __HAVE_ARCH_STRSCPY | 31 | #ifndef __HAVE_ARCH_STRSCPY |
31 | ssize_t __must_check strscpy(char *, const char *, size_t); | 32 | ssize_t strscpy(char *, const char *, size_t); |
32 | #endif | 33 | #endif |
33 | #ifndef __HAVE_ARCH_STRCAT | 34 | #ifndef __HAVE_ARCH_STRCAT |
34 | extern char * strcat(char *, const char *); | 35 | extern char * strcat(char *, const char *); |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 71c237e8240e..ed761f751ecb 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
@@ -179,7 +179,6 @@ struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, | |||
179 | int rpc_restart_call_prepare(struct rpc_task *); | 179 | int rpc_restart_call_prepare(struct rpc_task *); |
180 | int rpc_restart_call(struct rpc_task *); | 180 | int rpc_restart_call(struct rpc_task *); |
181 | void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); | 181 | void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int); |
182 | int rpc_protocol(struct rpc_clnt *); | ||
183 | struct net * rpc_net_ns(struct rpc_clnt *); | 182 | struct net * rpc_net_ns(struct rpc_clnt *); |
184 | size_t rpc_max_payload(struct rpc_clnt *); | 183 | size_t rpc_max_payload(struct rpc_clnt *); |
185 | size_t rpc_max_bc_payload(struct rpc_clnt *); | 184 | size_t rpc_max_bc_payload(struct rpc_clnt *); |
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h index 995c6fe9ee90..4b731b046bcd 100644 --- a/include/linux/sunrpc/svc_rdma.h +++ b/include/linux/sunrpc/svc_rdma.h | |||
@@ -185,8 +185,6 @@ extern void svc_rdma_wc_reg(struct ib_cq *, struct ib_wc *); | |||
185 | extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *); | 185 | extern void svc_rdma_wc_read(struct ib_cq *, struct ib_wc *); |
186 | extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *); | 186 | extern void svc_rdma_wc_inv(struct ib_cq *, struct ib_wc *); |
187 | extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); | 187 | extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); |
188 | extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t); | ||
189 | extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t); | ||
190 | extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); | 188 | extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); |
191 | extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); | 189 | extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *); |
192 | extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); | 190 | extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int); |
diff --git a/include/linux/sunrpc/xprtrdma.h b/include/linux/sunrpc/xprtrdma.h index 221b7a2e5406..5859563e3c1f 100644 --- a/include/linux/sunrpc/xprtrdma.h +++ b/include/linux/sunrpc/xprtrdma.h | |||
@@ -64,7 +64,7 @@ enum rpcrdma_memreg { | |||
64 | RPCRDMA_MEMWINDOWS, | 64 | RPCRDMA_MEMWINDOWS, |
65 | RPCRDMA_MEMWINDOWS_ASYNC, | 65 | RPCRDMA_MEMWINDOWS_ASYNC, |
66 | RPCRDMA_MTHCAFMR, | 66 | RPCRDMA_MTHCAFMR, |
67 | RPCRDMA_FRMR, | 67 | RPCRDMA_FRWR, |
68 | RPCRDMA_ALLPHYSICAL, | 68 | RPCRDMA_ALLPHYSICAL, |
69 | RPCRDMA_LAST | 69 | RPCRDMA_LAST |
70 | }; | 70 | }; |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index cc22a24516d6..440b62f7502e 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
@@ -384,6 +384,8 @@ extern int swsusp_page_is_forbidden(struct page *); | |||
384 | extern void swsusp_set_page_free(struct page *); | 384 | extern void swsusp_set_page_free(struct page *); |
385 | extern void swsusp_unset_page_free(struct page *); | 385 | extern void swsusp_unset_page_free(struct page *); |
386 | extern unsigned long get_safe_page(gfp_t gfp_mask); | 386 | extern unsigned long get_safe_page(gfp_t gfp_mask); |
387 | extern asmlinkage int swsusp_arch_suspend(void); | ||
388 | extern asmlinkage int swsusp_arch_resume(void); | ||
387 | 389 | ||
388 | extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); | 390 | extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); |
389 | extern int hibernate(void); | 391 | extern int hibernate(void); |
diff --git a/include/linux/swap.h b/include/linux/swap.h index c2b8128799c1..7b6a59f722a3 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -332,7 +332,6 @@ extern void mark_page_accessed(struct page *); | |||
332 | extern void lru_add_drain(void); | 332 | extern void lru_add_drain(void); |
333 | extern void lru_add_drain_cpu(int cpu); | 333 | extern void lru_add_drain_cpu(int cpu); |
334 | extern void lru_add_drain_all(void); | 334 | extern void lru_add_drain_all(void); |
335 | extern void lru_add_drain_all_cpuslocked(void); | ||
336 | extern void rotate_reclaimable_page(struct page *page); | 335 | extern void rotate_reclaimable_page(struct page *page); |
337 | extern void deactivate_file_page(struct page *page); | 336 | extern void deactivate_file_page(struct page *page); |
338 | extern void mark_page_lazyfree(struct page *page); | 337 | extern void mark_page_lazyfree(struct page *page); |
@@ -345,7 +344,6 @@ extern void lru_cache_add_active_or_unevictable(struct page *page, | |||
345 | 344 | ||
346 | /* linux/mm/vmscan.c */ | 345 | /* linux/mm/vmscan.c */ |
347 | extern unsigned long zone_reclaimable_pages(struct zone *zone); | 346 | extern unsigned long zone_reclaimable_pages(struct zone *zone); |
348 | extern unsigned long pgdat_reclaimable_pages(struct pglist_data *pgdat); | ||
349 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | 347 | extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, |
350 | gfp_t gfp_mask, nodemask_t *mask); | 348 | gfp_t gfp_mask, nodemask_t *mask); |
351 | extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); | 349 | extern int __isolate_lru_page(struct page *page, isolate_mode_t mode); |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 24ed817082ee..5b1f2a00491c 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
@@ -66,6 +66,12 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev, | |||
66 | enum dma_sync_target target); | 66 | enum dma_sync_target target); |
67 | 67 | ||
68 | /* Accessory functions. */ | 68 | /* Accessory functions. */ |
69 | |||
70 | void *swiotlb_alloc(struct device *hwdev, size_t size, dma_addr_t *dma_handle, | ||
71 | gfp_t flags, unsigned long attrs); | ||
72 | void swiotlb_free(struct device *dev, size_t size, void *vaddr, | ||
73 | dma_addr_t dma_addr, unsigned long attrs); | ||
74 | |||
69 | extern void | 75 | extern void |
70 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, | 76 | *swiotlb_alloc_coherent(struct device *hwdev, size_t size, |
71 | dma_addr_t *dma_handle, gfp_t flags); | 77 | dma_addr_t *dma_handle, gfp_t flags); |
@@ -115,10 +121,10 @@ extern int | |||
115 | swiotlb_dma_supported(struct device *hwdev, u64 mask); | 121 | swiotlb_dma_supported(struct device *hwdev, u64 mask); |
116 | 122 | ||
117 | #ifdef CONFIG_SWIOTLB | 123 | #ifdef CONFIG_SWIOTLB |
118 | extern void __init swiotlb_free(void); | 124 | extern void __init swiotlb_exit(void); |
119 | unsigned int swiotlb_max_segment(void); | 125 | unsigned int swiotlb_max_segment(void); |
120 | #else | 126 | #else |
121 | static inline void swiotlb_free(void) { } | 127 | static inline void swiotlb_exit(void) { } |
122 | static inline unsigned int swiotlb_max_segment(void) { return 0; } | 128 | static inline unsigned int swiotlb_max_segment(void) { return 0; } |
123 | #endif | 129 | #endif |
124 | 130 | ||
@@ -126,4 +132,6 @@ extern void swiotlb_print_info(void); | |||
126 | extern int is_swiotlb_buffer(phys_addr_t paddr); | 132 | extern int is_swiotlb_buffer(phys_addr_t paddr); |
127 | extern void swiotlb_set_max_segment(unsigned int); | 133 | extern void swiotlb_set_max_segment(unsigned int); |
128 | 134 | ||
135 | extern const struct dma_map_ops swiotlb_dma_ops; | ||
136 | |||
129 | #endif /* __LINUX_SWIOTLB_H */ | 137 | #endif /* __LINUX_SWIOTLB_H */ |
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index 09d73d0d1aa8..ec93e93371fa 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h | |||
@@ -100,6 +100,9 @@ struct sw_event_regs { | |||
100 | u32 gpio_interrupt_hdr; | 100 | u32 gpio_interrupt_hdr; |
101 | u32 gpio_interrupt_data; | 101 | u32 gpio_interrupt_data; |
102 | u32 reserved16[4]; | 102 | u32 reserved16[4]; |
103 | u32 gfms_event_hdr; | ||
104 | u32 gfms_event_data; | ||
105 | u32 reserved17[4]; | ||
103 | } __packed; | 106 | } __packed; |
104 | 107 | ||
105 | enum { | 108 | enum { |
@@ -168,6 +171,14 @@ struct ntb_info_regs { | |||
168 | u16 reserved1; | 171 | u16 reserved1; |
169 | u64 ep_map; | 172 | u64 ep_map; |
170 | u16 requester_id; | 173 | u16 requester_id; |
174 | u16 reserved2; | ||
175 | u32 reserved3[4]; | ||
176 | struct nt_partition_info { | ||
177 | u32 xlink_enabled; | ||
178 | u32 target_part_low; | ||
179 | u32 target_part_high; | ||
180 | u32 reserved; | ||
181 | } ntp_info[48]; | ||
171 | } __packed; | 182 | } __packed; |
172 | 183 | ||
173 | struct part_cfg_regs { | 184 | struct part_cfg_regs { |
@@ -284,7 +295,20 @@ enum { | |||
284 | struct pff_csr_regs { | 295 | struct pff_csr_regs { |
285 | u16 vendor_id; | 296 | u16 vendor_id; |
286 | u16 device_id; | 297 | u16 device_id; |
287 | u32 pci_cfg_header[15]; | 298 | u16 pcicmd; |
299 | u16 pcists; | ||
300 | u32 pci_class; | ||
301 | u32 pci_opts; | ||
302 | union { | ||
303 | u32 pci_bar[6]; | ||
304 | u64 pci_bar64[3]; | ||
305 | }; | ||
306 | u32 pci_cardbus; | ||
307 | u32 pci_subsystem_id; | ||
308 | u32 pci_expansion_rom; | ||
309 | u32 pci_cap_ptr; | ||
310 | u32 reserved1; | ||
311 | u32 pci_irq; | ||
288 | u32 pci_cap_region[48]; | 312 | u32 pci_cap_region[48]; |
289 | u32 pcie_cap_region[448]; | 313 | u32 pcie_cap_region[448]; |
290 | u32 indirect_gas_window[128]; | 314 | u32 indirect_gas_window[128]; |
diff --git a/include/linux/sync_core.h b/include/linux/sync_core.h new file mode 100644 index 000000000000..013da4b8b327 --- /dev/null +++ b/include/linux/sync_core.h | |||
@@ -0,0 +1,21 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #ifndef _LINUX_SYNC_CORE_H | ||
3 | #define _LINUX_SYNC_CORE_H | ||
4 | |||
5 | #ifdef CONFIG_ARCH_HAS_SYNC_CORE_BEFORE_USERMODE | ||
6 | #include <asm/sync_core.h> | ||
7 | #else | ||
8 | /* | ||
9 | * This is a dummy sync_core_before_usermode() implementation that can be used | ||
10 | * on all architectures which return to user-space through core serializing | ||
11 | * instructions. | ||
12 | * If your architecture returns to user-space through non-core-serializing | ||
13 | * instructions, you need to write your own functions. | ||
14 | */ | ||
15 | static inline void sync_core_before_usermode(void) | ||
16 | { | ||
17 | } | ||
18 | #endif | ||
19 | |||
20 | #endif /* _LINUX_SYNC_CORE_H */ | ||
21 | |||
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h index 992bc9948232..b769ecfcc3bd 100644 --- a/include/linux/sysctl.h +++ b/include/linux/sysctl.h | |||
@@ -51,9 +51,6 @@ extern int proc_dointvec_minmax(struct ctl_table *, int, | |||
51 | extern int proc_douintvec_minmax(struct ctl_table *table, int write, | 51 | extern int proc_douintvec_minmax(struct ctl_table *table, int write, |
52 | void __user *buffer, size_t *lenp, | 52 | void __user *buffer, size_t *lenp, |
53 | loff_t *ppos); | 53 | loff_t *ppos); |
54 | extern int proc_dopipe_max_size(struct ctl_table *table, int write, | ||
55 | void __user *buffer, size_t *lenp, | ||
56 | loff_t *ppos); | ||
57 | extern int proc_dointvec_jiffies(struct ctl_table *, int, | 54 | extern int proc_dointvec_jiffies(struct ctl_table *, int, |
58 | void __user *, size_t *, loff_t *); | 55 | void __user *, size_t *, loff_t *); |
59 | extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, | 56 | extern int proc_dointvec_userhz_jiffies(struct ctl_table *, int, |
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h index 40839c02d28c..b8bfdc173ec0 100644 --- a/include/linux/sysfs.h +++ b/include/linux/sysfs.h | |||
@@ -113,7 +113,7 @@ struct attribute_group { | |||
113 | } | 113 | } |
114 | 114 | ||
115 | #define __ATTR_RO(_name) { \ | 115 | #define __ATTR_RO(_name) { \ |
116 | .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ | 116 | .attr = { .name = __stringify(_name), .mode = 0444 }, \ |
117 | .show = _name##_show, \ | 117 | .show = _name##_show, \ |
118 | } | 118 | } |
119 | 119 | ||
@@ -124,12 +124,11 @@ struct attribute_group { | |||
124 | } | 124 | } |
125 | 125 | ||
126 | #define __ATTR_WO(_name) { \ | 126 | #define __ATTR_WO(_name) { \ |
127 | .attr = { .name = __stringify(_name), .mode = S_IWUSR }, \ | 127 | .attr = { .name = __stringify(_name), .mode = 0200 }, \ |
128 | .store = _name##_store, \ | 128 | .store = _name##_store, \ |
129 | } | 129 | } |
130 | 130 | ||
131 | #define __ATTR_RW(_name) __ATTR(_name, (S_IWUSR | S_IRUGO), \ | 131 | #define __ATTR_RW(_name) __ATTR(_name, 0644, _name##_show, _name##_store) |
132 | _name##_show, _name##_store) | ||
133 | 132 | ||
134 | #define __ATTR_NULL { .attr = { .name = NULL } } | 133 | #define __ATTR_NULL { .attr = { .name = NULL } } |
135 | 134 | ||
@@ -192,14 +191,13 @@ struct bin_attribute { | |||
192 | } | 191 | } |
193 | 192 | ||
194 | #define __BIN_ATTR_RO(_name, _size) { \ | 193 | #define __BIN_ATTR_RO(_name, _size) { \ |
195 | .attr = { .name = __stringify(_name), .mode = S_IRUGO }, \ | 194 | .attr = { .name = __stringify(_name), .mode = 0444 }, \ |
196 | .read = _name##_read, \ | 195 | .read = _name##_read, \ |
197 | .size = _size, \ | 196 | .size = _size, \ |
198 | } | 197 | } |
199 | 198 | ||
200 | #define __BIN_ATTR_RW(_name, _size) __BIN_ATTR(_name, \ | 199 | #define __BIN_ATTR_RW(_name, _size) \ |
201 | (S_IWUSR | S_IRUGO), _name##_read, \ | 200 | __BIN_ATTR(_name, 0644, _name##_read, _name##_write, _size) |
202 | _name##_write, _size) | ||
203 | 201 | ||
204 | #define __BIN_ATTR_NULL __ATTR_NULL | 202 | #define __BIN_ATTR_NULL __ATTR_NULL |
205 | 203 | ||
diff --git a/include/linux/tcp.h b/include/linux/tcp.h index ca4a6361389b..8f4c54986f97 100644 --- a/include/linux/tcp.h +++ b/include/linux/tcp.h | |||
@@ -335,6 +335,17 @@ struct tcp_sock { | |||
335 | 335 | ||
336 | int linger2; | 336 | int linger2; |
337 | 337 | ||
338 | |||
339 | /* Sock_ops bpf program related variables */ | ||
340 | #ifdef CONFIG_BPF | ||
341 | u8 bpf_sock_ops_cb_flags; /* Control calling BPF programs | ||
342 | * values defined in uapi/linux/tcp.h | ||
343 | */ | ||
344 | #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) (TP->bpf_sock_ops_cb_flags & ARG) | ||
345 | #else | ||
346 | #define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0 | ||
347 | #endif | ||
348 | |||
338 | /* Receiver side RTT estimation */ | 349 | /* Receiver side RTT estimation */ |
339 | struct { | 350 | struct { |
340 | u32 rtt_us; | 351 | u32 rtt_us; |
@@ -344,7 +355,7 @@ struct tcp_sock { | |||
344 | 355 | ||
345 | /* Receiver queue space */ | 356 | /* Receiver queue space */ |
346 | struct { | 357 | struct { |
347 | int space; | 358 | u32 space; |
348 | u32 seq; | 359 | u32 seq; |
349 | u64 time; | 360 | u64 time; |
350 | } rcvq_space; | 361 | } rcvq_space; |
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index cb889afe576b..a2b3dfcee0b5 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
19 | #include <linux/idr.h> | 19 | #include <linux/idr.h> |
20 | #include <linux/kref.h> | ||
20 | #include <linux/list.h> | 21 | #include <linux/list.h> |
21 | #include <linux/tee.h> | 22 | #include <linux/tee.h> |
22 | 23 | ||
@@ -25,8 +26,12 @@ | |||
25 | * specific TEE driver. | 26 | * specific TEE driver. |
26 | */ | 27 | */ |
27 | 28 | ||
28 | #define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */ | 29 | #define TEE_SHM_MAPPED BIT(0) /* Memory mapped by the kernel */ |
29 | #define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */ | 30 | #define TEE_SHM_DMA_BUF BIT(1) /* Memory with dma-buf handle */ |
31 | #define TEE_SHM_EXT_DMA_BUF BIT(2) /* Memory with dma-buf handle */ | ||
32 | #define TEE_SHM_REGISTER BIT(3) /* Memory registered in secure world */ | ||
33 | #define TEE_SHM_USER_MAPPED BIT(4) /* Memory mapped in user space */ | ||
34 | #define TEE_SHM_POOL BIT(5) /* Memory allocated from pool */ | ||
30 | 35 | ||
31 | struct device; | 36 | struct device; |
32 | struct tee_device; | 37 | struct tee_device; |
@@ -38,11 +43,17 @@ struct tee_shm_pool; | |||
38 | * @teedev: pointer to this drivers struct tee_device | 43 | * @teedev: pointer to this drivers struct tee_device |
39 | * @list_shm: List of shared memory object owned by this context | 44 | * @list_shm: List of shared memory object owned by this context |
40 | * @data: driver specific context data, managed by the driver | 45 | * @data: driver specific context data, managed by the driver |
46 | * @refcount: reference counter for this structure | ||
47 | * @releasing: flag that indicates if context is being released right now. | ||
48 | * It is needed to break circular dependency on context during | ||
49 | * shared memory release. | ||
41 | */ | 50 | */ |
42 | struct tee_context { | 51 | struct tee_context { |
43 | struct tee_device *teedev; | 52 | struct tee_device *teedev; |
44 | struct list_head list_shm; | 53 | struct list_head list_shm; |
45 | void *data; | 54 | void *data; |
55 | struct kref refcount; | ||
56 | bool releasing; | ||
46 | }; | 57 | }; |
47 | 58 | ||
48 | struct tee_param_memref { | 59 | struct tee_param_memref { |
@@ -76,6 +87,8 @@ struct tee_param { | |||
76 | * @cancel_req: request cancel of an ongoing invoke or open | 87 | * @cancel_req: request cancel of an ongoing invoke or open |
77 | * @supp_revc: called for supplicant to get a command | 88 | * @supp_revc: called for supplicant to get a command |
78 | * @supp_send: called for supplicant to send a response | 89 | * @supp_send: called for supplicant to send a response |
90 | * @shm_register: register shared memory buffer in TEE | ||
91 | * @shm_unregister: unregister shared memory buffer in TEE | ||
79 | */ | 92 | */ |
80 | struct tee_driver_ops { | 93 | struct tee_driver_ops { |
81 | void (*get_version)(struct tee_device *teedev, | 94 | void (*get_version)(struct tee_device *teedev, |
@@ -94,6 +107,10 @@ struct tee_driver_ops { | |||
94 | struct tee_param *param); | 107 | struct tee_param *param); |
95 | int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, | 108 | int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params, |
96 | struct tee_param *param); | 109 | struct tee_param *param); |
110 | int (*shm_register)(struct tee_context *ctx, struct tee_shm *shm, | ||
111 | struct page **pages, size_t num_pages, | ||
112 | unsigned long start); | ||
113 | int (*shm_unregister)(struct tee_context *ctx, struct tee_shm *shm); | ||
97 | }; | 114 | }; |
98 | 115 | ||
99 | /** | 116 | /** |
@@ -150,6 +167,97 @@ int tee_device_register(struct tee_device *teedev); | |||
150 | void tee_device_unregister(struct tee_device *teedev); | 167 | void tee_device_unregister(struct tee_device *teedev); |
151 | 168 | ||
152 | /** | 169 | /** |
170 | * struct tee_shm - shared memory object | ||
171 | * @teedev: device used to allocate the object | ||
172 | * @ctx: context using the object, if NULL the context is gone | ||
173 | * @link link element | ||
174 | * @paddr: physical address of the shared memory | ||
175 | * @kaddr: virtual address of the shared memory | ||
176 | * @size: size of shared memory | ||
177 | * @offset: offset of buffer in user space | ||
178 | * @pages: locked pages from userspace | ||
179 | * @num_pages: number of locked pages | ||
180 | * @dmabuf: dmabuf used to for exporting to user space | ||
181 | * @flags: defined by TEE_SHM_* in tee_drv.h | ||
182 | * @id: unique id of a shared memory object on this device | ||
183 | * | ||
184 | * This pool is only supposed to be accessed directly from the TEE | ||
185 | * subsystem and from drivers that implements their own shm pool manager. | ||
186 | */ | ||
187 | struct tee_shm { | ||
188 | struct tee_device *teedev; | ||
189 | struct tee_context *ctx; | ||
190 | struct list_head link; | ||
191 | phys_addr_t paddr; | ||
192 | void *kaddr; | ||
193 | size_t size; | ||
194 | unsigned int offset; | ||
195 | struct page **pages; | ||
196 | size_t num_pages; | ||
197 | struct dma_buf *dmabuf; | ||
198 | u32 flags; | ||
199 | int id; | ||
200 | }; | ||
201 | |||
202 | /** | ||
203 | * struct tee_shm_pool_mgr - shared memory manager | ||
204 | * @ops: operations | ||
205 | * @private_data: private data for the shared memory manager | ||
206 | */ | ||
207 | struct tee_shm_pool_mgr { | ||
208 | const struct tee_shm_pool_mgr_ops *ops; | ||
209 | void *private_data; | ||
210 | }; | ||
211 | |||
212 | /** | ||
213 | * struct tee_shm_pool_mgr_ops - shared memory pool manager operations | ||
214 | * @alloc: called when allocating shared memory | ||
215 | * @free: called when freeing shared memory | ||
216 | * @destroy_poolmgr: called when destroying the pool manager | ||
217 | */ | ||
218 | struct tee_shm_pool_mgr_ops { | ||
219 | int (*alloc)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm, | ||
220 | size_t size); | ||
221 | void (*free)(struct tee_shm_pool_mgr *poolmgr, struct tee_shm *shm); | ||
222 | void (*destroy_poolmgr)(struct tee_shm_pool_mgr *poolmgr); | ||
223 | }; | ||
224 | |||
225 | /** | ||
226 | * tee_shm_pool_alloc() - Create a shared memory pool from shm managers | ||
227 | * @priv_mgr: manager for driver private shared memory allocations | ||
228 | * @dmabuf_mgr: manager for dma-buf shared memory allocations | ||
229 | * | ||
230 | * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied | ||
231 | * in @dmabuf, others will use the range provided by @priv. | ||
232 | * | ||
233 | * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure. | ||
234 | */ | ||
235 | struct tee_shm_pool *tee_shm_pool_alloc(struct tee_shm_pool_mgr *priv_mgr, | ||
236 | struct tee_shm_pool_mgr *dmabuf_mgr); | ||
237 | |||
238 | /* | ||
239 | * tee_shm_pool_mgr_alloc_res_mem() - Create a shm manager for reserved | ||
240 | * memory | ||
241 | * @vaddr: Virtual address of start of pool | ||
242 | * @paddr: Physical address of start of pool | ||
243 | * @size: Size in bytes of the pool | ||
244 | * | ||
245 | * @returns pointer to a 'struct tee_shm_pool_mgr' or an ERR_PTR on failure. | ||
246 | */ | ||
247 | struct tee_shm_pool_mgr *tee_shm_pool_mgr_alloc_res_mem(unsigned long vaddr, | ||
248 | phys_addr_t paddr, | ||
249 | size_t size, | ||
250 | int min_alloc_order); | ||
251 | |||
252 | /** | ||
253 | * tee_shm_pool_mgr_destroy() - Free a shared memory manager | ||
254 | */ | ||
255 | static inline void tee_shm_pool_mgr_destroy(struct tee_shm_pool_mgr *poolm) | ||
256 | { | ||
257 | poolm->ops->destroy_poolmgr(poolm); | ||
258 | } | ||
259 | |||
260 | /** | ||
153 | * struct tee_shm_pool_mem_info - holds information needed to create a shared | 261 | * struct tee_shm_pool_mem_info - holds information needed to create a shared |
154 | * memory pool | 262 | * memory pool |
155 | * @vaddr: Virtual address of start of pool | 263 | * @vaddr: Virtual address of start of pool |
@@ -211,6 +319,40 @@ void *tee_get_drvdata(struct tee_device *teedev); | |||
211 | struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); | 319 | struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags); |
212 | 320 | ||
213 | /** | 321 | /** |
322 | * tee_shm_priv_alloc() - Allocate shared memory privately | ||
323 | * @dev: Device that allocates the shared memory | ||
324 | * @size: Requested size of shared memory | ||
325 | * | ||
326 | * Allocates shared memory buffer that is not associated with any client | ||
327 | * context. Such buffers are owned by TEE driver and used for internal calls. | ||
328 | * | ||
329 | * @returns a pointer to 'struct tee_shm' | ||
330 | */ | ||
331 | struct tee_shm *tee_shm_priv_alloc(struct tee_device *teedev, size_t size); | ||
332 | |||
333 | /** | ||
334 | * tee_shm_register() - Register shared memory buffer | ||
335 | * @ctx: Context that registers the shared memory | ||
336 | * @addr: Address is userspace of the shared buffer | ||
337 | * @length: Length of the shared buffer | ||
338 | * @flags: Flags setting properties for the requested shared memory. | ||
339 | * | ||
340 | * @returns a pointer to 'struct tee_shm' | ||
341 | */ | ||
342 | struct tee_shm *tee_shm_register(struct tee_context *ctx, unsigned long addr, | ||
343 | size_t length, u32 flags); | ||
344 | |||
345 | /** | ||
346 | * tee_shm_is_registered() - Check if shared memory object in registered in TEE | ||
347 | * @shm: Shared memory handle | ||
348 | * @returns true if object is registered in TEE | ||
349 | */ | ||
350 | static inline bool tee_shm_is_registered(struct tee_shm *shm) | ||
351 | { | ||
352 | return shm && (shm->flags & TEE_SHM_REGISTER); | ||
353 | } | ||
354 | |||
355 | /** | ||
214 | * tee_shm_free() - Free shared memory | 356 | * tee_shm_free() - Free shared memory |
215 | * @shm: Handle to shared memory to free | 357 | * @shm: Handle to shared memory to free |
216 | */ | 358 | */ |
@@ -260,11 +402,47 @@ void *tee_shm_get_va(struct tee_shm *shm, size_t offs); | |||
260 | int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); | 402 | int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa); |
261 | 403 | ||
262 | /** | 404 | /** |
405 | * tee_shm_get_size() - Get size of shared memory buffer | ||
406 | * @shm: Shared memory handle | ||
407 | * @returns size of shared memory | ||
408 | */ | ||
409 | static inline size_t tee_shm_get_size(struct tee_shm *shm) | ||
410 | { | ||
411 | return shm->size; | ||
412 | } | ||
413 | |||
414 | /** | ||
415 | * tee_shm_get_pages() - Get list of pages that hold shared buffer | ||
416 | * @shm: Shared memory handle | ||
417 | * @num_pages: Number of pages will be stored there | ||
418 | * @returns pointer to pages array | ||
419 | */ | ||
420 | static inline struct page **tee_shm_get_pages(struct tee_shm *shm, | ||
421 | size_t *num_pages) | ||
422 | { | ||
423 | *num_pages = shm->num_pages; | ||
424 | return shm->pages; | ||
425 | } | ||
426 | |||
427 | /** | ||
428 | * tee_shm_get_page_offset() - Get shared buffer offset from page start | ||
429 | * @shm: Shared memory handle | ||
430 | * @returns page offset of shared buffer | ||
431 | */ | ||
432 | static inline size_t tee_shm_get_page_offset(struct tee_shm *shm) | ||
433 | { | ||
434 | return shm->offset; | ||
435 | } | ||
436 | |||
437 | /** | ||
263 | * tee_shm_get_id() - Get id of a shared memory object | 438 | * tee_shm_get_id() - Get id of a shared memory object |
264 | * @shm: Shared memory handle | 439 | * @shm: Shared memory handle |
265 | * @returns id | 440 | * @returns id |
266 | */ | 441 | */ |
267 | int tee_shm_get_id(struct tee_shm *shm); | 442 | static inline int tee_shm_get_id(struct tee_shm *shm) |
443 | { | ||
444 | return shm->id; | ||
445 | } | ||
268 | 446 | ||
269 | /** | 447 | /** |
270 | * tee_shm_get_from_id() - Find shared memory object and increase reference | 448 | * tee_shm_get_from_id() - Find shared memory object and increase reference |
@@ -275,4 +453,16 @@ int tee_shm_get_id(struct tee_shm *shm); | |||
275 | */ | 453 | */ |
276 | struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); | 454 | struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id); |
277 | 455 | ||
456 | static inline bool tee_param_is_memref(struct tee_param *param) | ||
457 | { | ||
458 | switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { | ||
459 | case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT: | ||
460 | case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT: | ||
461 | case TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT: | ||
462 | return true; | ||
463 | default: | ||
464 | return false; | ||
465 | } | ||
466 | } | ||
467 | |||
278 | #endif /*__TEE_DRV_H*/ | 468 | #endif /*__TEE_DRV_H*/ |
diff --git a/include/linux/ti-emif-sram.h b/include/linux/ti-emif-sram.h new file mode 100644 index 000000000000..45bc6b376492 --- /dev/null +++ b/include/linux/ti-emif-sram.h | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * TI AM33XX EMIF Routines | ||
3 | * | ||
4 | * Copyright (C) 2016-2017 Texas Instruments Inc. | ||
5 | * Dave Gerlach | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation version 2. | ||
10 | * | ||
11 | * This program is distributed "as is" WITHOUT ANY WARRANTY of any | ||
12 | * kind, whether express or implied; without even the implied warranty | ||
13 | * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | */ | ||
16 | #ifndef __LINUX_TI_EMIF_H | ||
17 | #define __LINUX_TI_EMIF_H | ||
18 | |||
19 | #include <linux/kbuild.h> | ||
20 | #include <linux/types.h> | ||
21 | #ifndef __ASSEMBLY__ | ||
22 | |||
23 | struct emif_regs_amx3 { | ||
24 | u32 emif_sdcfg_val; | ||
25 | u32 emif_timing1_val; | ||
26 | u32 emif_timing2_val; | ||
27 | u32 emif_timing3_val; | ||
28 | u32 emif_ref_ctrl_val; | ||
29 | u32 emif_zqcfg_val; | ||
30 | u32 emif_pmcr_val; | ||
31 | u32 emif_pmcr_shdw_val; | ||
32 | u32 emif_rd_wr_level_ramp_ctrl; | ||
33 | u32 emif_rd_wr_exec_thresh; | ||
34 | u32 emif_cos_config; | ||
35 | u32 emif_priority_to_cos_mapping; | ||
36 | u32 emif_connect_id_serv_1_map; | ||
37 | u32 emif_connect_id_serv_2_map; | ||
38 | u32 emif_ocp_config_val; | ||
39 | u32 emif_lpddr2_nvm_tim; | ||
40 | u32 emif_lpddr2_nvm_tim_shdw; | ||
41 | u32 emif_dll_calib_ctrl_val; | ||
42 | u32 emif_dll_calib_ctrl_val_shdw; | ||
43 | u32 emif_ddr_phy_ctlr_1; | ||
44 | u32 emif_ext_phy_ctrl_vals[120]; | ||
45 | }; | ||
46 | |||
47 | struct ti_emif_pm_data { | ||
48 | void __iomem *ti_emif_base_addr_virt; | ||
49 | phys_addr_t ti_emif_base_addr_phys; | ||
50 | unsigned long ti_emif_sram_config; | ||
51 | struct emif_regs_amx3 *regs_virt; | ||
52 | phys_addr_t regs_phys; | ||
53 | } __packed __aligned(8); | ||
54 | |||
55 | struct ti_emif_pm_functions { | ||
56 | u32 save_context; | ||
57 | u32 restore_context; | ||
58 | u32 enter_sr; | ||
59 | u32 exit_sr; | ||
60 | u32 abort_sr; | ||
61 | } __packed __aligned(8); | ||
62 | |||
63 | struct gen_pool; | ||
64 | |||
65 | int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst); | ||
66 | int ti_emif_get_mem_type(void); | ||
67 | |||
68 | #endif | ||
69 | #endif /* __LINUX_TI_EMIF_H */ | ||
diff --git a/include/linux/torture.h b/include/linux/torture.h index a45702eb3e7b..66272862070b 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h | |||
@@ -79,7 +79,7 @@ void stutter_wait(const char *title); | |||
79 | int torture_stutter_init(int s); | 79 | int torture_stutter_init(int s); |
80 | 80 | ||
81 | /* Initialization and cleanup. */ | 81 | /* Initialization and cleanup. */ |
82 | bool torture_init_begin(char *ttype, bool v, int *runnable); | 82 | bool torture_init_begin(char *ttype, bool v); |
83 | void torture_init_end(void); | 83 | void torture_init_end(void); |
84 | bool torture_cleanup_begin(void); | 84 | bool torture_cleanup_begin(void); |
85 | void torture_cleanup_end(void); | 85 | void torture_cleanup_end(void); |
@@ -96,4 +96,10 @@ void _torture_stop_kthread(char *m, struct task_struct **tp); | |||
96 | #define torture_stop_kthread(n, tp) \ | 96 | #define torture_stop_kthread(n, tp) \ |
97 | _torture_stop_kthread("Stopping " #n " task", &(tp)) | 97 | _torture_stop_kthread("Stopping " #n " task", &(tp)) |
98 | 98 | ||
99 | #ifdef CONFIG_PREEMPT | ||
100 | #define torture_preempt_schedule() preempt_schedule() | ||
101 | #else | ||
102 | #define torture_preempt_schedule() | ||
103 | #endif | ||
104 | |||
99 | #endif /* __LINUX_TORTURE_H */ | 105 | #endif /* __LINUX_TORTURE_H */ |
diff --git a/include/linux/tpm.h b/include/linux/tpm.h index 5a090f5ab335..bcdd3790e94d 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h | |||
@@ -24,11 +24,6 @@ | |||
24 | 24 | ||
25 | #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ | 25 | #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ |
26 | 26 | ||
27 | /* | ||
28 | * Chip num is this value or a valid tpm idx | ||
29 | */ | ||
30 | #define TPM_ANY_NUM 0xFFFF | ||
31 | |||
32 | struct tpm_chip; | 27 | struct tpm_chip; |
33 | struct trusted_key_payload; | 28 | struct trusted_key_payload; |
34 | struct trusted_key_options; | 29 | struct trusted_key_options; |
@@ -50,46 +45,52 @@ struct tpm_class_ops { | |||
50 | unsigned long *timeout_cap); | 45 | unsigned long *timeout_cap); |
51 | int (*request_locality)(struct tpm_chip *chip, int loc); | 46 | int (*request_locality)(struct tpm_chip *chip, int loc); |
52 | void (*relinquish_locality)(struct tpm_chip *chip, int loc); | 47 | void (*relinquish_locality)(struct tpm_chip *chip, int loc); |
48 | void (*clk_enable)(struct tpm_chip *chip, bool value); | ||
53 | }; | 49 | }; |
54 | 50 | ||
55 | #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) | 51 | #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) |
56 | 52 | ||
57 | extern int tpm_is_tpm2(u32 chip_num); | 53 | extern int tpm_is_tpm2(struct tpm_chip *chip); |
58 | extern int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf); | 54 | extern int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf); |
59 | extern int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash); | 55 | extern int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, const u8 *hash); |
60 | extern int tpm_send(u32 chip_num, void *cmd, size_t buflen); | 56 | extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); |
61 | extern int tpm_get_random(u32 chip_num, u8 *data, size_t max); | 57 | extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); |
62 | extern int tpm_seal_trusted(u32 chip_num, | 58 | extern int tpm_seal_trusted(struct tpm_chip *chip, |
63 | struct trusted_key_payload *payload, | 59 | struct trusted_key_payload *payload, |
64 | struct trusted_key_options *options); | 60 | struct trusted_key_options *options); |
65 | extern int tpm_unseal_trusted(u32 chip_num, | 61 | extern int tpm_unseal_trusted(struct tpm_chip *chip, |
66 | struct trusted_key_payload *payload, | 62 | struct trusted_key_payload *payload, |
67 | struct trusted_key_options *options); | 63 | struct trusted_key_options *options); |
68 | #else | 64 | #else |
69 | static inline int tpm_is_tpm2(u32 chip_num) | 65 | static inline int tpm_is_tpm2(struct tpm_chip *chip) |
70 | { | 66 | { |
71 | return -ENODEV; | 67 | return -ENODEV; |
72 | } | 68 | } |
73 | static inline int tpm_pcr_read(u32 chip_num, int pcr_idx, u8 *res_buf) { | 69 | static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, u8 *res_buf) |
70 | { | ||
74 | return -ENODEV; | 71 | return -ENODEV; |
75 | } | 72 | } |
76 | static inline int tpm_pcr_extend(u32 chip_num, int pcr_idx, const u8 *hash) { | 73 | static inline int tpm_pcr_extend(struct tpm_chip *chip, int pcr_idx, |
74 | const u8 *hash) | ||
75 | { | ||
77 | return -ENODEV; | 76 | return -ENODEV; |
78 | } | 77 | } |
79 | static inline int tpm_send(u32 chip_num, void *cmd, size_t buflen) { | 78 | static inline int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen) |
79 | { | ||
80 | return -ENODEV; | 80 | return -ENODEV; |
81 | } | 81 | } |
82 | static inline int tpm_get_random(u32 chip_num, u8 *data, size_t max) { | 82 | static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max) |
83 | { | ||
83 | return -ENODEV; | 84 | return -ENODEV; |
84 | } | 85 | } |
85 | 86 | ||
86 | static inline int tpm_seal_trusted(u32 chip_num, | 87 | static inline int tpm_seal_trusted(struct tpm_chip *chip, |
87 | struct trusted_key_payload *payload, | 88 | struct trusted_key_payload *payload, |
88 | struct trusted_key_options *options) | 89 | struct trusted_key_options *options) |
89 | { | 90 | { |
90 | return -ENODEV; | 91 | return -ENODEV; |
91 | } | 92 | } |
92 | static inline int tpm_unseal_trusted(u32 chip_num, | 93 | static inline int tpm_unseal_trusted(struct tpm_chip *chip, |
93 | struct trusted_key_payload *payload, | 94 | struct trusted_key_payload *payload, |
94 | struct trusted_key_options *options) | 95 | struct trusted_key_options *options) |
95 | { | 96 | { |
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h new file mode 100644 index 000000000000..20d9da77fc11 --- /dev/null +++ b/include/linux/tpm_eventlog.h | |||
@@ -0,0 +1,124 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | |||
3 | #ifndef __LINUX_TPM_EVENTLOG_H__ | ||
4 | #define __LINUX_TPM_EVENTLOG_H__ | ||
5 | |||
6 | #include <crypto/hash_info.h> | ||
7 | |||
8 | #define TCG_EVENT_NAME_LEN_MAX 255 | ||
9 | #define MAX_TEXT_EVENT 1000 /* Max event string length */ | ||
10 | #define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */ | ||
11 | #define TPM2_ACTIVE_PCR_BANKS 3 | ||
12 | |||
13 | #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1 | ||
14 | #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2 | ||
15 | |||
16 | #ifdef CONFIG_PPC64 | ||
17 | #define do_endian_conversion(x) be32_to_cpu(x) | ||
18 | #else | ||
19 | #define do_endian_conversion(x) x | ||
20 | #endif | ||
21 | |||
22 | enum bios_platform_class { | ||
23 | BIOS_CLIENT = 0x00, | ||
24 | BIOS_SERVER = 0x01, | ||
25 | }; | ||
26 | |||
27 | struct tcpa_event { | ||
28 | u32 pcr_index; | ||
29 | u32 event_type; | ||
30 | u8 pcr_value[20]; /* SHA1 */ | ||
31 | u32 event_size; | ||
32 | u8 event_data[0]; | ||
33 | }; | ||
34 | |||
35 | enum tcpa_event_types { | ||
36 | PREBOOT = 0, | ||
37 | POST_CODE, | ||
38 | UNUSED, | ||
39 | NO_ACTION, | ||
40 | SEPARATOR, | ||
41 | ACTION, | ||
42 | EVENT_TAG, | ||
43 | SCRTM_CONTENTS, | ||
44 | SCRTM_VERSION, | ||
45 | CPU_MICROCODE, | ||
46 | PLATFORM_CONFIG_FLAGS, | ||
47 | TABLE_OF_DEVICES, | ||
48 | COMPACT_HASH, | ||
49 | IPL, | ||
50 | IPL_PARTITION_DATA, | ||
51 | NONHOST_CODE, | ||
52 | NONHOST_CONFIG, | ||
53 | NONHOST_INFO, | ||
54 | }; | ||
55 | |||
56 | struct tcpa_pc_event { | ||
57 | u32 event_id; | ||
58 | u32 event_size; | ||
59 | u8 event_data[0]; | ||
60 | }; | ||
61 | |||
62 | enum tcpa_pc_event_ids { | ||
63 | SMBIOS = 1, | ||
64 | BIS_CERT, | ||
65 | POST_BIOS_ROM, | ||
66 | ESCD, | ||
67 | CMOS, | ||
68 | NVRAM, | ||
69 | OPTION_ROM_EXEC, | ||
70 | OPTION_ROM_CONFIG, | ||
71 | OPTION_ROM_MICROCODE = 10, | ||
72 | S_CRTM_VERSION, | ||
73 | S_CRTM_CONTENTS, | ||
74 | POST_CONTENTS, | ||
75 | HOST_TABLE_OF_DEVICES, | ||
76 | }; | ||
77 | |||
78 | /* http://www.trustedcomputinggroup.org/tcg-efi-protocol-specification/ */ | ||
79 | |||
80 | struct tcg_efi_specid_event_algs { | ||
81 | u16 alg_id; | ||
82 | u16 digest_size; | ||
83 | } __packed; | ||
84 | |||
85 | struct tcg_efi_specid_event { | ||
86 | u8 signature[16]; | ||
87 | u32 platform_class; | ||
88 | u8 spec_version_minor; | ||
89 | u8 spec_version_major; | ||
90 | u8 spec_errata; | ||
91 | u8 uintnsize; | ||
92 | u32 num_algs; | ||
93 | struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS]; | ||
94 | u8 vendor_info_size; | ||
95 | u8 vendor_info[0]; | ||
96 | } __packed; | ||
97 | |||
98 | struct tcg_pcr_event { | ||
99 | u32 pcr_idx; | ||
100 | u32 event_type; | ||
101 | u8 digest[20]; | ||
102 | u32 event_size; | ||
103 | u8 event[0]; | ||
104 | } __packed; | ||
105 | |||
106 | struct tcg_event_field { | ||
107 | u32 event_size; | ||
108 | u8 event[0]; | ||
109 | } __packed; | ||
110 | |||
111 | struct tpm2_digest { | ||
112 | u16 alg_id; | ||
113 | u8 digest[SHA512_DIGEST_SIZE]; | ||
114 | } __packed; | ||
115 | |||
116 | struct tcg_pcr_event2 { | ||
117 | u32 pcr_idx; | ||
118 | u32 event_type; | ||
119 | u32 count; | ||
120 | struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS]; | ||
121 | struct tcg_event_field event; | ||
122 | } __packed; | ||
123 | |||
124 | #endif | ||
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h index af44e7c2d577..8a1442c4e513 100644 --- a/include/linux/trace_events.h +++ b/include/linux/trace_events.h | |||
@@ -467,6 +467,7 @@ trace_trigger_soft_disabled(struct trace_event_file *file) | |||
467 | unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); | 467 | unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx); |
468 | int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); | 468 | int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog); |
469 | void perf_event_detach_bpf_prog(struct perf_event *event); | 469 | void perf_event_detach_bpf_prog(struct perf_event *event); |
470 | int perf_event_query_prog_array(struct perf_event *event, void __user *info); | ||
470 | #else | 471 | #else |
471 | static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) | 472 | static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx) |
472 | { | 473 | { |
@@ -481,6 +482,11 @@ perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog) | |||
481 | 482 | ||
482 | static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } | 483 | static inline void perf_event_detach_bpf_prog(struct perf_event *event) { } |
483 | 484 | ||
485 | static inline int | ||
486 | perf_event_query_prog_array(struct perf_event *event, void __user *info) | ||
487 | { | ||
488 | return -EOPNOTSUPP; | ||
489 | } | ||
484 | #endif | 490 | #endif |
485 | 491 | ||
486 | enum { | 492 | enum { |
@@ -528,6 +534,7 @@ do { \ | |||
528 | struct perf_event; | 534 | struct perf_event; |
529 | 535 | ||
530 | DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); | 536 | DECLARE_PER_CPU(struct pt_regs, perf_trace_regs); |
537 | DECLARE_PER_CPU(int, bpf_kprobe_override); | ||
531 | 538 | ||
532 | extern int perf_trace_init(struct perf_event *event); | 539 | extern int perf_trace_init(struct perf_event *event); |
533 | extern void perf_trace_destroy(struct perf_event *event); | 540 | extern void perf_trace_destroy(struct perf_event *event); |
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index a26ffbe09e71..c94f466d57ef 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h | |||
@@ -137,11 +137,8 @@ extern void syscall_unregfunc(void); | |||
137 | \ | 137 | \ |
138 | if (!(cond)) \ | 138 | if (!(cond)) \ |
139 | return; \ | 139 | return; \ |
140 | if (rcucheck) { \ | 140 | if (rcucheck) \ |
141 | if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \ | ||
142 | return; \ | ||
143 | rcu_irq_enter_irqson(); \ | 141 | rcu_irq_enter_irqson(); \ |
144 | } \ | ||
145 | rcu_read_lock_sched_notrace(); \ | 142 | rcu_read_lock_sched_notrace(); \ |
146 | it_func_ptr = rcu_dereference_sched((tp)->funcs); \ | 143 | it_func_ptr = rcu_dereference_sched((tp)->funcs); \ |
147 | if (it_func_ptr) { \ | 144 | if (it_func_ptr) { \ |
diff --git a/include/linux/tty.h b/include/linux/tty.h index 7ac8ba208b1f..0a6c71e0ad01 100644 --- a/include/linux/tty.h +++ b/include/linux/tty.h | |||
@@ -405,6 +405,8 @@ extern const char *tty_name(const struct tty_struct *tty); | |||
405 | extern struct tty_struct *tty_kopen(dev_t device); | 405 | extern struct tty_struct *tty_kopen(dev_t device); |
406 | extern void tty_kclose(struct tty_struct *tty); | 406 | extern void tty_kclose(struct tty_struct *tty); |
407 | extern int tty_dev_name_to_number(const char *name, dev_t *number); | 407 | extern int tty_dev_name_to_number(const char *name, dev_t *number); |
408 | extern int tty_ldisc_lock(struct tty_struct *tty, unsigned long timeout); | ||
409 | extern void tty_ldisc_unlock(struct tty_struct *tty); | ||
408 | #else | 410 | #else |
409 | static inline void tty_kref_put(struct tty_struct *tty) | 411 | static inline void tty_kref_put(struct tty_struct *tty) |
410 | { } | 412 | { } |
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h index 3bc5144b1c7e..1ef64d4ad887 100644 --- a/include/linux/tty_ldisc.h +++ b/include/linux/tty_ldisc.h | |||
@@ -187,7 +187,7 @@ struct tty_ldisc_ops { | |||
187 | long (*compat_ioctl)(struct tty_struct *tty, struct file *file, | 187 | long (*compat_ioctl)(struct tty_struct *tty, struct file *file, |
188 | unsigned int cmd, unsigned long arg); | 188 | unsigned int cmd, unsigned long arg); |
189 | void (*set_termios)(struct tty_struct *tty, struct ktermios *old); | 189 | void (*set_termios)(struct tty_struct *tty, struct ktermios *old); |
190 | unsigned int (*poll)(struct tty_struct *, struct file *, | 190 | __poll_t (*poll)(struct tty_struct *, struct file *, |
191 | struct poll_table_struct *); | 191 | struct poll_table_struct *); |
192 | int (*hangup)(struct tty_struct *tty); | 192 | int (*hangup)(struct tty_struct *tty); |
193 | 193 | ||
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 251e655d407f..efe79c1cdd47 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
@@ -273,4 +273,12 @@ extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count); | |||
273 | #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) | 273 | #define unsafe_put_user(x, ptr, err) do { if (unlikely(__put_user(x, ptr))) goto err; } while (0) |
274 | #endif | 274 | #endif |
275 | 275 | ||
276 | #ifdef CONFIG_HARDENED_USERCOPY | ||
277 | void usercopy_warn(const char *name, const char *detail, bool to_user, | ||
278 | unsigned long offset, unsigned long len); | ||
279 | void __noreturn usercopy_abort(const char *name, const char *detail, | ||
280 | bool to_user, unsigned long offset, | ||
281 | unsigned long len); | ||
282 | #endif | ||
283 | |||
276 | #endif /* __LINUX_UACCESS_H__ */ | 284 | #endif /* __LINUX_UACCESS_H__ */ |
diff --git a/include/linux/usb.h b/include/linux/usb.h index fbbe974661f2..0173597e59aa 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
@@ -609,6 +609,10 @@ struct usb3_lpm_parameters { | |||
609 | * to keep track of the number of functions that require USB 3.0 Link Power | 609 | * to keep track of the number of functions that require USB 3.0 Link Power |
610 | * Management to be disabled for this usb_device. This count should only | 610 | * Management to be disabled for this usb_device. This count should only |
611 | * be manipulated by those functions, with the bandwidth_mutex is held. | 611 | * be manipulated by those functions, with the bandwidth_mutex is held. |
612 | * @hub_delay: cached value consisting of: | ||
613 | * parent->hub_delay + wHubDelay + tTPTransmissionDelay (40ns) | ||
614 | * | ||
615 | * Will be used as wValue for SetIsochDelay requests. | ||
612 | * | 616 | * |
613 | * Notes: | 617 | * Notes: |
614 | * Usbcore drivers should not set usbdev->state directly. Instead use | 618 | * Usbcore drivers should not set usbdev->state directly. Instead use |
@@ -689,6 +693,8 @@ struct usb_device { | |||
689 | struct usb3_lpm_parameters u1_params; | 693 | struct usb3_lpm_parameters u1_params; |
690 | struct usb3_lpm_parameters u2_params; | 694 | struct usb3_lpm_parameters u2_params; |
691 | unsigned lpm_disable_count; | 695 | unsigned lpm_disable_count; |
696 | |||
697 | u16 hub_delay; | ||
692 | }; | 698 | }; |
693 | #define to_usb_device(d) container_of(d, struct usb_device, dev) | 699 | #define to_usb_device(d) container_of(d, struct usb_device, dev) |
694 | 700 | ||
@@ -1293,7 +1299,6 @@ extern int usb_disabled(void); | |||
1293 | #define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired | 1299 | #define URB_ISO_ASAP 0x0002 /* iso-only; use the first unexpired |
1294 | * slot in the schedule */ | 1300 | * slot in the schedule */ |
1295 | #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ | 1301 | #define URB_NO_TRANSFER_DMA_MAP 0x0004 /* urb->transfer_dma valid on submit */ |
1296 | #define URB_NO_FSBR 0x0020 /* UHCI-specific */ | ||
1297 | #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ | 1302 | #define URB_ZERO_PACKET 0x0040 /* Finish bulk OUT with short packet */ |
1298 | #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt | 1303 | #define URB_NO_INTERRUPT 0x0080 /* HINT: no non-error interrupt |
1299 | * needed */ | 1304 | * needed */ |
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 0142f3af0da6..66a5cff7ee14 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h | |||
@@ -330,6 +330,7 @@ struct usb_gadget_ops { | |||
330 | * @name: Identifies the controller hardware type. Used in diagnostics | 330 | * @name: Identifies the controller hardware type. Used in diagnostics |
331 | * and sometimes configuration. | 331 | * and sometimes configuration. |
332 | * @dev: Driver model state for this abstract device. | 332 | * @dev: Driver model state for this abstract device. |
333 | * @isoch_delay: value from Set Isoch Delay request. Only valid on SS/SSP | ||
333 | * @out_epnum: last used out ep number | 334 | * @out_epnum: last used out ep number |
334 | * @in_epnum: last used in ep number | 335 | * @in_epnum: last used in ep number |
335 | * @mA: last set mA value | 336 | * @mA: last set mA value |
@@ -394,6 +395,7 @@ struct usb_gadget { | |||
394 | enum usb_device_state state; | 395 | enum usb_device_state state; |
395 | const char *name; | 396 | const char *name; |
396 | struct device dev; | 397 | struct device dev; |
398 | unsigned isoch_delay; | ||
397 | unsigned out_epnum; | 399 | unsigned out_epnum; |
398 | unsigned in_epnum; | 400 | unsigned in_epnum; |
399 | unsigned mA; | 401 | unsigned mA; |
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h index 6cbe7a5c2b57..dba55ccb9b53 100644 --- a/include/linux/usb/of.h +++ b/include/linux/usb/of.h | |||
@@ -12,13 +12,17 @@ | |||
12 | #include <linux/usb/otg.h> | 12 | #include <linux/usb/otg.h> |
13 | #include <linux/usb/phy.h> | 13 | #include <linux/usb/phy.h> |
14 | 14 | ||
15 | struct usb_device; | ||
16 | |||
15 | #if IS_ENABLED(CONFIG_OF) | 17 | #if IS_ENABLED(CONFIG_OF) |
16 | enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); | 18 | enum usb_dr_mode of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0); |
17 | bool of_usb_host_tpl_support(struct device_node *np); | 19 | bool of_usb_host_tpl_support(struct device_node *np); |
18 | int of_usb_update_otg_caps(struct device_node *np, | 20 | int of_usb_update_otg_caps(struct device_node *np, |
19 | struct usb_otg_caps *otg_caps); | 21 | struct usb_otg_caps *otg_caps); |
20 | struct device_node *usb_of_get_child_node(struct device_node *parent, | 22 | struct device_node *usb_of_get_device_node(struct usb_device *hub, int port1); |
21 | int portnum); | 23 | bool usb_of_has_combined_node(struct usb_device *udev); |
24 | struct device_node *usb_of_get_interface_node(struct usb_device *udev, | ||
25 | u8 config, u8 ifnum); | ||
22 | struct device *usb_of_get_companion_dev(struct device *dev); | 26 | struct device *usb_of_get_companion_dev(struct device *dev); |
23 | #else | 27 | #else |
24 | static inline enum usb_dr_mode | 28 | static inline enum usb_dr_mode |
@@ -35,8 +39,17 @@ static inline int of_usb_update_otg_caps(struct device_node *np, | |||
35 | { | 39 | { |
36 | return 0; | 40 | return 0; |
37 | } | 41 | } |
38 | static inline struct device_node *usb_of_get_child_node | 42 | static inline struct device_node * |
39 | (struct device_node *parent, int portnum) | 43 | usb_of_get_device_node(struct usb_device *hub, int port1) |
44 | { | ||
45 | return NULL; | ||
46 | } | ||
47 | static inline bool usb_of_has_combined_node(struct usb_device *udev) | ||
48 | { | ||
49 | return false; | ||
50 | } | ||
51 | static inline struct device_node * | ||
52 | usb_of_get_interface_node(struct usb_device *udev, u8 config, u8 ifnum) | ||
40 | { | 53 | { |
41 | return NULL; | 54 | return NULL; |
42 | } | 55 | } |
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index e00051ced806..b3d41d7409b3 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h | |||
@@ -148,6 +148,8 @@ enum pd_pdo_type { | |||
148 | (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \ | 148 | (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \ |
149 | PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma)) | 149 | PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma)) |
150 | 150 | ||
151 | #define VSAFE5V 5000 /* mv units */ | ||
152 | |||
151 | #define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */ | 153 | #define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */ |
152 | #define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */ | 154 | #define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */ |
153 | #define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ | 155 | #define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */ |
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h index d92259f8de0a..2b64d23ace5c 100644 --- a/include/linux/usb/pd_vdo.h +++ b/include/linux/usb/pd_vdo.h | |||
@@ -65,7 +65,7 @@ | |||
65 | #define CMD_EXIT_MODE 5 | 65 | #define CMD_EXIT_MODE 5 |
66 | #define CMD_ATTENTION 6 | 66 | #define CMD_ATTENTION 6 |
67 | 67 | ||
68 | #define VDO_CMD_VENDOR(x) (((10 + (x)) & 0x1f)) | 68 | #define VDO_CMD_VENDOR(x) (((0x10 + (x)) & 0x1f)) |
69 | 69 | ||
70 | /* ChromeOS specific commands */ | 70 | /* ChromeOS specific commands */ |
71 | #define VDO_CMD_VERSION VDO_CMD_VENDOR(0) | 71 | #define VDO_CMD_VERSION VDO_CMD_VENDOR(0) |
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h index 67102f3d59d4..53924f8e840c 100644 --- a/include/linux/usb/renesas_usbhs.h +++ b/include/linux/usb/renesas_usbhs.h | |||
@@ -17,6 +17,7 @@ | |||
17 | */ | 17 | */ |
18 | #ifndef RENESAS_USB_H | 18 | #ifndef RENESAS_USB_H |
19 | #define RENESAS_USB_H | 19 | #define RENESAS_USB_H |
20 | #include <linux/notifier.h> | ||
20 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
21 | #include <linux/usb/ch9.h> | 22 | #include <linux/usb/ch9.h> |
22 | 23 | ||
@@ -98,6 +99,13 @@ struct renesas_usbhs_platform_callback { | |||
98 | * VBUS control is needed for Host | 99 | * VBUS control is needed for Host |
99 | */ | 100 | */ |
100 | int (*set_vbus)(struct platform_device *pdev, int enable); | 101 | int (*set_vbus)(struct platform_device *pdev, int enable); |
102 | |||
103 | /* | ||
104 | * option: | ||
105 | * extcon notifier to set host/peripheral mode. | ||
106 | */ | ||
107 | int (*notifier)(struct notifier_block *nb, unsigned long event, | ||
108 | void *data); | ||
101 | }; | 109 | }; |
102 | 110 | ||
103 | /* | 111 | /* |
@@ -187,6 +195,7 @@ struct renesas_usbhs_driver_param { | |||
187 | #define USBHS_TYPE_RCAR_GEN2 1 | 195 | #define USBHS_TYPE_RCAR_GEN2 1 |
188 | #define USBHS_TYPE_RCAR_GEN3 2 | 196 | #define USBHS_TYPE_RCAR_GEN3 2 |
189 | #define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3 | 197 | #define USBHS_TYPE_RCAR_GEN3_WITH_PLL 3 |
198 | #define USBHS_TYPE_RZA1 4 | ||
190 | 199 | ||
191 | /* | 200 | /* |
192 | * option: | 201 | * option: |
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 073197f0d2bb..ca1c0b57f03f 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h | |||
@@ -183,14 +183,14 @@ struct tcpm_port; | |||
183 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); | 183 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); |
184 | void tcpm_unregister_port(struct tcpm_port *port); | 184 | void tcpm_unregister_port(struct tcpm_port *port); |
185 | 185 | ||
186 | void tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, | 186 | int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, |
187 | unsigned int nr_pdo); | 187 | unsigned int nr_pdo); |
188 | void tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, | 188 | int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, |
189 | unsigned int nr_pdo, | 189 | unsigned int nr_pdo, |
190 | unsigned int max_snk_mv, | 190 | unsigned int max_snk_mv, |
191 | unsigned int max_snk_ma, | 191 | unsigned int max_snk_ma, |
192 | unsigned int max_snk_mw, | 192 | unsigned int max_snk_mw, |
193 | unsigned int operating_snk_mw); | 193 | unsigned int operating_snk_mw); |
194 | 194 | ||
195 | void tcpm_vbus_change(struct tcpm_port *port); | 195 | void tcpm_vbus_change(struct tcpm_port *port); |
196 | void tcpm_cc_change(struct tcpm_port *port); | 196 | void tcpm_cc_change(struct tcpm_port *port); |
diff --git a/include/linux/uuid.h b/include/linux/uuid.h index 33b0bdbb613c..d9c4a6cce3c2 100644 --- a/include/linux/uuid.h +++ b/include/linux/uuid.h | |||
@@ -17,6 +17,7 @@ | |||
17 | #define _LINUX_UUID_H_ | 17 | #define _LINUX_UUID_H_ |
18 | 18 | ||
19 | #include <uapi/linux/uuid.h> | 19 | #include <uapi/linux/uuid.h> |
20 | #include <linux/string.h> | ||
20 | 21 | ||
21 | #define UUID_SIZE 16 | 22 | #define UUID_SIZE 16 |
22 | 23 | ||
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h new file mode 100644 index 000000000000..c71def6b310f --- /dev/null +++ b/include/linux/vbox_utils.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */ | ||
2 | /* Copyright (C) 2006-2016 Oracle Corporation */ | ||
3 | |||
4 | #ifndef __VBOX_UTILS_H__ | ||
5 | #define __VBOX_UTILS_H__ | ||
6 | |||
7 | #include <linux/printk.h> | ||
8 | #include <linux/vbox_vmmdev_types.h> | ||
9 | |||
10 | struct vbg_dev; | ||
11 | |||
12 | /** | ||
13 | * vboxguest logging functions, these log both to the backdoor and call | ||
14 | * the equivalent kernel pr_foo function. | ||
15 | */ | ||
16 | __printf(1, 2) void vbg_info(const char *fmt, ...); | ||
17 | __printf(1, 2) void vbg_warn(const char *fmt, ...); | ||
18 | __printf(1, 2) void vbg_err(const char *fmt, ...); | ||
19 | |||
20 | /* Only use backdoor logging for non-dynamic debug builds */ | ||
21 | #if defined(DEBUG) && !defined(CONFIG_DYNAMIC_DEBUG) | ||
22 | __printf(1, 2) void vbg_debug(const char *fmt, ...); | ||
23 | #else | ||
24 | #define vbg_debug pr_debug | ||
25 | #endif | ||
26 | |||
27 | /** | ||
28 | * Allocate memory for generic request and initialize the request header. | ||
29 | * | ||
30 | * Return: the allocated memory | ||
31 | * @len: Size of memory block required for the request. | ||
32 | * @req_type: The generic request type. | ||
33 | */ | ||
34 | void *vbg_req_alloc(size_t len, enum vmmdev_request_type req_type); | ||
35 | |||
36 | /** | ||
37 | * Perform a generic request. | ||
38 | * | ||
39 | * Return: VBox status code | ||
40 | * @gdev: The Guest extension device. | ||
41 | * @req: Pointer to the request structure. | ||
42 | */ | ||
43 | int vbg_req_perform(struct vbg_dev *gdev, void *req); | ||
44 | |||
45 | int vbg_hgcm_connect(struct vbg_dev *gdev, | ||
46 | struct vmmdev_hgcm_service_location *loc, | ||
47 | u32 *client_id, int *vbox_status); | ||
48 | |||
49 | int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status); | ||
50 | |||
51 | int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, | ||
52 | u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, | ||
53 | u32 parm_count, int *vbox_status); | ||
54 | |||
55 | int vbg_hgcm_call32( | ||
56 | struct vbg_dev *gdev, u32 client_id, u32 function, u32 timeout_ms, | ||
57 | struct vmmdev_hgcm_function_parameter32 *parm32, u32 parm_count, | ||
58 | int *vbox_status); | ||
59 | |||
60 | /** | ||
61 | * Convert a VirtualBox status code to a standard Linux kernel return value. | ||
62 | * Return: 0 or negative errno value. | ||
63 | * @rc: VirtualBox status code to convert. | ||
64 | */ | ||
65 | int vbg_status_code_to_errno(int rc); | ||
66 | |||
67 | /** | ||
68 | * Helper for the vboxsf driver to get a reference to the guest device. | ||
69 | * Return: a pointer to the gdev; or a ERR_PTR value on error. | ||
70 | */ | ||
71 | struct vbg_dev *vbg_get_gdev(void); | ||
72 | |||
73 | /** | ||
74 | * Helper for the vboxsf driver to put a guest device reference. | ||
75 | * @gdev: Reference returned by vbg_get_gdev to put. | ||
76 | */ | ||
77 | void vbg_put_gdev(struct vbg_dev *gdev); | ||
78 | |||
79 | #endif | ||
diff --git a/include/linux/vfio.h b/include/linux/vfio.h index a47b985341d1..66741ab087c1 100644 --- a/include/linux/vfio.h +++ b/include/linux/vfio.h | |||
@@ -145,7 +145,8 @@ extern struct vfio_info_cap_header *vfio_info_cap_add( | |||
145 | extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); | 145 | extern void vfio_info_cap_shift(struct vfio_info_cap *caps, size_t offset); |
146 | 146 | ||
147 | extern int vfio_info_add_capability(struct vfio_info_cap *caps, | 147 | extern int vfio_info_add_capability(struct vfio_info_cap *caps, |
148 | int cap_type_id, void *cap_type); | 148 | struct vfio_info_cap_header *cap, |
149 | size_t size); | ||
149 | 150 | ||
150 | extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, | 151 | extern int vfio_set_irqs_validate_and_prepare(struct vfio_irq_set *hdr, |
151 | int num_irqs, int max_irq_type, | 152 | int num_irqs, int max_irq_type, |
diff --git a/include/linux/visorbus.h b/include/linux/visorbus.h new file mode 100644 index 000000000000..0d8bd6769b13 --- /dev/null +++ b/include/linux/visorbus.h | |||
@@ -0,0 +1,344 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
2 | /* | ||
3 | * Copyright (C) 2010 - 2013 UNISYS CORPORATION | ||
4 | * All rights reserved. | ||
5 | */ | ||
6 | |||
7 | /* | ||
8 | * This header file is to be included by other kernel mode components that | ||
9 | * implement a particular kind of visor_device. Each of these other kernel | ||
10 | * mode components is called a visor device driver. Refer to visortemplate | ||
11 | * for a minimal sample visor device driver. | ||
12 | * | ||
13 | * There should be nothing in this file that is private to the visorbus | ||
14 | * bus implementation itself. | ||
15 | */ | ||
16 | |||
17 | #ifndef __VISORBUS_H__ | ||
18 | #define __VISORBUS_H__ | ||
19 | |||
20 | #include <linux/device.h> | ||
21 | |||
22 | #define VISOR_CHANNEL_SIGNATURE ('L' << 24 | 'N' << 16 | 'C' << 8 | 'E') | ||
23 | |||
24 | /* | ||
25 | * enum channel_serverstate | ||
26 | * @CHANNELSRV_UNINITIALIZED: Channel is in an undefined state. | ||
27 | * @CHANNELSRV_READY: Channel has been initialized by server. | ||
28 | */ | ||
29 | enum channel_serverstate { | ||
30 | CHANNELSRV_UNINITIALIZED = 0, | ||
31 | CHANNELSRV_READY = 1 | ||
32 | }; | ||
33 | |||
34 | /* | ||
35 | * enum channel_clientstate | ||
36 | * @CHANNELCLI_DETACHED: | ||
37 | * @CHANNELCLI_DISABLED: Client can see channel but is NOT allowed to use it | ||
38 | * unless given TBD* explicit request | ||
39 | * (should actually be < DETACHED). | ||
40 | * @CHANNELCLI_ATTACHING: Legacy EFI client request for EFI server to attach. | ||
41 | * @CHANNELCLI_ATTACHED: Idle, but client may want to use channel any time. | ||
42 | * @CHANNELCLI_BUSY: Client either wants to use or is using channel. | ||
43 | * @CHANNELCLI_OWNED: "No worries" state - client can access channel | ||
44 | * anytime. | ||
45 | */ | ||
46 | enum channel_clientstate { | ||
47 | CHANNELCLI_DETACHED = 0, | ||
48 | CHANNELCLI_DISABLED = 1, | ||
49 | CHANNELCLI_ATTACHING = 2, | ||
50 | CHANNELCLI_ATTACHED = 3, | ||
51 | CHANNELCLI_BUSY = 4, | ||
52 | CHANNELCLI_OWNED = 5 | ||
53 | }; | ||
54 | |||
55 | /* | ||
56 | * Values for VISOR_CHANNEL_PROTOCOL.Features: This define exists so that | ||
57 | * a guest can look at the FeatureFlags in the io channel, and configure the | ||
58 | * driver to use interrupts or not based on this setting. All feature bits for | ||
59 | * all channels should be defined here. The io channel feature bits are defined | ||
60 | * below. | ||
61 | */ | ||
62 | #define VISOR_DRIVER_ENABLES_INTS (0x1ULL << 1) | ||
63 | #define VISOR_CHANNEL_IS_POLLING (0x1ULL << 3) | ||
64 | #define VISOR_IOVM_OK_DRIVER_DISABLING_INTS (0x1ULL << 4) | ||
65 | #define VISOR_DRIVER_DISABLES_INTS (0x1ULL << 5) | ||
66 | #define VISOR_DRIVER_ENHANCED_RCVBUF_CHECKING (0x1ULL << 6) | ||
67 | |||
68 | /* | ||
69 | * struct channel_header - Common Channel Header | ||
70 | * @signature: Signature. | ||
71 | * @legacy_state: DEPRECATED - being replaced by the state fields below. | ||
72 | * @header_size: sizeof(struct channel_header). | ||
73 | * @size: Total size of this channel in bytes. | ||
74 | * @features: Flags to modify behavior. | ||
75 | * @chtype: Channel type: data, bus, control, etc.. | ||
76 | * @partition_handle: ID of guest partition. | ||
77 | * @handle: Device number of this channel in client. | ||
78 | * @ch_space_offset: Offset in bytes to channel specific area. | ||
79 | * @version_id: Struct channel_header Version ID. | ||
80 | * @partition_index: Index of guest partition. | ||
81 | * @zone_guid: GUID of Channel's zone. | ||
82 | * @cli_str_offset: Offset from channel header to null-terminated | ||
83 | * ClientString (0 if ClientString not present). | ||
84 | * @cli_state_boot: CHANNEL_CLIENTSTATE of pre-boot EFI client of this | ||
85 | * channel. | ||
86 | * @cmd_state_cli: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see | ||
87 | * ServerStateUp, ServerStateDown, etc). | ||
88 | * @cli_state_os: CHANNEL_CLIENTSTATE of Guest OS client of this channel. | ||
89 | * @ch_characteristic: CHANNEL_CHARACTERISTIC_<xxx>. | ||
90 | * @cmd_state_srv: CHANNEL_COMMANDSTATE (overloaded in Windows drivers, see | ||
91 | * ServerStateUp, ServerStateDown, etc). | ||
92 | * @srv_state: CHANNEL_SERVERSTATE. | ||
93 | * @cli_error_boot: Bits to indicate err states for boot clients, so err | ||
94 | * messages can be throttled. | ||
95 | * @cli_error_os: Bits to indicate err states for OS clients, so err | ||
96 | * messages can be throttled. | ||
97 | * @filler: Pad out to 128 byte cacheline. | ||
98 | * @recover_channel: Please add all new single-byte values below here. | ||
99 | */ | ||
100 | struct channel_header { | ||
101 | u64 signature; | ||
102 | u32 legacy_state; | ||
103 | /* SrvState, CliStateBoot, and CliStateOS below */ | ||
104 | u32 header_size; | ||
105 | u64 size; | ||
106 | u64 features; | ||
107 | guid_t chtype; | ||
108 | u64 partition_handle; | ||
109 | u64 handle; | ||
110 | u64 ch_space_offset; | ||
111 | u32 version_id; | ||
112 | u32 partition_index; | ||
113 | guid_t zone_guid; | ||
114 | u32 cli_str_offset; | ||
115 | u32 cli_state_boot; | ||
116 | u32 cmd_state_cli; | ||
117 | u32 cli_state_os; | ||
118 | u32 ch_characteristic; | ||
119 | u32 cmd_state_srv; | ||
120 | u32 srv_state; | ||
121 | u8 cli_error_boot; | ||
122 | u8 cli_error_os; | ||
123 | u8 filler[1]; | ||
124 | u8 recover_channel; | ||
125 | } __packed; | ||
126 | |||
127 | #define VISOR_CHANNEL_ENABLE_INTS (0x1ULL << 0) | ||
128 | |||
129 | /* | ||
130 | * struct signal_queue_header - Subheader for the Signal Type variation of the | ||
131 | * Common Channel. | ||
132 | * @version: SIGNAL_QUEUE_HEADER Version ID. | ||
133 | * @chtype: Queue type: storage, network. | ||
134 | * @size: Total size of this queue in bytes. | ||
135 | * @sig_base_offset: Offset to signal queue area. | ||
136 | * @features: Flags to modify behavior. | ||
137 | * @num_sent: Total # of signals placed in this queue. | ||
138 | * @num_overflows: Total # of inserts failed due to full queue. | ||
139 | * @signal_size: Total size of a signal for this queue. | ||
140 | * @max_slots: Max # of slots in queue, 1 slot is always empty. | ||
141 | * @max_signals: Max # of signals in queue (MaxSignalSlots-1). | ||
142 | * @head: Queue head signal #. | ||
143 | * @num_received: Total # of signals removed from this queue. | ||
144 | * @tail: Queue tail signal. | ||
145 | * @reserved1: Reserved field. | ||
146 | * @reserved2: Reserved field. | ||
147 | * @client_queue: | ||
148 | * @num_irq_received: Total # of Interrupts received. This is incremented by the | ||
149 | * ISR in the guest windows driver. | ||
150 | * @num_empty: Number of times that visor_signal_remove is called and | ||
151 | * returned Empty Status. | ||
152 | * @errorflags: Error bits set during SignalReinit to denote trouble with | ||
153 | * client's fields. | ||
154 | * @filler: Pad out to 64 byte cacheline. | ||
155 | */ | ||
156 | struct signal_queue_header { | ||
157 | /* 1st cache line */ | ||
158 | u32 version; | ||
159 | u32 chtype; | ||
160 | u64 size; | ||
161 | u64 sig_base_offset; | ||
162 | u64 features; | ||
163 | u64 num_sent; | ||
164 | u64 num_overflows; | ||
165 | u32 signal_size; | ||
166 | u32 max_slots; | ||
167 | u32 max_signals; | ||
168 | u32 head; | ||
169 | /* 2nd cache line */ | ||
170 | u64 num_received; | ||
171 | u32 tail; | ||
172 | u32 reserved1; | ||
173 | u64 reserved2; | ||
174 | u64 client_queue; | ||
175 | u64 num_irq_received; | ||
176 | u64 num_empty; | ||
177 | u32 errorflags; | ||
178 | u8 filler[12]; | ||
179 | } __packed; | ||
180 | |||
181 | /* VISORCHANNEL Guids */ | ||
182 | /* {414815ed-c58c-11da-95a9-00e08161165f} */ | ||
183 | #define VISOR_VHBA_CHANNEL_GUID \ | ||
184 | GUID_INIT(0x414815ed, 0xc58c, 0x11da, \ | ||
185 | 0x95, 0xa9, 0x0, 0xe0, 0x81, 0x61, 0x16, 0x5f) | ||
186 | #define VISOR_VHBA_CHANNEL_GUID_STR \ | ||
187 | "414815ed-c58c-11da-95a9-00e08161165f" | ||
188 | struct visorchipset_state { | ||
189 | u32 created:1; | ||
190 | u32 attached:1; | ||
191 | u32 configured:1; | ||
192 | u32 running:1; | ||
193 | /* Remaining bits in this 32-bit word are reserved. */ | ||
194 | }; | ||
195 | |||
196 | /** | ||
197 | * struct visor_device - A device type for things "plugged" into the visorbus | ||
198 | * bus | ||
199 | * @visorchannel: Points to the channel that the device is | ||
200 | * associated with. | ||
201 | * @channel_type_guid: Identifies the channel type to the bus driver. | ||
202 | * @device: Device struct meant for use by the bus driver | ||
203 | * only. | ||
204 | * @list_all: Used by the bus driver to enumerate devices. | ||
205 | * @timer: Timer fired periodically to do interrupt-type | ||
206 | * activity. | ||
207 | * @being_removed: Indicates that the device is being removed from | ||
208 | * the bus. Private bus driver use only. | ||
209 | * @visordriver_callback_lock: Used by the bus driver to lock when adding and | ||
210 | * removing devices. | ||
211 | * @pausing: Indicates that a change towards a paused state | ||
212 | * is in progress. Only modified by the bus driver. | ||
213 | * @resuming: Indicates that a change towards a running state | ||
214 | * is in progress. Only modified by the bus driver. | ||
215 | * @chipset_bus_no: Private field used by the bus driver. | ||
216 | * @chipset_dev_no: Private field used by the bus driver. | ||
217 | * @state: Used to indicate the current state of the | ||
218 | * device. | ||
219 | * @inst: Unique GUID for this instance of the device. | ||
220 | * @name: Name of the device. | ||
221 | * @pending_msg_hdr: For private use by bus driver to respond to | ||
222 | * hypervisor requests. | ||
223 | * @vbus_hdr_info: A pointer to header info. Private use by bus | ||
224 | * driver. | ||
225 | * @partition_guid: Indicates client partition id. This should be the | ||
226 | * same across all visor_devices in the current | ||
227 | * guest. Private use by bus driver only. | ||
228 | */ | ||
229 | struct visor_device { | ||
230 | struct visorchannel *visorchannel; | ||
231 | guid_t channel_type_guid; | ||
232 | /* These fields are for private use by the bus driver only. */ | ||
233 | struct device device; | ||
234 | struct list_head list_all; | ||
235 | struct timer_list timer; | ||
236 | bool timer_active; | ||
237 | bool being_removed; | ||
238 | struct mutex visordriver_callback_lock; /* synchronize probe/remove */ | ||
239 | bool pausing; | ||
240 | bool resuming; | ||
241 | u32 chipset_bus_no; | ||
242 | u32 chipset_dev_no; | ||
243 | struct visorchipset_state state; | ||
244 | guid_t inst; | ||
245 | u8 *name; | ||
246 | struct controlvm_message_header *pending_msg_hdr; | ||
247 | void *vbus_hdr_info; | ||
248 | guid_t partition_guid; | ||
249 | struct dentry *debugfs_dir; | ||
250 | struct dentry *debugfs_bus_info; | ||
251 | }; | ||
252 | |||
253 | #define to_visor_device(x) container_of(x, struct visor_device, device) | ||
254 | |||
255 | typedef void (*visorbus_state_complete_func) (struct visor_device *dev, | ||
256 | int status); | ||
257 | |||
258 | /* | ||
259 | * This struct describes a specific visor channel, by providing its GUID, name, | ||
260 | * and sizes. | ||
261 | */ | ||
262 | struct visor_channeltype_descriptor { | ||
263 | const guid_t guid; | ||
264 | const char *name; | ||
265 | u64 min_bytes; | ||
266 | u32 version; | ||
267 | }; | ||
268 | |||
269 | /** | ||
270 | * struct visor_driver - Information provided by each visor driver when it | ||
271 | * registers with the visorbus driver | ||
272 | * @name: Name of the visor driver. | ||
273 | * @owner: The module owner. | ||
274 | * @channel_types: Types of channels handled by this driver, ending with | ||
275 | * a zero GUID. Our specialized BUS.match() method knows | ||
276 | * about this list, and uses it to determine whether this | ||
277 | * driver will in fact handle a new device that it has | ||
278 | * detected. | ||
279 | * @probe: Called when a new device comes online, by our probe() | ||
280 | * function specified by driver.probe() (triggered | ||
281 | * ultimately by some call to driver_register(), | ||
282 | * bus_add_driver(), or driver_attach()). | ||
283 | * @remove: Called when a new device is removed, by our remove() | ||
284 | * function specified by driver.remove() (triggered | ||
285 | * ultimately by some call to device_release_driver()). | ||
286 | * @channel_interrupt: Called periodically, whenever there is a possibility | ||
287 | * that "something interesting" may have happened to the | ||
288 | * channel. | ||
289 | * @pause: Called to initiate a change of the device's state. If | ||
290 | * the return value is < 0, there was an error and the | ||
291 | * state transition will NOT occur. If the return value | ||
292 | * is >= 0, then the state transition was INITIATED | ||
293 | * successfully, and complete_func() will be called (or | ||
294 | * was just called) with the final status when either the | ||
295 | * state transition fails or completes successfully. | ||
296 | * @resume: Behaves similar to pause. | ||
297 | * @driver: Private reference to the device driver. For use by bus | ||
298 | * driver only. | ||
299 | */ | ||
300 | struct visor_driver { | ||
301 | const char *name; | ||
302 | struct module *owner; | ||
303 | struct visor_channeltype_descriptor *channel_types; | ||
304 | int (*probe)(struct visor_device *dev); | ||
305 | void (*remove)(struct visor_device *dev); | ||
306 | void (*channel_interrupt)(struct visor_device *dev); | ||
307 | int (*pause)(struct visor_device *dev, | ||
308 | visorbus_state_complete_func complete_func); | ||
309 | int (*resume)(struct visor_device *dev, | ||
310 | visorbus_state_complete_func complete_func); | ||
311 | |||
312 | /* These fields are for private use by the bus driver only. */ | ||
313 | struct device_driver driver; | ||
314 | }; | ||
315 | |||
316 | #define to_visor_driver(x) (container_of(x, struct visor_driver, driver)) | ||
317 | |||
318 | int visor_check_channel(struct channel_header *ch, struct device *dev, | ||
319 | const guid_t *expected_uuid, char *chname, | ||
320 | u64 expected_min_bytes, u32 expected_version, | ||
321 | u64 expected_signature); | ||
322 | |||
323 | int visorbus_register_visor_driver(struct visor_driver *drv); | ||
324 | void visorbus_unregister_visor_driver(struct visor_driver *drv); | ||
325 | int visorbus_read_channel(struct visor_device *dev, | ||
326 | unsigned long offset, void *dest, | ||
327 | unsigned long nbytes); | ||
328 | int visorbus_write_channel(struct visor_device *dev, | ||
329 | unsigned long offset, void *src, | ||
330 | unsigned long nbytes); | ||
331 | int visorbus_enable_channel_interrupts(struct visor_device *dev); | ||
332 | void visorbus_disable_channel_interrupts(struct visor_device *dev); | ||
333 | |||
334 | int visorchannel_signalremove(struct visorchannel *channel, u32 queue, | ||
335 | void *msg); | ||
336 | int visorchannel_signalinsert(struct visorchannel *channel, u32 queue, | ||
337 | void *msg); | ||
338 | bool visorchannel_signalempty(struct visorchannel *channel, u32 queue); | ||
339 | const guid_t *visorchannel_get_guid(struct visorchannel *channel); | ||
340 | |||
341 | #define BUS_ROOT_DEVICE UINT_MAX | ||
342 | struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no, | ||
343 | struct visor_device *from); | ||
344 | #endif | ||
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 1779c9817b39..a4c2317d8b9f 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -216,23 +216,6 @@ static inline unsigned long zone_page_state_snapshot(struct zone *zone, | |||
216 | return x; | 216 | return x; |
217 | } | 217 | } |
218 | 218 | ||
219 | static inline unsigned long node_page_state_snapshot(pg_data_t *pgdat, | ||
220 | enum node_stat_item item) | ||
221 | { | ||
222 | long x = atomic_long_read(&pgdat->vm_stat[item]); | ||
223 | |||
224 | #ifdef CONFIG_SMP | ||
225 | int cpu; | ||
226 | for_each_online_cpu(cpu) | ||
227 | x += per_cpu_ptr(pgdat->per_cpu_nodestats, cpu)->vm_node_stat_diff[item]; | ||
228 | |||
229 | if (x < 0) | ||
230 | x = 0; | ||
231 | #endif | ||
232 | return x; | ||
233 | } | ||
234 | |||
235 | |||
236 | #ifdef CONFIG_NUMA | 219 | #ifdef CONFIG_NUMA |
237 | extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); | 220 | extern void __inc_numa_state(struct zone *zone, enum numa_stat_item item); |
238 | extern unsigned long sum_zone_node_page_state(int node, | 221 | extern unsigned long sum_zone_node_page_state(int node, |
diff --git a/include/linux/w1-gpio.h b/include/linux/w1-gpio.h index d58594a32324..78901ecd2f95 100644 --- a/include/linux/w1-gpio.h +++ b/include/linux/w1-gpio.h | |||
@@ -10,16 +10,15 @@ | |||
10 | #ifndef _LINUX_W1_GPIO_H | 10 | #ifndef _LINUX_W1_GPIO_H |
11 | #define _LINUX_W1_GPIO_H | 11 | #define _LINUX_W1_GPIO_H |
12 | 12 | ||
13 | struct gpio_desc; | ||
14 | |||
13 | /** | 15 | /** |
14 | * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio | 16 | * struct w1_gpio_platform_data - Platform-dependent data for w1-gpio |
15 | * @pin: GPIO pin to use | ||
16 | * @is_open_drain: GPIO pin is configured as open drain | ||
17 | */ | 17 | */ |
18 | struct w1_gpio_platform_data { | 18 | struct w1_gpio_platform_data { |
19 | unsigned int pin; | 19 | struct gpio_desc *gpiod; |
20 | unsigned int is_open_drain:1; | 20 | struct gpio_desc *pullup_gpiod; |
21 | void (*enable_external_pullup)(int enable); | 21 | void (*enable_external_pullup)(int enable); |
22 | unsigned int ext_pullup_enable_pin; | ||
23 | unsigned int pullup_duration; | 22 | unsigned int pullup_duration; |
24 | }; | 23 | }; |
25 | 24 | ||
diff --git a/include/linux/wait.h b/include/linux/wait.h index 158715445ffb..55a611486bac 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
@@ -206,14 +206,16 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr); | |||
206 | /* | 206 | /* |
207 | * Wakeup macros to be used to report events to the targets. | 207 | * Wakeup macros to be used to report events to the targets. |
208 | */ | 208 | */ |
209 | #define poll_to_key(m) ((void *)(__force uintptr_t)(__poll_t)(m)) | ||
210 | #define key_to_poll(m) ((__force __poll_t)(uintptr_t)(void *)(m)) | ||
209 | #define wake_up_poll(x, m) \ | 211 | #define wake_up_poll(x, m) \ |
210 | __wake_up(x, TASK_NORMAL, 1, (void *) (m)) | 212 | __wake_up(x, TASK_NORMAL, 1, poll_to_key(m)) |
211 | #define wake_up_locked_poll(x, m) \ | 213 | #define wake_up_locked_poll(x, m) \ |
212 | __wake_up_locked_key((x), TASK_NORMAL, (void *) (m)) | 214 | __wake_up_locked_key((x), TASK_NORMAL, poll_to_key(m)) |
213 | #define wake_up_interruptible_poll(x, m) \ | 215 | #define wake_up_interruptible_poll(x, m) \ |
214 | __wake_up(x, TASK_INTERRUPTIBLE, 1, (void *) (m)) | 216 | __wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m)) |
215 | #define wake_up_interruptible_sync_poll(x, m) \ | 217 | #define wake_up_interruptible_sync_poll(x, m) \ |
216 | __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, (void *) (m)) | 218 | __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m)) |
217 | 219 | ||
218 | #define ___wait_cond_timeout(condition) \ | 220 | #define ___wait_cond_timeout(condition) \ |
219 | ({ \ | 221 | ({ \ |
diff --git a/include/linux/zpool.h b/include/linux/zpool.h index 004ba807df96..7238865e75b0 100644 --- a/include/linux/zpool.h +++ b/include/linux/zpool.h | |||
@@ -108,4 +108,6 @@ void zpool_register_driver(struct zpool_driver *driver); | |||
108 | 108 | ||
109 | int zpool_unregister_driver(struct zpool_driver *driver); | 109 | int zpool_unregister_driver(struct zpool_driver *driver); |
110 | 110 | ||
111 | bool zpool_evictable(struct zpool *pool); | ||
112 | |||
111 | #endif | 113 | #endif |