diff options
Diffstat (limited to 'include/linux')
376 files changed, 10381 insertions, 4378 deletions
diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 87715f20b69a..d5dcebd7aad3 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h | |||
| @@ -400,12 +400,17 @@ extern bool acpi_osi_is_win8(void); | |||
| 400 | 400 | ||
| 401 | #ifdef CONFIG_ACPI_NUMA | 401 | #ifdef CONFIG_ACPI_NUMA |
| 402 | int acpi_map_pxm_to_online_node(int pxm); | 402 | int acpi_map_pxm_to_online_node(int pxm); |
| 403 | int acpi_map_pxm_to_node(int pxm); | ||
| 403 | int acpi_get_node(acpi_handle handle); | 404 | int acpi_get_node(acpi_handle handle); |
| 404 | #else | 405 | #else |
| 405 | static inline int acpi_map_pxm_to_online_node(int pxm) | 406 | static inline int acpi_map_pxm_to_online_node(int pxm) |
| 406 | { | 407 | { |
| 407 | return 0; | 408 | return 0; |
| 408 | } | 409 | } |
| 410 | static inline int acpi_map_pxm_to_node(int pxm) | ||
| 411 | { | ||
| 412 | return 0; | ||
| 413 | } | ||
| 409 | static inline int acpi_get_node(acpi_handle handle) | 414 | static inline int acpi_get_node(acpi_handle handle) |
| 410 | { | 415 | { |
| 411 | return 0; | 416 | return 0; |
| @@ -953,9 +958,6 @@ acpi_handle_printk(const char *level, void *handle, const char *fmt, ...) {} | |||
| 953 | #if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) | 958 | #if defined(CONFIG_ACPI) && defined(CONFIG_DYNAMIC_DEBUG) |
| 954 | __printf(3, 4) | 959 | __printf(3, 4) |
| 955 | void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); | 960 | void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const char *fmt, ...); |
| 956 | #else | ||
| 957 | #define __acpi_handle_debug(descriptor, handle, fmt, ...) \ | ||
| 958 | acpi_handle_printk(KERN_DEBUG, handle, fmt, ##__VA_ARGS__); | ||
| 959 | #endif | 961 | #endif |
| 960 | 962 | ||
| 961 | /* | 963 | /* |
| @@ -985,12 +987,8 @@ void __acpi_handle_debug(struct _ddebug *descriptor, acpi_handle handle, const c | |||
| 985 | #else | 987 | #else |
| 986 | #if defined(CONFIG_DYNAMIC_DEBUG) | 988 | #if defined(CONFIG_DYNAMIC_DEBUG) |
| 987 | #define acpi_handle_debug(handle, fmt, ...) \ | 989 | #define acpi_handle_debug(handle, fmt, ...) \ |
| 988 | do { \ | 990 | _dynamic_func_call(fmt, __acpi_handle_debug, \ |
| 989 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 991 | handle, pr_fmt(fmt), ##__VA_ARGS__) |
| 990 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT)) \ | ||
| 991 | __acpi_handle_debug(&descriptor, handle, pr_fmt(fmt), \ | ||
| 992 | ##__VA_ARGS__); \ | ||
| 993 | } while (0) | ||
| 994 | #else | 992 | #else |
| 995 | #define acpi_handle_debug(handle, fmt, ...) \ | 993 | #define acpi_handle_debug(handle, fmt, ...) \ |
| 996 | ({ \ | 994 | ({ \ |
| @@ -1014,6 +1012,13 @@ struct acpi_gpio_mapping { | |||
| 1014 | 1012 | ||
| 1015 | /* Ignore IoRestriction field */ | 1013 | /* Ignore IoRestriction field */ |
| 1016 | #define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) | 1014 | #define ACPI_GPIO_QUIRK_NO_IO_RESTRICTION BIT(0) |
| 1015 | /* | ||
| 1016 | * When ACPI GPIO mapping table is in use the index parameter inside it | ||
| 1017 | * refers to the GPIO resource in _CRS method. That index has no | ||
| 1018 | * distinction of actual type of the resource. When consumer wants to | ||
| 1019 | * get GpioIo type explicitly, this quirk may be used. | ||
| 1020 | */ | ||
| 1021 | #define ACPI_GPIO_QUIRK_ONLY_GPIOIO BIT(1) | ||
| 1017 | 1022 | ||
| 1018 | unsigned int quirks; | 1023 | unsigned int quirks; |
| 1019 | }; | 1024 | }; |
| @@ -1061,17 +1066,6 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index) | |||
| 1061 | } | 1066 | } |
| 1062 | #endif | 1067 | #endif |
| 1063 | 1068 | ||
| 1064 | #if defined(CONFIG_ACPI) && IS_ENABLED(CONFIG_I2C) | ||
| 1065 | bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, | ||
| 1066 | struct acpi_resource_i2c_serialbus **i2c); | ||
| 1067 | #else | ||
| 1068 | static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, | ||
| 1069 | struct acpi_resource_i2c_serialbus **i2c) | ||
| 1070 | { | ||
| 1071 | return false; | ||
| 1072 | } | ||
| 1073 | #endif | ||
| 1074 | |||
| 1075 | /* Device properties */ | 1069 | /* Device properties */ |
| 1076 | 1070 | ||
| 1077 | #ifdef CONFIG_ACPI | 1071 | #ifdef CONFIG_ACPI |
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h index d143c13bed26..f99b74a6e4ca 100644 --- a/include/linux/amba/bus.h +++ b/include/linux/amba/bus.h | |||
| @@ -25,6 +25,43 @@ | |||
| 25 | #define AMBA_CID 0xb105f00d | 25 | #define AMBA_CID 0xb105f00d |
| 26 | #define CORESIGHT_CID 0xb105900d | 26 | #define CORESIGHT_CID 0xb105900d |
| 27 | 27 | ||
| 28 | /* | ||
| 29 | * CoreSight Architecture specification updates the ID specification | ||
| 30 | * for components on the AMBA bus. (ARM IHI 0029E) | ||
| 31 | * | ||
| 32 | * Bits 15:12 of the CID are the device class. | ||
| 33 | * | ||
| 34 | * Class 0xF remains for PrimeCell and legacy components. (AMBA_CID above) | ||
| 35 | * Class 0x9 defines the component as CoreSight (CORESIGHT_CID above) | ||
| 36 | * Class 0x0, 0x1, 0xB, 0xE define components that do not have driver support | ||
| 37 | * at present. | ||
| 38 | * Class 0x2-0x8,0xA and 0xD-0xD are presently reserved. | ||
| 39 | * | ||
| 40 | * Remaining CID bits stay as 0xb105-00d | ||
| 41 | */ | ||
| 42 | |||
| 43 | /** | ||
| 44 | * Class 0x9 components use additional values to form a Unique Component | ||
| 45 | * Identifier (UCI), where peripheral ID values are identical for different | ||
| 46 | * components. Passed to the amba bus code from the component driver via | ||
| 47 | * the amba_id->data pointer. | ||
| 48 | * @devarch : coresight devarch register value | ||
| 49 | * @devarch_mask: mask bits used for matching. 0 indicates UCI not used. | ||
| 50 | * @devtype : coresight device type value | ||
| 51 | * @data : additional driver data. As we have usurped the original | ||
| 52 | * pointer some devices may still need additional data | ||
| 53 | */ | ||
| 54 | struct amba_cs_uci_id { | ||
| 55 | unsigned int devarch; | ||
| 56 | unsigned int devarch_mask; | ||
| 57 | unsigned int devtype; | ||
| 58 | void *data; | ||
| 59 | }; | ||
| 60 | |||
| 61 | /* define offsets for registers used by UCI */ | ||
| 62 | #define UCI_REG_DEVTYPE_OFFSET 0xFCC | ||
| 63 | #define UCI_REG_DEVARCH_OFFSET 0xFBC | ||
| 64 | |||
| 28 | struct clk; | 65 | struct clk; |
| 29 | 66 | ||
| 30 | struct amba_device { | 67 | struct amba_device { |
| @@ -32,6 +69,8 @@ struct amba_device { | |||
| 32 | struct resource res; | 69 | struct resource res; |
| 33 | struct clk *pclk; | 70 | struct clk *pclk; |
| 34 | unsigned int periphid; | 71 | unsigned int periphid; |
| 72 | unsigned int cid; | ||
| 73 | struct amba_cs_uci_id uci; | ||
| 35 | unsigned int irq[AMBA_NR_IRQS]; | 74 | unsigned int irq[AMBA_NR_IRQS]; |
| 36 | char *driver_override; | 75 | char *driver_override; |
| 37 | }; | 76 | }; |
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h index 942afbd544b7..3305ea7f9dc7 100644 --- a/include/linux/arm_sdei.h +++ b/include/linux/arm_sdei.h | |||
| @@ -11,7 +11,11 @@ enum sdei_conduit_types { | |||
| 11 | CONDUIT_HVC, | 11 | CONDUIT_HVC, |
| 12 | }; | 12 | }; |
| 13 | 13 | ||
| 14 | #include <acpi/ghes.h> | ||
| 15 | |||
| 16 | #ifdef CONFIG_ARM_SDE_INTERFACE | ||
| 14 | #include <asm/sdei.h> | 17 | #include <asm/sdei.h> |
| 18 | #endif | ||
| 15 | 19 | ||
| 16 | /* Arch code should override this to set the entry point from firmware... */ | 20 | /* Arch code should override this to set the entry point from firmware... */ |
| 17 | #ifndef sdei_arch_get_entry_point | 21 | #ifndef sdei_arch_get_entry_point |
| @@ -39,6 +43,11 @@ int sdei_event_unregister(u32 event_num); | |||
| 39 | int sdei_event_enable(u32 event_num); | 43 | int sdei_event_enable(u32 event_num); |
| 40 | int sdei_event_disable(u32 event_num); | 44 | int sdei_event_disable(u32 event_num); |
| 41 | 45 | ||
| 46 | /* GHES register/unregister helpers */ | ||
| 47 | int sdei_register_ghes(struct ghes *ghes, sdei_event_callback *normal_cb, | ||
| 48 | sdei_event_callback *critical_cb); | ||
| 49 | int sdei_unregister_ghes(struct ghes *ghes); | ||
| 50 | |||
| 42 | #ifdef CONFIG_ARM_SDE_INTERFACE | 51 | #ifdef CONFIG_ARM_SDE_INTERFACE |
| 43 | /* For use by arch code when CPU hotplug notifiers are not appropriate. */ | 52 | /* For use by arch code when CPU hotplug notifiers are not appropriate. */ |
| 44 | int sdei_mask_local_cpu(void); | 53 | int sdei_mask_local_cpu(void); |
diff --git a/include/linux/async.h b/include/linux/async.h index 6b0226bdaadc..f81d6dbffe68 100644 --- a/include/linux/async.h +++ b/include/linux/async.h | |||
| @@ -14,6 +14,8 @@ | |||
| 14 | 14 | ||
| 15 | #include <linux/types.h> | 15 | #include <linux/types.h> |
| 16 | #include <linux/list.h> | 16 | #include <linux/list.h> |
| 17 | #include <linux/numa.h> | ||
| 18 | #include <linux/device.h> | ||
| 17 | 19 | ||
| 18 | typedef u64 async_cookie_t; | 20 | typedef u64 async_cookie_t; |
| 19 | typedef void (*async_func_t) (void *data, async_cookie_t cookie); | 21 | typedef void (*async_func_t) (void *data, async_cookie_t cookie); |
| @@ -37,9 +39,83 @@ struct async_domain { | |||
| 37 | struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ | 39 | struct async_domain _name = { .pending = LIST_HEAD_INIT(_name.pending), \ |
| 38 | .registered = 0 } | 40 | .registered = 0 } |
| 39 | 41 | ||
| 40 | extern async_cookie_t async_schedule(async_func_t func, void *data); | 42 | async_cookie_t async_schedule_node(async_func_t func, void *data, |
| 41 | extern async_cookie_t async_schedule_domain(async_func_t func, void *data, | 43 | int node); |
| 42 | struct async_domain *domain); | 44 | async_cookie_t async_schedule_node_domain(async_func_t func, void *data, |
| 45 | int node, | ||
| 46 | struct async_domain *domain); | ||
| 47 | |||
| 48 | /** | ||
| 49 | * async_schedule - schedule a function for asynchronous execution | ||
| 50 | * @func: function to execute asynchronously | ||
| 51 | * @data: data pointer to pass to the function | ||
| 52 | * | ||
| 53 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
| 54 | * Note: This function may be called from atomic or non-atomic contexts. | ||
| 55 | */ | ||
| 56 | static inline async_cookie_t async_schedule(async_func_t func, void *data) | ||
| 57 | { | ||
| 58 | return async_schedule_node(func, data, NUMA_NO_NODE); | ||
| 59 | } | ||
| 60 | |||
| 61 | /** | ||
| 62 | * async_schedule_domain - schedule a function for asynchronous execution within a certain domain | ||
| 63 | * @func: function to execute asynchronously | ||
| 64 | * @data: data pointer to pass to the function | ||
| 65 | * @domain: the domain | ||
| 66 | * | ||
| 67 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
| 68 | * @domain may be used in the async_synchronize_*_domain() functions to | ||
| 69 | * wait within a certain synchronization domain rather than globally. | ||
| 70 | * Note: This function may be called from atomic or non-atomic contexts. | ||
| 71 | */ | ||
| 72 | static inline async_cookie_t | ||
| 73 | async_schedule_domain(async_func_t func, void *data, | ||
| 74 | struct async_domain *domain) | ||
| 75 | { | ||
| 76 | return async_schedule_node_domain(func, data, NUMA_NO_NODE, domain); | ||
| 77 | } | ||
| 78 | |||
| 79 | /** | ||
| 80 | * async_schedule_dev - A device specific version of async_schedule | ||
| 81 | * @func: function to execute asynchronously | ||
| 82 | * @dev: device argument to be passed to function | ||
| 83 | * | ||
| 84 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
| 85 | * @dev is used as both the argument for the function and to provide NUMA | ||
| 86 | * context for where to run the function. By doing this we can try to | ||
| 87 | * provide for the best possible outcome by operating on the device on the | ||
| 88 | * CPUs closest to the device. | ||
| 89 | * Note: This function may be called from atomic or non-atomic contexts. | ||
| 90 | */ | ||
| 91 | static inline async_cookie_t | ||
| 92 | async_schedule_dev(async_func_t func, struct device *dev) | ||
| 93 | { | ||
| 94 | return async_schedule_node(func, dev, dev_to_node(dev)); | ||
| 95 | } | ||
| 96 | |||
| 97 | /** | ||
| 98 | * async_schedule_dev_domain - A device specific version of async_schedule_domain | ||
| 99 | * @func: function to execute asynchronously | ||
| 100 | * @dev: device argument to be passed to function | ||
| 101 | * @domain: the domain | ||
| 102 | * | ||
| 103 | * Returns an async_cookie_t that may be used for checkpointing later. | ||
| 104 | * @dev is used as both the argument for the function and to provide NUMA | ||
| 105 | * context for where to run the function. By doing this we can try to | ||
| 106 | * provide for the best possible outcome by operating on the device on the | ||
| 107 | * CPUs closest to the device. | ||
| 108 | * @domain may be used in the async_synchronize_*_domain() functions to | ||
| 109 | * wait within a certain synchronization domain rather than globally. | ||
| 110 | * Note: This function may be called from atomic or non-atomic contexts. | ||
| 111 | */ | ||
| 112 | static inline async_cookie_t | ||
| 113 | async_schedule_dev_domain(async_func_t func, struct device *dev, | ||
| 114 | struct async_domain *domain) | ||
| 115 | { | ||
| 116 | return async_schedule_node_domain(func, dev, dev_to_node(dev), domain); | ||
| 117 | } | ||
| 118 | |||
| 43 | void async_unregister_domain(struct async_domain *domain); | 119 | void async_unregister_domain(struct async_domain *domain); |
| 44 | extern void async_synchronize_full(void); | 120 | extern void async_synchronize_full(void); |
| 45 | extern void async_synchronize_full_domain(struct async_domain *domain); | 121 | extern void async_synchronize_full_domain(struct async_domain *domain); |
diff --git a/include/linux/ata_platform.h b/include/linux/ata_platform.h index ff2120215dec..9cafec92282d 100644 --- a/include/linux/ata_platform.h +++ b/include/linux/ata_platform.h | |||
| @@ -19,7 +19,8 @@ extern int __pata_platform_probe(struct device *dev, | |||
| 19 | struct resource *irq_res, | 19 | struct resource *irq_res, |
| 20 | unsigned int ioport_shift, | 20 | unsigned int ioport_shift, |
| 21 | int __pio_mask, | 21 | int __pio_mask, |
| 22 | struct scsi_host_template *sht); | 22 | struct scsi_host_template *sht, |
| 23 | bool use16bit); | ||
| 23 | 24 | ||
| 24 | /* | 25 | /* |
| 25 | * Marvell SATA private data | 26 | * Marvell SATA private data |
diff --git a/include/linux/atalk.h b/include/linux/atalk.h index 23f805562f4e..f6034ba774be 100644 --- a/include/linux/atalk.h +++ b/include/linux/atalk.h | |||
| @@ -108,7 +108,7 @@ static __inline__ struct elapaarp *aarp_hdr(struct sk_buff *skb) | |||
| 108 | #define AARP_RESOLVE_TIME (10 * HZ) | 108 | #define AARP_RESOLVE_TIME (10 * HZ) |
| 109 | 109 | ||
| 110 | extern struct datalink_proto *ddp_dl, *aarp_dl; | 110 | extern struct datalink_proto *ddp_dl, *aarp_dl; |
| 111 | extern void aarp_proto_init(void); | 111 | extern int aarp_proto_init(void); |
| 112 | 112 | ||
| 113 | /* Inter module exports */ | 113 | /* Inter module exports */ |
| 114 | 114 | ||
| @@ -158,19 +158,29 @@ extern int sysctl_aarp_retransmit_limit; | |||
| 158 | extern int sysctl_aarp_resolve_time; | 158 | extern int sysctl_aarp_resolve_time; |
| 159 | 159 | ||
| 160 | #ifdef CONFIG_SYSCTL | 160 | #ifdef CONFIG_SYSCTL |
| 161 | extern void atalk_register_sysctl(void); | 161 | extern int atalk_register_sysctl(void); |
| 162 | extern void atalk_unregister_sysctl(void); | 162 | extern void atalk_unregister_sysctl(void); |
| 163 | #else | 163 | #else |
| 164 | #define atalk_register_sysctl() do { } while(0) | 164 | static inline int atalk_register_sysctl(void) |
| 165 | #define atalk_unregister_sysctl() do { } while(0) | 165 | { |
| 166 | return 0; | ||
| 167 | } | ||
| 168 | static inline void atalk_unregister_sysctl(void) | ||
| 169 | { | ||
| 170 | } | ||
| 166 | #endif | 171 | #endif |
| 167 | 172 | ||
| 168 | #ifdef CONFIG_PROC_FS | 173 | #ifdef CONFIG_PROC_FS |
| 169 | extern int atalk_proc_init(void); | 174 | extern int atalk_proc_init(void); |
| 170 | extern void atalk_proc_exit(void); | 175 | extern void atalk_proc_exit(void); |
| 171 | #else | 176 | #else |
| 172 | #define atalk_proc_init() ({ 0; }) | 177 | static inline int atalk_proc_init(void) |
| 173 | #define atalk_proc_exit() do { } while(0) | 178 | { |
| 179 | return 0; | ||
| 180 | } | ||
| 181 | static inline void atalk_proc_exit(void) | ||
| 182 | { | ||
| 183 | } | ||
| 174 | #endif /* CONFIG_PROC_FS */ | 184 | #endif /* CONFIG_PROC_FS */ |
| 175 | 185 | ||
| 176 | #endif /* __LINUX_ATALK_H__ */ | 186 | #endif /* __LINUX_ATALK_H__ */ |
diff --git a/include/linux/atomic-fallback.h b/include/linux/atomic-fallback.h new file mode 100644 index 000000000000..a7d240e465c0 --- /dev/null +++ b/include/linux/atomic-fallback.h | |||
| @@ -0,0 +1,2295 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | ||
| 2 | |||
| 3 | // Generated by scripts/atomic/gen-atomic-fallback.sh | ||
| 4 | // DO NOT MODIFY THIS FILE DIRECTLY | ||
| 5 | |||
| 6 | #ifndef _LINUX_ATOMIC_FALLBACK_H | ||
| 7 | #define _LINUX_ATOMIC_FALLBACK_H | ||
| 8 | |||
| 9 | #ifndef xchg_relaxed | ||
| 10 | #define xchg_relaxed xchg | ||
| 11 | #define xchg_acquire xchg | ||
| 12 | #define xchg_release xchg | ||
| 13 | #else /* xchg_relaxed */ | ||
| 14 | |||
| 15 | #ifndef xchg_acquire | ||
| 16 | #define xchg_acquire(...) \ | ||
| 17 | __atomic_op_acquire(xchg, __VA_ARGS__) | ||
| 18 | #endif | ||
| 19 | |||
| 20 | #ifndef xchg_release | ||
| 21 | #define xchg_release(...) \ | ||
| 22 | __atomic_op_release(xchg, __VA_ARGS__) | ||
| 23 | #endif | ||
| 24 | |||
| 25 | #ifndef xchg | ||
| 26 | #define xchg(...) \ | ||
| 27 | __atomic_op_fence(xchg, __VA_ARGS__) | ||
| 28 | #endif | ||
| 29 | |||
| 30 | #endif /* xchg_relaxed */ | ||
| 31 | |||
| 32 | #ifndef cmpxchg_relaxed | ||
| 33 | #define cmpxchg_relaxed cmpxchg | ||
| 34 | #define cmpxchg_acquire cmpxchg | ||
| 35 | #define cmpxchg_release cmpxchg | ||
| 36 | #else /* cmpxchg_relaxed */ | ||
| 37 | |||
| 38 | #ifndef cmpxchg_acquire | ||
| 39 | #define cmpxchg_acquire(...) \ | ||
| 40 | __atomic_op_acquire(cmpxchg, __VA_ARGS__) | ||
| 41 | #endif | ||
| 42 | |||
| 43 | #ifndef cmpxchg_release | ||
| 44 | #define cmpxchg_release(...) \ | ||
| 45 | __atomic_op_release(cmpxchg, __VA_ARGS__) | ||
| 46 | #endif | ||
| 47 | |||
| 48 | #ifndef cmpxchg | ||
| 49 | #define cmpxchg(...) \ | ||
| 50 | __atomic_op_fence(cmpxchg, __VA_ARGS__) | ||
| 51 | #endif | ||
| 52 | |||
| 53 | #endif /* cmpxchg_relaxed */ | ||
| 54 | |||
| 55 | #ifndef cmpxchg64_relaxed | ||
| 56 | #define cmpxchg64_relaxed cmpxchg64 | ||
| 57 | #define cmpxchg64_acquire cmpxchg64 | ||
| 58 | #define cmpxchg64_release cmpxchg64 | ||
| 59 | #else /* cmpxchg64_relaxed */ | ||
| 60 | |||
| 61 | #ifndef cmpxchg64_acquire | ||
| 62 | #define cmpxchg64_acquire(...) \ | ||
| 63 | __atomic_op_acquire(cmpxchg64, __VA_ARGS__) | ||
| 64 | #endif | ||
| 65 | |||
| 66 | #ifndef cmpxchg64_release | ||
| 67 | #define cmpxchg64_release(...) \ | ||
| 68 | __atomic_op_release(cmpxchg64, __VA_ARGS__) | ||
| 69 | #endif | ||
| 70 | |||
| 71 | #ifndef cmpxchg64 | ||
| 72 | #define cmpxchg64(...) \ | ||
| 73 | __atomic_op_fence(cmpxchg64, __VA_ARGS__) | ||
| 74 | #endif | ||
| 75 | |||
| 76 | #endif /* cmpxchg64_relaxed */ | ||
| 77 | |||
| 78 | #ifndef atomic_read_acquire | ||
| 79 | static inline int | ||
| 80 | atomic_read_acquire(const atomic_t *v) | ||
| 81 | { | ||
| 82 | return smp_load_acquire(&(v)->counter); | ||
| 83 | } | ||
| 84 | #define atomic_read_acquire atomic_read_acquire | ||
| 85 | #endif | ||
| 86 | |||
| 87 | #ifndef atomic_set_release | ||
| 88 | static inline void | ||
| 89 | atomic_set_release(atomic_t *v, int i) | ||
| 90 | { | ||
| 91 | smp_store_release(&(v)->counter, i); | ||
| 92 | } | ||
| 93 | #define atomic_set_release atomic_set_release | ||
| 94 | #endif | ||
| 95 | |||
| 96 | #ifndef atomic_add_return_relaxed | ||
| 97 | #define atomic_add_return_acquire atomic_add_return | ||
| 98 | #define atomic_add_return_release atomic_add_return | ||
| 99 | #define atomic_add_return_relaxed atomic_add_return | ||
| 100 | #else /* atomic_add_return_relaxed */ | ||
| 101 | |||
| 102 | #ifndef atomic_add_return_acquire | ||
| 103 | static inline int | ||
| 104 | atomic_add_return_acquire(int i, atomic_t *v) | ||
| 105 | { | ||
| 106 | int ret = atomic_add_return_relaxed(i, v); | ||
| 107 | __atomic_acquire_fence(); | ||
| 108 | return ret; | ||
| 109 | } | ||
| 110 | #define atomic_add_return_acquire atomic_add_return_acquire | ||
| 111 | #endif | ||
| 112 | |||
| 113 | #ifndef atomic_add_return_release | ||
| 114 | static inline int | ||
| 115 | atomic_add_return_release(int i, atomic_t *v) | ||
| 116 | { | ||
| 117 | __atomic_release_fence(); | ||
| 118 | return atomic_add_return_relaxed(i, v); | ||
| 119 | } | ||
| 120 | #define atomic_add_return_release atomic_add_return_release | ||
| 121 | #endif | ||
| 122 | |||
| 123 | #ifndef atomic_add_return | ||
| 124 | static inline int | ||
| 125 | atomic_add_return(int i, atomic_t *v) | ||
| 126 | { | ||
| 127 | int ret; | ||
| 128 | __atomic_pre_full_fence(); | ||
| 129 | ret = atomic_add_return_relaxed(i, v); | ||
| 130 | __atomic_post_full_fence(); | ||
| 131 | return ret; | ||
| 132 | } | ||
| 133 | #define atomic_add_return atomic_add_return | ||
| 134 | #endif | ||
| 135 | |||
| 136 | #endif /* atomic_add_return_relaxed */ | ||
| 137 | |||
| 138 | #ifndef atomic_fetch_add_relaxed | ||
| 139 | #define atomic_fetch_add_acquire atomic_fetch_add | ||
| 140 | #define atomic_fetch_add_release atomic_fetch_add | ||
| 141 | #define atomic_fetch_add_relaxed atomic_fetch_add | ||
| 142 | #else /* atomic_fetch_add_relaxed */ | ||
| 143 | |||
| 144 | #ifndef atomic_fetch_add_acquire | ||
| 145 | static inline int | ||
| 146 | atomic_fetch_add_acquire(int i, atomic_t *v) | ||
| 147 | { | ||
| 148 | int ret = atomic_fetch_add_relaxed(i, v); | ||
| 149 | __atomic_acquire_fence(); | ||
| 150 | return ret; | ||
| 151 | } | ||
| 152 | #define atomic_fetch_add_acquire atomic_fetch_add_acquire | ||
| 153 | #endif | ||
| 154 | |||
| 155 | #ifndef atomic_fetch_add_release | ||
| 156 | static inline int | ||
| 157 | atomic_fetch_add_release(int i, atomic_t *v) | ||
| 158 | { | ||
| 159 | __atomic_release_fence(); | ||
| 160 | return atomic_fetch_add_relaxed(i, v); | ||
| 161 | } | ||
| 162 | #define atomic_fetch_add_release atomic_fetch_add_release | ||
| 163 | #endif | ||
| 164 | |||
| 165 | #ifndef atomic_fetch_add | ||
| 166 | static inline int | ||
| 167 | atomic_fetch_add(int i, atomic_t *v) | ||
| 168 | { | ||
| 169 | int ret; | ||
| 170 | __atomic_pre_full_fence(); | ||
| 171 | ret = atomic_fetch_add_relaxed(i, v); | ||
| 172 | __atomic_post_full_fence(); | ||
| 173 | return ret; | ||
| 174 | } | ||
| 175 | #define atomic_fetch_add atomic_fetch_add | ||
| 176 | #endif | ||
| 177 | |||
| 178 | #endif /* atomic_fetch_add_relaxed */ | ||
| 179 | |||
| 180 | #ifndef atomic_sub_return_relaxed | ||
| 181 | #define atomic_sub_return_acquire atomic_sub_return | ||
| 182 | #define atomic_sub_return_release atomic_sub_return | ||
| 183 | #define atomic_sub_return_relaxed atomic_sub_return | ||
| 184 | #else /* atomic_sub_return_relaxed */ | ||
| 185 | |||
| 186 | #ifndef atomic_sub_return_acquire | ||
| 187 | static inline int | ||
| 188 | atomic_sub_return_acquire(int i, atomic_t *v) | ||
| 189 | { | ||
| 190 | int ret = atomic_sub_return_relaxed(i, v); | ||
| 191 | __atomic_acquire_fence(); | ||
| 192 | return ret; | ||
| 193 | } | ||
| 194 | #define atomic_sub_return_acquire atomic_sub_return_acquire | ||
| 195 | #endif | ||
| 196 | |||
| 197 | #ifndef atomic_sub_return_release | ||
| 198 | static inline int | ||
| 199 | atomic_sub_return_release(int i, atomic_t *v) | ||
| 200 | { | ||
| 201 | __atomic_release_fence(); | ||
| 202 | return atomic_sub_return_relaxed(i, v); | ||
| 203 | } | ||
| 204 | #define atomic_sub_return_release atomic_sub_return_release | ||
| 205 | #endif | ||
| 206 | |||
| 207 | #ifndef atomic_sub_return | ||
| 208 | static inline int | ||
| 209 | atomic_sub_return(int i, atomic_t *v) | ||
| 210 | { | ||
| 211 | int ret; | ||
| 212 | __atomic_pre_full_fence(); | ||
| 213 | ret = atomic_sub_return_relaxed(i, v); | ||
| 214 | __atomic_post_full_fence(); | ||
| 215 | return ret; | ||
| 216 | } | ||
| 217 | #define atomic_sub_return atomic_sub_return | ||
| 218 | #endif | ||
| 219 | |||
| 220 | #endif /* atomic_sub_return_relaxed */ | ||
| 221 | |||
| 222 | #ifndef atomic_fetch_sub_relaxed | ||
| 223 | #define atomic_fetch_sub_acquire atomic_fetch_sub | ||
| 224 | #define atomic_fetch_sub_release atomic_fetch_sub | ||
| 225 | #define atomic_fetch_sub_relaxed atomic_fetch_sub | ||
| 226 | #else /* atomic_fetch_sub_relaxed */ | ||
| 227 | |||
| 228 | #ifndef atomic_fetch_sub_acquire | ||
| 229 | static inline int | ||
| 230 | atomic_fetch_sub_acquire(int i, atomic_t *v) | ||
| 231 | { | ||
| 232 | int ret = atomic_fetch_sub_relaxed(i, v); | ||
| 233 | __atomic_acquire_fence(); | ||
| 234 | return ret; | ||
| 235 | } | ||
| 236 | #define atomic_fetch_sub_acquire atomic_fetch_sub_acquire | ||
| 237 | #endif | ||
| 238 | |||
| 239 | #ifndef atomic_fetch_sub_release | ||
| 240 | static inline int | ||
| 241 | atomic_fetch_sub_release(int i, atomic_t *v) | ||
| 242 | { | ||
| 243 | __atomic_release_fence(); | ||
| 244 | return atomic_fetch_sub_relaxed(i, v); | ||
| 245 | } | ||
| 246 | #define atomic_fetch_sub_release atomic_fetch_sub_release | ||
| 247 | #endif | ||
| 248 | |||
| 249 | #ifndef atomic_fetch_sub | ||
| 250 | static inline int | ||
| 251 | atomic_fetch_sub(int i, atomic_t *v) | ||
| 252 | { | ||
| 253 | int ret; | ||
| 254 | __atomic_pre_full_fence(); | ||
| 255 | ret = atomic_fetch_sub_relaxed(i, v); | ||
| 256 | __atomic_post_full_fence(); | ||
| 257 | return ret; | ||
| 258 | } | ||
| 259 | #define atomic_fetch_sub atomic_fetch_sub | ||
| 260 | #endif | ||
| 261 | |||
| 262 | #endif /* atomic_fetch_sub_relaxed */ | ||
| 263 | |||
| 264 | #ifndef atomic_inc | ||
| 265 | static inline void | ||
| 266 | atomic_inc(atomic_t *v) | ||
| 267 | { | ||
| 268 | atomic_add(1, v); | ||
| 269 | } | ||
| 270 | #define atomic_inc atomic_inc | ||
| 271 | #endif | ||
| 272 | |||
| 273 | #ifndef atomic_inc_return_relaxed | ||
| 274 | #ifdef atomic_inc_return | ||
| 275 | #define atomic_inc_return_acquire atomic_inc_return | ||
| 276 | #define atomic_inc_return_release atomic_inc_return | ||
| 277 | #define atomic_inc_return_relaxed atomic_inc_return | ||
| 278 | #endif /* atomic_inc_return */ | ||
| 279 | |||
| 280 | #ifndef atomic_inc_return | ||
| 281 | static inline int | ||
| 282 | atomic_inc_return(atomic_t *v) | ||
| 283 | { | ||
| 284 | return atomic_add_return(1, v); | ||
| 285 | } | ||
| 286 | #define atomic_inc_return atomic_inc_return | ||
| 287 | #endif | ||
| 288 | |||
| 289 | #ifndef atomic_inc_return_acquire | ||
| 290 | static inline int | ||
| 291 | atomic_inc_return_acquire(atomic_t *v) | ||
| 292 | { | ||
| 293 | return atomic_add_return_acquire(1, v); | ||
| 294 | } | ||
| 295 | #define atomic_inc_return_acquire atomic_inc_return_acquire | ||
| 296 | #endif | ||
| 297 | |||
| 298 | #ifndef atomic_inc_return_release | ||
| 299 | static inline int | ||
| 300 | atomic_inc_return_release(atomic_t *v) | ||
| 301 | { | ||
| 302 | return atomic_add_return_release(1, v); | ||
| 303 | } | ||
| 304 | #define atomic_inc_return_release atomic_inc_return_release | ||
| 305 | #endif | ||
| 306 | |||
| 307 | #ifndef atomic_inc_return_relaxed | ||
| 308 | static inline int | ||
| 309 | atomic_inc_return_relaxed(atomic_t *v) | ||
| 310 | { | ||
| 311 | return atomic_add_return_relaxed(1, v); | ||
| 312 | } | ||
| 313 | #define atomic_inc_return_relaxed atomic_inc_return_relaxed | ||
| 314 | #endif | ||
| 315 | |||
| 316 | #else /* atomic_inc_return_relaxed */ | ||
| 317 | |||
| 318 | #ifndef atomic_inc_return_acquire | ||
| 319 | static inline int | ||
| 320 | atomic_inc_return_acquire(atomic_t *v) | ||
| 321 | { | ||
| 322 | int ret = atomic_inc_return_relaxed(v); | ||
| 323 | __atomic_acquire_fence(); | ||
| 324 | return ret; | ||
| 325 | } | ||
| 326 | #define atomic_inc_return_acquire atomic_inc_return_acquire | ||
| 327 | #endif | ||
| 328 | |||
| 329 | #ifndef atomic_inc_return_release | ||
| 330 | static inline int | ||
| 331 | atomic_inc_return_release(atomic_t *v) | ||
| 332 | { | ||
| 333 | __atomic_release_fence(); | ||
| 334 | return atomic_inc_return_relaxed(v); | ||
| 335 | } | ||
| 336 | #define atomic_inc_return_release atomic_inc_return_release | ||
| 337 | #endif | ||
| 338 | |||
| 339 | #ifndef atomic_inc_return | ||
| 340 | static inline int | ||
| 341 | atomic_inc_return(atomic_t *v) | ||
| 342 | { | ||
| 343 | int ret; | ||
| 344 | __atomic_pre_full_fence(); | ||
| 345 | ret = atomic_inc_return_relaxed(v); | ||
| 346 | __atomic_post_full_fence(); | ||
| 347 | return ret; | ||
| 348 | } | ||
| 349 | #define atomic_inc_return atomic_inc_return | ||
| 350 | #endif | ||
| 351 | |||
| 352 | #endif /* atomic_inc_return_relaxed */ | ||
| 353 | |||
| 354 | #ifndef atomic_fetch_inc_relaxed | ||
| 355 | #ifdef atomic_fetch_inc | ||
| 356 | #define atomic_fetch_inc_acquire atomic_fetch_inc | ||
| 357 | #define atomic_fetch_inc_release atomic_fetch_inc | ||
| 358 | #define atomic_fetch_inc_relaxed atomic_fetch_inc | ||
| 359 | #endif /* atomic_fetch_inc */ | ||
| 360 | |||
| 361 | #ifndef atomic_fetch_inc | ||
| 362 | static inline int | ||
| 363 | atomic_fetch_inc(atomic_t *v) | ||
| 364 | { | ||
| 365 | return atomic_fetch_add(1, v); | ||
| 366 | } | ||
| 367 | #define atomic_fetch_inc atomic_fetch_inc | ||
| 368 | #endif | ||
| 369 | |||
| 370 | #ifndef atomic_fetch_inc_acquire | ||
| 371 | static inline int | ||
| 372 | atomic_fetch_inc_acquire(atomic_t *v) | ||
| 373 | { | ||
| 374 | return atomic_fetch_add_acquire(1, v); | ||
| 375 | } | ||
| 376 | #define atomic_fetch_inc_acquire atomic_fetch_inc_acquire | ||
| 377 | #endif | ||
| 378 | |||
| 379 | #ifndef atomic_fetch_inc_release | ||
| 380 | static inline int | ||
| 381 | atomic_fetch_inc_release(atomic_t *v) | ||
| 382 | { | ||
| 383 | return atomic_fetch_add_release(1, v); | ||
| 384 | } | ||
| 385 | #define atomic_fetch_inc_release atomic_fetch_inc_release | ||
| 386 | #endif | ||
| 387 | |||
| 388 | #ifndef atomic_fetch_inc_relaxed | ||
| 389 | static inline int | ||
| 390 | atomic_fetch_inc_relaxed(atomic_t *v) | ||
| 391 | { | ||
| 392 | return atomic_fetch_add_relaxed(1, v); | ||
| 393 | } | ||
| 394 | #define atomic_fetch_inc_relaxed atomic_fetch_inc_relaxed | ||
| 395 | #endif | ||
| 396 | |||
| 397 | #else /* atomic_fetch_inc_relaxed */ | ||
| 398 | |||
#ifndef atomic_fetch_inc_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_fetch_inc_acquire(atomic_t *v)
{
	int ret = atomic_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_fetch_inc_acquire atomic_fetch_inc_acquire
#endif

#ifndef atomic_fetch_inc_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_fetch_inc_release(atomic_t *v)
{
	__atomic_release_fence();
	return atomic_fetch_inc_relaxed(v);
}
#define atomic_fetch_inc_release atomic_fetch_inc_release
#endif

#ifndef atomic_fetch_inc
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_fetch_inc(atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_fetch_inc atomic_fetch_inc
#endif
| 432 | |||
| 433 | #endif /* atomic_fetch_inc_relaxed */ | ||
| 434 | |||
#ifndef atomic_dec
/* Atomically decrement @v by 1; falls back to atomic_sub(1, v). */
static inline void
atomic_dec(atomic_t *v)
{
	atomic_sub(1, v);
}
#define atomic_dec atomic_dec
#endif
| 443 | |||
/*
 * atomic_dec_return() and its ordering variants.
 *
 * If the architecture provides no atomic_dec_return_relaxed(): alias all
 * orderings to a fully-ordered atomic_dec_return() when one exists (a
 * fully-ordered op satisfies any weaker ordering), otherwise fall back to
 * atomic_sub_return*(1, v).  If a _relaxed form does exist, construct the
 * stronger orderings from it with explicit fences.
 */
#ifndef atomic_dec_return_relaxed
#ifdef atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
#define atomic_dec_return_relaxed atomic_dec_return
#endif /* atomic_dec_return */

#ifndef atomic_dec_return
static inline int
atomic_dec_return(atomic_t *v)
{
	return atomic_sub_return(1, v);
}
#define atomic_dec_return atomic_dec_return
#endif

#ifndef atomic_dec_return_acquire
static inline int
atomic_dec_return_acquire(atomic_t *v)
{
	return atomic_sub_return_acquire(1, v);
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
#endif

#ifndef atomic_dec_return_release
static inline int
atomic_dec_return_release(atomic_t *v)
{
	return atomic_sub_return_release(1, v);
}
#define atomic_dec_return_release atomic_dec_return_release
#endif

#ifndef atomic_dec_return_relaxed
static inline int
atomic_dec_return_relaxed(atomic_t *v)
{
	return atomic_sub_return_relaxed(1, v);
}
#define atomic_dec_return_relaxed atomic_dec_return_relaxed
#endif

#else /* atomic_dec_return_relaxed */

#ifndef atomic_dec_return_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_dec_return_acquire(atomic_t *v)
{
	int ret = atomic_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_dec_return_acquire atomic_dec_return_acquire
#endif

#ifndef atomic_dec_return_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_dec_return_release(atomic_t *v)
{
	__atomic_release_fence();
	return atomic_dec_return_relaxed(v);
}
#define atomic_dec_return_release atomic_dec_return_release
#endif

#ifndef atomic_dec_return
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_dec_return(atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_dec_return atomic_dec_return
#endif

#endif /* atomic_dec_return_relaxed */
| 524 | |||
/*
 * atomic_fetch_dec() and its ordering variants.
 *
 * Same construction as atomic_dec_return: alias to a fully-ordered
 * atomic_fetch_dec() or fall back to atomic_fetch_sub*(1, v) when there is
 * no _relaxed form; otherwise build the stronger orderings from the
 * _relaxed form plus fences.
 */
#ifndef atomic_fetch_dec_relaxed
#ifdef atomic_fetch_dec
#define atomic_fetch_dec_acquire atomic_fetch_dec
#define atomic_fetch_dec_release atomic_fetch_dec
#define atomic_fetch_dec_relaxed atomic_fetch_dec
#endif /* atomic_fetch_dec */

#ifndef atomic_fetch_dec
static inline int
atomic_fetch_dec(atomic_t *v)
{
	return atomic_fetch_sub(1, v);
}
#define atomic_fetch_dec atomic_fetch_dec
#endif

#ifndef atomic_fetch_dec_acquire
static inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
	return atomic_fetch_sub_acquire(1, v);
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
#endif

#ifndef atomic_fetch_dec_release
static inline int
atomic_fetch_dec_release(atomic_t *v)
{
	return atomic_fetch_sub_release(1, v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
#endif

#ifndef atomic_fetch_dec_relaxed
static inline int
atomic_fetch_dec_relaxed(atomic_t *v)
{
	return atomic_fetch_sub_relaxed(1, v);
}
#define atomic_fetch_dec_relaxed atomic_fetch_dec_relaxed
#endif

#else /* atomic_fetch_dec_relaxed */

#ifndef atomic_fetch_dec_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_fetch_dec_acquire(atomic_t *v)
{
	int ret = atomic_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_fetch_dec_acquire atomic_fetch_dec_acquire
#endif

#ifndef atomic_fetch_dec_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_fetch_dec_release(atomic_t *v)
{
	__atomic_release_fence();
	return atomic_fetch_dec_relaxed(v);
}
#define atomic_fetch_dec_release atomic_fetch_dec_release
#endif

#ifndef atomic_fetch_dec
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_fetch_dec(atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_fetch_dec atomic_fetch_dec
#endif

#endif /* atomic_fetch_dec_relaxed */
| 605 | |||
/*
 * atomic_fetch_and() ordering variants.  Without a _relaxed form, the
 * fully-ordered op stands in for every ordering; with one, the stronger
 * orderings are built from _relaxed plus explicit fences.
 */
#ifndef atomic_fetch_and_relaxed
#define atomic_fetch_and_acquire atomic_fetch_and
#define atomic_fetch_and_release atomic_fetch_and
#define atomic_fetch_and_relaxed atomic_fetch_and
#else /* atomic_fetch_and_relaxed */

#ifndef atomic_fetch_and_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_fetch_and_acquire(int i, atomic_t *v)
{
	int ret = atomic_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_fetch_and_acquire atomic_fetch_and_acquire
#endif

#ifndef atomic_fetch_and_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_fetch_and_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return atomic_fetch_and_relaxed(i, v);
}
#define atomic_fetch_and_release atomic_fetch_and_release
#endif

#ifndef atomic_fetch_and
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_fetch_and(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_fetch_and atomic_fetch_and
#endif

#endif /* atomic_fetch_and_relaxed */
| 647 | |||
#ifndef atomic_andnot
/* Atomically clear in @v the bits set in @i, i.e. AND with ~@i. */
static inline void
atomic_andnot(int i, atomic_t *v)
{
	atomic_and(~i, v);
}
#define atomic_andnot atomic_andnot
#endif
| 656 | |||
/*
 * atomic_fetch_andnot() and its ordering variants.
 *
 * With no _relaxed form: alias every ordering to a fully-ordered
 * atomic_fetch_andnot() when one exists, otherwise implement each variant
 * as the matching atomic_fetch_and*() of the complemented mask.  With a
 * _relaxed form: build the stronger orderings from it plus fences.
 */
#ifndef atomic_fetch_andnot_relaxed
#ifdef atomic_fetch_andnot
#define atomic_fetch_andnot_acquire atomic_fetch_andnot
#define atomic_fetch_andnot_release atomic_fetch_andnot
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
#endif /* atomic_fetch_andnot */

#ifndef atomic_fetch_andnot
static inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
	return atomic_fetch_and(~i, v);
}
#define atomic_fetch_andnot atomic_fetch_andnot
#endif

#ifndef atomic_fetch_andnot_acquire
static inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
	return atomic_fetch_and_acquire(~i, v);
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
#endif

#ifndef atomic_fetch_andnot_release
static inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
	return atomic_fetch_and_release(~i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
#endif

#ifndef atomic_fetch_andnot_relaxed
static inline int
atomic_fetch_andnot_relaxed(int i, atomic_t *v)
{
	return atomic_fetch_and_relaxed(~i, v);
}
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#endif

#else /* atomic_fetch_andnot_relaxed */

#ifndef atomic_fetch_andnot_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_fetch_andnot_acquire(int i, atomic_t *v)
{
	int ret = atomic_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_fetch_andnot_acquire atomic_fetch_andnot_acquire
#endif

#ifndef atomic_fetch_andnot_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_fetch_andnot_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return atomic_fetch_andnot_relaxed(i, v);
}
#define atomic_fetch_andnot_release atomic_fetch_andnot_release
#endif

#ifndef atomic_fetch_andnot
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_fetch_andnot(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_fetch_andnot atomic_fetch_andnot
#endif

#endif /* atomic_fetch_andnot_relaxed */
| 737 | |||
/*
 * atomic_fetch_or() ordering variants: alias to the fully-ordered op when
 * no _relaxed form exists, otherwise build the stronger orderings from
 * _relaxed plus explicit fences.
 */
#ifndef atomic_fetch_or_relaxed
#define atomic_fetch_or_acquire atomic_fetch_or
#define atomic_fetch_or_release atomic_fetch_or
#define atomic_fetch_or_relaxed atomic_fetch_or
#else /* atomic_fetch_or_relaxed */

#ifndef atomic_fetch_or_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_fetch_or_acquire(int i, atomic_t *v)
{
	int ret = atomic_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_fetch_or_acquire atomic_fetch_or_acquire
#endif

#ifndef atomic_fetch_or_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_fetch_or_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return atomic_fetch_or_relaxed(i, v);
}
#define atomic_fetch_or_release atomic_fetch_or_release
#endif

#ifndef atomic_fetch_or
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_fetch_or(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_fetch_or atomic_fetch_or
#endif

#endif /* atomic_fetch_or_relaxed */
| 779 | |||
/*
 * atomic_fetch_xor() ordering variants: alias to the fully-ordered op when
 * no _relaxed form exists, otherwise build the stronger orderings from
 * _relaxed plus explicit fences.
 */
#ifndef atomic_fetch_xor_relaxed
#define atomic_fetch_xor_acquire atomic_fetch_xor
#define atomic_fetch_xor_release atomic_fetch_xor
#define atomic_fetch_xor_relaxed atomic_fetch_xor
#else /* atomic_fetch_xor_relaxed */

#ifndef atomic_fetch_xor_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_fetch_xor_acquire(int i, atomic_t *v)
{
	int ret = atomic_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_fetch_xor_acquire atomic_fetch_xor_acquire
#endif

#ifndef atomic_fetch_xor_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_fetch_xor_release(int i, atomic_t *v)
{
	__atomic_release_fence();
	return atomic_fetch_xor_relaxed(i, v);
}
#define atomic_fetch_xor_release atomic_fetch_xor_release
#endif

#ifndef atomic_fetch_xor
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_fetch_xor(int i, atomic_t *v)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_fetch_xor atomic_fetch_xor
#endif

#endif /* atomic_fetch_xor_relaxed */
| 821 | |||
/*
 * atomic_xchg() ordering variants: alias to the fully-ordered op when no
 * _relaxed form exists, otherwise build the stronger orderings from
 * _relaxed plus explicit fences.
 */
#ifndef atomic_xchg_relaxed
#define atomic_xchg_acquire atomic_xchg
#define atomic_xchg_release atomic_xchg
#define atomic_xchg_relaxed atomic_xchg
#else /* atomic_xchg_relaxed */

#ifndef atomic_xchg_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_xchg_acquire(atomic_t *v, int i)
{
	int ret = atomic_xchg_relaxed(v, i);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_xchg_acquire atomic_xchg_acquire
#endif

#ifndef atomic_xchg_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_xchg_release(atomic_t *v, int i)
{
	__atomic_release_fence();
	return atomic_xchg_relaxed(v, i);
}
#define atomic_xchg_release atomic_xchg_release
#endif

#ifndef atomic_xchg
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_xchg(atomic_t *v, int i)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_xchg_relaxed(v, i);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_xchg atomic_xchg
#endif

#endif /* atomic_xchg_relaxed */
| 863 | |||
/*
 * atomic_cmpxchg() ordering variants: alias to the fully-ordered op when
 * no _relaxed form exists, otherwise build the stronger orderings from
 * _relaxed plus explicit fences.
 */
#ifndef atomic_cmpxchg_relaxed
#define atomic_cmpxchg_acquire atomic_cmpxchg
#define atomic_cmpxchg_release atomic_cmpxchg
#define atomic_cmpxchg_relaxed atomic_cmpxchg
#else /* atomic_cmpxchg_relaxed */

#ifndef atomic_cmpxchg_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline int
atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
{
	int ret = atomic_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
#endif

#ifndef atomic_cmpxchg_release
/* Release: a release fence followed by the relaxed op. */
static inline int
atomic_cmpxchg_release(atomic_t *v, int old, int new)
{
	__atomic_release_fence();
	return atomic_cmpxchg_relaxed(v, old, new);
}
#define atomic_cmpxchg_release atomic_cmpxchg_release
#endif

#ifndef atomic_cmpxchg
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline int
atomic_cmpxchg(atomic_t *v, int old, int new)
{
	int ret;
	__atomic_pre_full_fence();
	ret = atomic_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_cmpxchg atomic_cmpxchg
#endif

#endif /* atomic_cmpxchg_relaxed */
| 905 | |||
/*
 * atomic_try_cmpxchg() and its ordering variants.
 *
 * try_cmpxchg() returns true on success; on failure it writes the value
 * actually observed back into *old, so callers can retry without an extra
 * read.  With no _relaxed form, each variant is emulated on top of the
 * matching atomic_cmpxchg*(); with one, the stronger orderings are built
 * from _relaxed plus fences.
 */
#ifndef atomic_try_cmpxchg_relaxed
#ifdef atomic_try_cmpxchg
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
#define atomic_try_cmpxchg_release atomic_try_cmpxchg
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
#endif /* atomic_try_cmpxchg */

#ifndef atomic_try_cmpxchg
static inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = atomic_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;	/* report the value we actually saw */
	return likely(r == o);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
#endif

#ifndef atomic_try_cmpxchg_acquire
static inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = atomic_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
#endif

#ifndef atomic_try_cmpxchg_release
static inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = atomic_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
#endif

#ifndef atomic_try_cmpxchg_relaxed
static inline bool
atomic_try_cmpxchg_relaxed(atomic_t *v, int *old, int new)
{
	int r, o = *old;
	r = atomic_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg_relaxed
#endif

#else /* atomic_try_cmpxchg_relaxed */

#ifndef atomic_try_cmpxchg_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline bool
atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
{
	bool ret = atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
}
#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg_acquire
#endif

#ifndef atomic_try_cmpxchg_release
/* Release: a release fence followed by the relaxed op. */
static inline bool
atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
{
	__atomic_release_fence();
	return atomic_try_cmpxchg_relaxed(v, old, new);
}
#define atomic_try_cmpxchg_release atomic_try_cmpxchg_release
#endif

#ifndef atomic_try_cmpxchg
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline bool
atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
	bool ret;
	__atomic_pre_full_fence();
	ret = atomic_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
#endif

#endif /* atomic_try_cmpxchg_relaxed */
| 1002 | |||
#ifndef atomic_sub_and_test
/**
 * atomic_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 */
static inline bool
atomic_sub_and_test(int i, atomic_t *v)
{
	return atomic_sub_return(i, v) == 0;
}
#define atomic_sub_and_test atomic_sub_and_test
#endif
| 1020 | |||
#ifndef atomic_dec_and_test
/**
 * atomic_dec_and_test - decrement and test
 * @v: pointer of type atomic_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 */
static inline bool
atomic_dec_and_test(atomic_t *v)
{
	return atomic_dec_return(v) == 0;
}
#define atomic_dec_and_test atomic_dec_and_test
#endif
| 1037 | |||
#ifndef atomic_inc_and_test
/**
 * atomic_inc_and_test - increment and test
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 */
static inline bool
atomic_inc_and_test(atomic_t *v)
{
	return atomic_inc_return(v) == 0;
}
#define atomic_inc_and_test atomic_inc_and_test
#endif
| 1054 | |||
#ifndef atomic_add_negative
/**
 * atomic_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 */
static inline bool
atomic_add_negative(int i, atomic_t *v)
{
	return atomic_add_return(i, v) < 0;
}
#define atomic_add_negative atomic_add_negative
#endif
| 1072 | |||
#ifndef atomic_fetch_add_unless
/**
 * atomic_fetch_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as @v was not already @u.
 * Returns original value of @v
 */
static inline int
atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
	int c = atomic_read(v);

	/*
	 * cmpxchg loop: try_cmpxchg() refreshes @c with the observed value
	 * on failure, so keep retrying until we either see @u or the add
	 * lands.
	 */
	do {
		if (unlikely(c == u))
			break;
	} while (!atomic_try_cmpxchg(v, &c, c + a));

	return c;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#endif
| 1097 | |||
#ifndef atomic_add_unless
/**
 * atomic_add_unless - add unless the number is already a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, if @v was not already @u.
 * Returns true if the addition was done.
 */
static inline bool
atomic_add_unless(atomic_t *v, int a, int u)
{
	return atomic_fetch_add_unless(v, a, u) != u;
}
#define atomic_add_unless atomic_add_unless
#endif
| 1115 | |||
#ifndef atomic_inc_not_zero
/**
 * atomic_inc_not_zero - increment unless the number is zero
 * @v: pointer of type atomic_t
 *
 * Atomically increments @v by 1, if @v is non-zero.
 * Returns true if the increment was done.
 */
static inline bool
atomic_inc_not_zero(atomic_t *v)
{
	return atomic_add_unless(v, 1, 0);
}
#define atomic_inc_not_zero atomic_inc_not_zero
#endif
| 1131 | |||
#ifndef atomic_inc_unless_negative
/*
 * Atomically increment @v by 1 unless its value is negative.
 * Returns true if the increment was done, false if @v was seen negative.
 */
static inline bool
atomic_inc_unless_negative(atomic_t *v)
{
	int c = atomic_read(v);

	/* try_cmpxchg() updates @c on failure, so the loop re-checks it. */
	do {
		if (unlikely(c < 0))
			return false;
	} while (!atomic_try_cmpxchg(v, &c, c + 1));

	return true;
}
#define atomic_inc_unless_negative atomic_inc_unless_negative
#endif
| 1147 | |||
#ifndef atomic_dec_unless_positive
/*
 * Atomically decrement @v by 1 unless its value is positive.
 * Returns true if the decrement was done, false if @v was seen positive.
 */
static inline bool
atomic_dec_unless_positive(atomic_t *v)
{
	int c = atomic_read(v);

	/* try_cmpxchg() updates @c on failure, so the loop re-checks it. */
	do {
		if (unlikely(c > 0))
			return false;
	} while (!atomic_try_cmpxchg(v, &c, c - 1));

	return true;
}
#define atomic_dec_unless_positive atomic_dec_unless_positive
#endif
| 1163 | |||
#ifndef atomic_dec_if_positive
/*
 * Atomically decrement @v by 1 only if the result would be non-negative.
 * Returns the decremented value; a negative return means @v was not
 * modified (the last observed value minus one was below zero).
 */
static inline int
atomic_dec_if_positive(atomic_t *v)
{
	int dec, c = atomic_read(v);

	do {
		dec = c - 1;
		if (unlikely(dec < 0))
			break;		/* would go negative: do not store */
	} while (!atomic_try_cmpxchg(v, &c, dec));

	return dec;
}
#define atomic_dec_if_positive atomic_dec_if_positive
#endif
| 1180 | |||
/*
 * Wait on v->counter until condition (c) holds, via smp_cond_load_*();
 * the _acquire form orders like a load-acquire, _relaxed imposes no order.
 */
#define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c))
#define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c))
| 1183 | |||
| 1184 | #ifdef CONFIG_GENERIC_ATOMIC64 | ||
| 1185 | #include <asm-generic/atomic64.h> | ||
| 1186 | #endif | ||
| 1187 | |||
#ifndef atomic64_read_acquire
/* Acquire-ordered 64-bit read, via smp_load_acquire() on the counter. */
static inline s64
atomic64_read_acquire(const atomic64_t *v)
{
	return smp_load_acquire(&(v)->counter);
}
#define atomic64_read_acquire atomic64_read_acquire
#endif
| 1196 | |||
#ifndef atomic64_set_release
/* Release-ordered 64-bit store, via smp_store_release() on the counter. */
static inline void
atomic64_set_release(atomic64_t *v, s64 i)
{
	smp_store_release(&(v)->counter, i);
}
#define atomic64_set_release atomic64_set_release
#endif
| 1205 | |||
/*
 * atomic64_add_return() ordering variants: alias to the fully-ordered op
 * when no _relaxed form exists, otherwise build the stronger orderings
 * from _relaxed plus explicit fences.
 */
#ifndef atomic64_add_return_relaxed
#define atomic64_add_return_acquire atomic64_add_return
#define atomic64_add_return_release atomic64_add_return
#define atomic64_add_return_relaxed atomic64_add_return
#else /* atomic64_add_return_relaxed */

#ifndef atomic64_add_return_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline s64
atomic64_add_return_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_add_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_add_return_acquire atomic64_add_return_acquire
#endif

#ifndef atomic64_add_return_release
/* Release: a release fence followed by the relaxed op. */
static inline s64
atomic64_add_return_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_add_return_relaxed(i, v);
}
#define atomic64_add_return_release atomic64_add_return_release
#endif

#ifndef atomic64_add_return
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline s64
atomic64_add_return(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_add_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_add_return atomic64_add_return
#endif

#endif /* atomic64_add_return_relaxed */
| 1247 | |||
/*
 * atomic64_fetch_add() ordering variants: alias to the fully-ordered op
 * when no _relaxed form exists, otherwise build the stronger orderings
 * from _relaxed plus explicit fences.
 */
#ifndef atomic64_fetch_add_relaxed
#define atomic64_fetch_add_acquire atomic64_fetch_add
#define atomic64_fetch_add_release atomic64_fetch_add
#define atomic64_fetch_add_relaxed atomic64_fetch_add
#else /* atomic64_fetch_add_relaxed */

#ifndef atomic64_fetch_add_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline s64
atomic64_fetch_add_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_fetch_add_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_add_acquire atomic64_fetch_add_acquire
#endif

#ifndef atomic64_fetch_add_release
/* Release: a release fence followed by the relaxed op. */
static inline s64
atomic64_fetch_add_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_add_relaxed(i, v);
}
#define atomic64_fetch_add_release atomic64_fetch_add_release
#endif

#ifndef atomic64_fetch_add
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline s64
atomic64_fetch_add(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_add_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_add atomic64_fetch_add
#endif

#endif /* atomic64_fetch_add_relaxed */
| 1289 | |||
/*
 * atomic64_sub_return() ordering variants: alias to the fully-ordered op
 * when no _relaxed form exists, otherwise build the stronger orderings
 * from _relaxed plus explicit fences.
 */
#ifndef atomic64_sub_return_relaxed
#define atomic64_sub_return_acquire atomic64_sub_return
#define atomic64_sub_return_release atomic64_sub_return
#define atomic64_sub_return_relaxed atomic64_sub_return
#else /* atomic64_sub_return_relaxed */

#ifndef atomic64_sub_return_acquire
/* Acquire: the relaxed op followed by an acquire fence. */
static inline s64
atomic64_sub_return_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_sub_return_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_sub_return_acquire atomic64_sub_return_acquire
#endif

#ifndef atomic64_sub_return_release
/* Release: a release fence followed by the relaxed op. */
static inline s64
atomic64_sub_return_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_sub_return_relaxed(i, v);
}
#define atomic64_sub_return_release atomic64_sub_return_release
#endif

#ifndef atomic64_sub_return
/* Fully ordered: the relaxed op bracketed by full fences. */
static inline s64
atomic64_sub_return(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_sub_return_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_sub_return atomic64_sub_return
#endif

#endif /* atomic64_sub_return_relaxed */
| 1331 | |||
#ifndef atomic64_fetch_sub_relaxed
/* No _relaxed form: the fully-ordered op stands in for every variant. */
#define atomic64_fetch_sub_acquire atomic64_fetch_sub
#define atomic64_fetch_sub_release atomic64_fetch_sub
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub
#else /* atomic64_fetch_sub_relaxed */

#ifndef atomic64_fetch_sub_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_fetch_sub_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_fetch_sub_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_sub_acquire atomic64_fetch_sub_acquire
#endif

#ifndef atomic64_fetch_sub_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_fetch_sub_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_sub_relaxed(i, v);
}
#define atomic64_fetch_sub_release atomic64_fetch_sub_release
#endif

#ifndef atomic64_fetch_sub
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_fetch_sub(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_sub_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_sub atomic64_fetch_sub
#endif

#endif /* atomic64_fetch_sub_relaxed */
| 1373 | |||
#ifndef atomic64_inc
/* atomic64_inc - atomically increment @v by 1; no return value. */
static inline void
atomic64_inc(atomic64_t *v)
{
	atomic64_add(1, v);
}
#define atomic64_inc atomic64_inc
#endif
| 1382 | |||
#ifndef atomic64_inc_return_relaxed
#ifdef atomic64_inc_return
/* Only the fully-ordered op exists: it serves for every weaker ordering. */
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
#define atomic64_inc_return_relaxed atomic64_inc_return
#endif /* atomic64_inc_return */

/* Otherwise build each ordering variant from the matching add_return(1, v). */
#ifndef atomic64_inc_return
static inline s64
atomic64_inc_return(atomic64_t *v)
{
	return atomic64_add_return(1, v);
}
#define atomic64_inc_return atomic64_inc_return
#endif

#ifndef atomic64_inc_return_acquire
static inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
	return atomic64_add_return_acquire(1, v);
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
#endif

#ifndef atomic64_inc_return_release
static inline s64
atomic64_inc_return_release(atomic64_t *v)
{
	return atomic64_add_return_release(1, v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
#endif

#ifndef atomic64_inc_return_relaxed
static inline s64
atomic64_inc_return_relaxed(atomic64_t *v)
{
	return atomic64_add_return_relaxed(1, v);
}
#define atomic64_inc_return_relaxed atomic64_inc_return_relaxed
#endif

#else /* atomic64_inc_return_relaxed */

#ifndef atomic64_inc_return_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_inc_return_acquire(atomic64_t *v)
{
	s64 ret = atomic64_inc_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_inc_return_acquire atomic64_inc_return_acquire
#endif

#ifndef atomic64_inc_return_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_inc_return_release(atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_inc_return_relaxed(v);
}
#define atomic64_inc_return_release atomic64_inc_return_release
#endif

#ifndef atomic64_inc_return
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_inc_return(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_inc_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_inc_return atomic64_inc_return
#endif

#endif /* atomic64_inc_return_relaxed */
| 1463 | |||
#ifndef atomic64_fetch_inc_relaxed
#ifdef atomic64_fetch_inc
/* Only the fully-ordered op exists: it serves for every weaker ordering. */
#define atomic64_fetch_inc_acquire atomic64_fetch_inc
#define atomic64_fetch_inc_release atomic64_fetch_inc
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc
#endif /* atomic64_fetch_inc */

/* Otherwise build each ordering variant from the matching fetch_add(1, v). */
#ifndef atomic64_fetch_inc
static inline s64
atomic64_fetch_inc(atomic64_t *v)
{
	return atomic64_fetch_add(1, v);
}
#define atomic64_fetch_inc atomic64_fetch_inc
#endif

#ifndef atomic64_fetch_inc_acquire
static inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
	return atomic64_fetch_add_acquire(1, v);
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
#endif

#ifndef atomic64_fetch_inc_release
static inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
	return atomic64_fetch_add_release(1, v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
#endif

#ifndef atomic64_fetch_inc_relaxed
static inline s64
atomic64_fetch_inc_relaxed(atomic64_t *v)
{
	return atomic64_fetch_add_relaxed(1, v);
}
#define atomic64_fetch_inc_relaxed atomic64_fetch_inc_relaxed
#endif

#else /* atomic64_fetch_inc_relaxed */

#ifndef atomic64_fetch_inc_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_fetch_inc_acquire(atomic64_t *v)
{
	s64 ret = atomic64_fetch_inc_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_inc_acquire atomic64_fetch_inc_acquire
#endif

#ifndef atomic64_fetch_inc_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_fetch_inc_release(atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_inc_relaxed(v);
}
#define atomic64_fetch_inc_release atomic64_fetch_inc_release
#endif

#ifndef atomic64_fetch_inc
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_fetch_inc(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_inc_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_inc atomic64_fetch_inc
#endif

#endif /* atomic64_fetch_inc_relaxed */
| 1544 | |||
#ifndef atomic64_dec
/* atomic64_dec - atomically decrement @v by 1; no return value. */
static inline void
atomic64_dec(atomic64_t *v)
{
	atomic64_sub(1, v);
}
#define atomic64_dec atomic64_dec
#endif
| 1553 | |||
#ifndef atomic64_dec_return_relaxed
#ifdef atomic64_dec_return
/* Only the fully-ordered op exists: it serves for every weaker ordering. */
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
#define atomic64_dec_return_relaxed atomic64_dec_return
#endif /* atomic64_dec_return */

/* Otherwise build each ordering variant from the matching sub_return(1, v). */
#ifndef atomic64_dec_return
static inline s64
atomic64_dec_return(atomic64_t *v)
{
	return atomic64_sub_return(1, v);
}
#define atomic64_dec_return atomic64_dec_return
#endif

#ifndef atomic64_dec_return_acquire
static inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
	return atomic64_sub_return_acquire(1, v);
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
#endif

#ifndef atomic64_dec_return_release
static inline s64
atomic64_dec_return_release(atomic64_t *v)
{
	return atomic64_sub_return_release(1, v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
#endif

#ifndef atomic64_dec_return_relaxed
static inline s64
atomic64_dec_return_relaxed(atomic64_t *v)
{
	return atomic64_sub_return_relaxed(1, v);
}
#define atomic64_dec_return_relaxed atomic64_dec_return_relaxed
#endif

#else /* atomic64_dec_return_relaxed */

#ifndef atomic64_dec_return_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_dec_return_acquire(atomic64_t *v)
{
	s64 ret = atomic64_dec_return_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_dec_return_acquire atomic64_dec_return_acquire
#endif

#ifndef atomic64_dec_return_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_dec_return_release(atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_dec_return_relaxed(v);
}
#define atomic64_dec_return_release atomic64_dec_return_release
#endif

#ifndef atomic64_dec_return
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_dec_return(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_dec_return_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_dec_return atomic64_dec_return
#endif

#endif /* atomic64_dec_return_relaxed */
| 1634 | |||
#ifndef atomic64_fetch_dec_relaxed
#ifdef atomic64_fetch_dec
/* Only the fully-ordered op exists: it serves for every weaker ordering. */
#define atomic64_fetch_dec_acquire atomic64_fetch_dec
#define atomic64_fetch_dec_release atomic64_fetch_dec
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec
#endif /* atomic64_fetch_dec */

/* Otherwise build each ordering variant from the matching fetch_sub(1, v). */
#ifndef atomic64_fetch_dec
static inline s64
atomic64_fetch_dec(atomic64_t *v)
{
	return atomic64_fetch_sub(1, v);
}
#define atomic64_fetch_dec atomic64_fetch_dec
#endif

#ifndef atomic64_fetch_dec_acquire
static inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
	return atomic64_fetch_sub_acquire(1, v);
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
#endif

#ifndef atomic64_fetch_dec_release
static inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
	return atomic64_fetch_sub_release(1, v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
#endif

#ifndef atomic64_fetch_dec_relaxed
static inline s64
atomic64_fetch_dec_relaxed(atomic64_t *v)
{
	return atomic64_fetch_sub_relaxed(1, v);
}
#define atomic64_fetch_dec_relaxed atomic64_fetch_dec_relaxed
#endif

#else /* atomic64_fetch_dec_relaxed */

#ifndef atomic64_fetch_dec_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_fetch_dec_acquire(atomic64_t *v)
{
	s64 ret = atomic64_fetch_dec_relaxed(v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_dec_acquire atomic64_fetch_dec_acquire
#endif

#ifndef atomic64_fetch_dec_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_fetch_dec_release(atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_dec_relaxed(v);
}
#define atomic64_fetch_dec_release atomic64_fetch_dec_release
#endif

#ifndef atomic64_fetch_dec
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_fetch_dec(atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_dec_relaxed(v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_dec atomic64_fetch_dec
#endif

#endif /* atomic64_fetch_dec_relaxed */
| 1715 | |||
#ifndef atomic64_fetch_and_relaxed
/* No _relaxed form: the fully-ordered op stands in for every variant. */
#define atomic64_fetch_and_acquire atomic64_fetch_and
#define atomic64_fetch_and_release atomic64_fetch_and
#define atomic64_fetch_and_relaxed atomic64_fetch_and
#else /* atomic64_fetch_and_relaxed */

#ifndef atomic64_fetch_and_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_fetch_and_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_fetch_and_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_and_acquire atomic64_fetch_and_acquire
#endif

#ifndef atomic64_fetch_and_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_fetch_and_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_and_relaxed(i, v);
}
#define atomic64_fetch_and_release atomic64_fetch_and_release
#endif

#ifndef atomic64_fetch_and
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_fetch_and(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_and_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_and atomic64_fetch_and
#endif

#endif /* atomic64_fetch_and_relaxed */
| 1757 | |||
#ifndef atomic64_andnot
/* atomic64_andnot - atomically clear the bits of @i in @v (v &= ~i). */
static inline void
atomic64_andnot(s64 i, atomic64_t *v)
{
	atomic64_and(~i, v);
}
#define atomic64_andnot atomic64_andnot
#endif
| 1766 | |||
#ifndef atomic64_fetch_andnot_relaxed
#ifdef atomic64_fetch_andnot
/* Only the fully-ordered op exists: it serves for every weaker ordering. */
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
#define atomic64_fetch_andnot_release atomic64_fetch_andnot
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
#endif /* atomic64_fetch_andnot */

/* Otherwise build each ordering variant from the matching fetch_and(~i, v). */
#ifndef atomic64_fetch_andnot
static inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
	return atomic64_fetch_and(~i, v);
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
#endif

#ifndef atomic64_fetch_andnot_acquire
static inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
	return atomic64_fetch_and_acquire(~i, v);
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#endif

#ifndef atomic64_fetch_andnot_release
static inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
	return atomic64_fetch_and_release(~i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#endif

#ifndef atomic64_fetch_andnot_relaxed
static inline s64
atomic64_fetch_andnot_relaxed(s64 i, atomic64_t *v)
{
	return atomic64_fetch_and_relaxed(~i, v);
}
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#endif

#else /* atomic64_fetch_andnot_relaxed */

#ifndef atomic64_fetch_andnot_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_fetch_andnot_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_fetch_andnot_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot_acquire
#endif

#ifndef atomic64_fetch_andnot_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_fetch_andnot_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_andnot_relaxed(i, v);
}
#define atomic64_fetch_andnot_release atomic64_fetch_andnot_release
#endif

#ifndef atomic64_fetch_andnot
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_fetch_andnot(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_andnot_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_andnot atomic64_fetch_andnot
#endif

#endif /* atomic64_fetch_andnot_relaxed */
| 1847 | |||
#ifndef atomic64_fetch_or_relaxed
/* No _relaxed form: the fully-ordered op stands in for every variant. */
#define atomic64_fetch_or_acquire atomic64_fetch_or
#define atomic64_fetch_or_release atomic64_fetch_or
#define atomic64_fetch_or_relaxed atomic64_fetch_or
#else /* atomic64_fetch_or_relaxed */

#ifndef atomic64_fetch_or_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_fetch_or_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_fetch_or_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_or_acquire atomic64_fetch_or_acquire
#endif

#ifndef atomic64_fetch_or_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_fetch_or_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_or_relaxed(i, v);
}
#define atomic64_fetch_or_release atomic64_fetch_or_release
#endif

#ifndef atomic64_fetch_or
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_fetch_or(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_or_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_or atomic64_fetch_or
#endif

#endif /* atomic64_fetch_or_relaxed */
| 1889 | |||
#ifndef atomic64_fetch_xor_relaxed
/* No _relaxed form: the fully-ordered op stands in for every variant. */
#define atomic64_fetch_xor_acquire atomic64_fetch_xor
#define atomic64_fetch_xor_release atomic64_fetch_xor
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor
#else /* atomic64_fetch_xor_relaxed */

#ifndef atomic64_fetch_xor_acquire
/* Acquire: relaxed RMW, then acquire fence. */
static inline s64
atomic64_fetch_xor_acquire(s64 i, atomic64_t *v)
{
	s64 ret = atomic64_fetch_xor_relaxed(i, v);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_fetch_xor_acquire atomic64_fetch_xor_acquire
#endif

#ifndef atomic64_fetch_xor_release
/* Release: release fence, then relaxed RMW. */
static inline s64
atomic64_fetch_xor_release(s64 i, atomic64_t *v)
{
	__atomic_release_fence();
	return atomic64_fetch_xor_relaxed(i, v);
}
#define atomic64_fetch_xor_release atomic64_fetch_xor_release
#endif

#ifndef atomic64_fetch_xor
/* Fully ordered: full fences around the relaxed RMW. */
static inline s64
atomic64_fetch_xor(s64 i, atomic64_t *v)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_fetch_xor_relaxed(i, v);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_fetch_xor atomic64_fetch_xor
#endif

#endif /* atomic64_fetch_xor_relaxed */
| 1931 | |||
#ifndef atomic64_xchg_relaxed
/* No _relaxed form: the fully-ordered op stands in for every variant. */
#define atomic64_xchg_acquire atomic64_xchg
#define atomic64_xchg_release atomic64_xchg
#define atomic64_xchg_relaxed atomic64_xchg
#else /* atomic64_xchg_relaxed */

#ifndef atomic64_xchg_acquire
/* Acquire: relaxed exchange, then acquire fence. */
static inline s64
atomic64_xchg_acquire(atomic64_t *v, s64 i)
{
	s64 ret = atomic64_xchg_relaxed(v, i);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_xchg_acquire atomic64_xchg_acquire
#endif

#ifndef atomic64_xchg_release
/* Release: release fence, then relaxed exchange. */
static inline s64
atomic64_xchg_release(atomic64_t *v, s64 i)
{
	__atomic_release_fence();
	return atomic64_xchg_relaxed(v, i);
}
#define atomic64_xchg_release atomic64_xchg_release
#endif

#ifndef atomic64_xchg
/* Fully ordered: full fences around the relaxed exchange. */
static inline s64
atomic64_xchg(atomic64_t *v, s64 i)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_xchg_relaxed(v, i);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_xchg atomic64_xchg
#endif

#endif /* atomic64_xchg_relaxed */
| 1973 | |||
#ifndef atomic64_cmpxchg_relaxed
/* No _relaxed form: the fully-ordered op stands in for every variant. */
#define atomic64_cmpxchg_acquire atomic64_cmpxchg
#define atomic64_cmpxchg_release atomic64_cmpxchg
#define atomic64_cmpxchg_relaxed atomic64_cmpxchg
#else /* atomic64_cmpxchg_relaxed */

#ifndef atomic64_cmpxchg_acquire
/* Acquire: relaxed cmpxchg, then acquire fence. */
static inline s64
atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
{
	s64 ret = atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_cmpxchg_acquire atomic64_cmpxchg_acquire
#endif

#ifndef atomic64_cmpxchg_release
/* Release: release fence, then relaxed cmpxchg. */
static inline s64
atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
{
	__atomic_release_fence();
	return atomic64_cmpxchg_relaxed(v, old, new);
}
#define atomic64_cmpxchg_release atomic64_cmpxchg_release
#endif

#ifndef atomic64_cmpxchg
/* Fully ordered: full fences around the relaxed cmpxchg. */
static inline s64
atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
{
	s64 ret;
	__atomic_pre_full_fence();
	ret = atomic64_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_cmpxchg atomic64_cmpxchg
#endif

#endif /* atomic64_cmpxchg_relaxed */
| 2015 | |||
#ifndef atomic64_try_cmpxchg_relaxed
#ifdef atomic64_try_cmpxchg
/* Only the fully-ordered op exists: it serves for every weaker ordering. */
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */

/*
 * Otherwise build try_cmpxchg from plain cmpxchg: attempt the swap, and on
 * failure write the value actually observed back through @old so the caller
 * can retry.  Returns true iff the swap happened.
 */
#ifndef atomic64_try_cmpxchg
static inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	s64 r, o = *old;
	r = atomic64_cmpxchg(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
#endif

#ifndef atomic64_try_cmpxchg_acquire
static inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
	s64 r, o = *old;
	r = atomic64_cmpxchg_acquire(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
#endif

#ifndef atomic64_try_cmpxchg_release
static inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
	s64 r, o = *old;
	r = atomic64_cmpxchg_release(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
#endif

#ifndef atomic64_try_cmpxchg_relaxed
static inline bool
atomic64_try_cmpxchg_relaxed(atomic64_t *v, s64 *old, s64 new)
{
	s64 r, o = *old;
	r = atomic64_cmpxchg_relaxed(v, o, new);
	if (unlikely(r != o))
		*old = r;
	return likely(r == o);
}
#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg_relaxed
#endif

#else /* atomic64_try_cmpxchg_relaxed */

#ifndef atomic64_try_cmpxchg_acquire
/* Acquire: relaxed try_cmpxchg, then acquire fence. */
static inline bool
atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
{
	bool ret = atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_acquire_fence();
	return ret;
}
#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg_acquire
#endif

#ifndef atomic64_try_cmpxchg_release
/* Release: release fence, then relaxed try_cmpxchg. */
static inline bool
atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
{
	__atomic_release_fence();
	return atomic64_try_cmpxchg_relaxed(v, old, new);
}
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg_release
#endif

#ifndef atomic64_try_cmpxchg
/* Fully ordered: full fences around the relaxed try_cmpxchg. */
static inline bool
atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
{
	bool ret;
	__atomic_pre_full_fence();
	ret = atomic64_try_cmpxchg_relaxed(v, old, new);
	__atomic_post_full_fence();
	return ret;
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
#endif

#endif /* atomic64_try_cmpxchg_relaxed */
| 2112 | |||
#ifndef atomic64_sub_and_test
/**
 * atomic64_sub_and_test - subtract value from variable and test result
 * @i: integer value to subtract
 * @v: pointer of type atomic64_t
 *
 * Atomically subtracts @i from @v and returns
 * true if the result is zero, or false for all
 * other cases.
 *
 * Fallback implemented via the fully-ordered atomic64_sub_return().
 */
static inline bool
atomic64_sub_and_test(s64 i, atomic64_t *v)
{
	return atomic64_sub_return(i, v) == 0;
}
#define atomic64_sub_and_test atomic64_sub_and_test
#endif
| 2130 | |||
#ifndef atomic64_dec_and_test
/**
 * atomic64_dec_and_test - decrement and test
 * @v: pointer of type atomic64_t
 *
 * Atomically decrements @v by 1 and
 * returns true if the result is 0, or false for all other
 * cases.
 *
 * Fallback implemented via the fully-ordered atomic64_dec_return().
 */
static inline bool
atomic64_dec_and_test(atomic64_t *v)
{
	return atomic64_dec_return(v) == 0;
}
#define atomic64_dec_and_test atomic64_dec_and_test
#endif
| 2147 | |||
#ifndef atomic64_inc_and_test
/**
 * atomic64_inc_and_test - increment and test
 * @v: pointer of type atomic64_t
 *
 * Atomically increments @v by 1
 * and returns true if the result is zero, or false for all
 * other cases.
 *
 * Fallback implemented via the fully-ordered atomic64_inc_return().
 */
static inline bool
atomic64_inc_and_test(atomic64_t *v)
{
	return atomic64_inc_return(v) == 0;
}
#define atomic64_inc_and_test atomic64_inc_and_test
#endif
| 2164 | |||
#ifndef atomic64_add_negative
/**
 * atomic64_add_negative - add and test if negative
 * @i: integer value to add
 * @v: pointer of type atomic64_t
 *
 * Atomically adds @i to @v and returns true
 * if the result is negative, or false when
 * result is greater than or equal to zero.
 *
 * Fallback implemented via the fully-ordered atomic64_add_return().
 */
static inline bool
atomic64_add_negative(s64 i, atomic64_t *v)
{
	return atomic64_add_return(i, v) < 0;
}
#define atomic64_add_negative atomic64_add_negative
#endif
| 2182 | |||
| 2183 | #ifndef atomic64_fetch_add_unless | ||
| 2184 | /** | ||
| 2185 | * atomic64_fetch_add_unless - add unless the number is already a given value | ||
| 2186 | * @v: pointer of type atomic64_t | ||
| 2187 | * @a: the amount to add to v... | ||
| 2188 | * @u: ...unless v is equal to u. | ||
| 2189 | * | ||
| 2190 | * Atomically adds @a to @v, so long as @v was not already @u. | ||
| 2191 | * Returns original value of @v | ||
| 2192 | */ | ||
| 2193 | static inline s64 | ||
| 2194 | atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) | ||
| 2195 | { | ||
| 2196 | s64 c = atomic64_read(v); | ||
| 2197 | |||
| 2198 | do { | ||
| 2199 | if (unlikely(c == u)) | ||
| 2200 | break; | ||
| 2201 | } while (!atomic64_try_cmpxchg(v, &c, c + a)); | ||
| 2202 | |||
| 2203 | return c; | ||
| 2204 | } | ||
| 2205 | #define atomic64_fetch_add_unless atomic64_fetch_add_unless | ||
| 2206 | #endif | ||
| 2207 | |||
| 2208 | #ifndef atomic64_add_unless | ||
| 2209 | /** | ||
| 2210 | * atomic64_add_unless - add unless the number is already a given value | ||
| 2211 | * @v: pointer of type atomic64_t | ||
| 2212 | * @a: the amount to add to v... | ||
| 2213 | * @u: ...unless v is equal to u. | ||
| 2214 | * | ||
| 2215 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 2216 | * Returns true if the addition was done. | ||
| 2217 | */ | ||
| 2218 | static inline bool | ||
| 2219 | atomic64_add_unless(atomic64_t *v, s64 a, s64 u) | ||
| 2220 | { | ||
| 2221 | return atomic64_fetch_add_unless(v, a, u) != u; | ||
| 2222 | } | ||
| 2223 | #define atomic64_add_unless atomic64_add_unless | ||
| 2224 | #endif | ||
| 2225 | |||
| 2226 | #ifndef atomic64_inc_not_zero | ||
| 2227 | /** | ||
| 2228 | * atomic64_inc_not_zero - increment unless the number is zero | ||
| 2229 | * @v: pointer of type atomic64_t | ||
| 2230 | * | ||
| 2231 | * Atomically increments @v by 1, if @v is non-zero. | ||
| 2232 | * Returns true if the increment was done. | ||
| 2233 | */ | ||
| 2234 | static inline bool | ||
| 2235 | atomic64_inc_not_zero(atomic64_t *v) | ||
| 2236 | { | ||
| 2237 | return atomic64_add_unless(v, 1, 0); | ||
| 2238 | } | ||
| 2239 | #define atomic64_inc_not_zero atomic64_inc_not_zero | ||
| 2240 | #endif | ||
| 2241 | |||
| 2242 | #ifndef atomic64_inc_unless_negative | ||
| 2243 | static inline bool | ||
| 2244 | atomic64_inc_unless_negative(atomic64_t *v) | ||
| 2245 | { | ||
| 2246 | s64 c = atomic64_read(v); | ||
| 2247 | |||
| 2248 | do { | ||
| 2249 | if (unlikely(c < 0)) | ||
| 2250 | return false; | ||
| 2251 | } while (!atomic64_try_cmpxchg(v, &c, c + 1)); | ||
| 2252 | |||
| 2253 | return true; | ||
| 2254 | } | ||
| 2255 | #define atomic64_inc_unless_negative atomic64_inc_unless_negative | ||
| 2256 | #endif | ||
| 2257 | |||
| 2258 | #ifndef atomic64_dec_unless_positive | ||
| 2259 | static inline bool | ||
| 2260 | atomic64_dec_unless_positive(atomic64_t *v) | ||
| 2261 | { | ||
| 2262 | s64 c = atomic64_read(v); | ||
| 2263 | |||
| 2264 | do { | ||
| 2265 | if (unlikely(c > 0)) | ||
| 2266 | return false; | ||
| 2267 | } while (!atomic64_try_cmpxchg(v, &c, c - 1)); | ||
| 2268 | |||
| 2269 | return true; | ||
| 2270 | } | ||
| 2271 | #define atomic64_dec_unless_positive atomic64_dec_unless_positive | ||
| 2272 | #endif | ||
| 2273 | |||
| 2274 | #ifndef atomic64_dec_if_positive | ||
| 2275 | static inline s64 | ||
| 2276 | atomic64_dec_if_positive(atomic64_t *v) | ||
| 2277 | { | ||
| 2278 | s64 dec, c = atomic64_read(v); | ||
| 2279 | |||
| 2280 | do { | ||
| 2281 | dec = c - 1; | ||
| 2282 | if (unlikely(dec < 0)) | ||
| 2283 | break; | ||
| 2284 | } while (!atomic64_try_cmpxchg(v, &c, dec)); | ||
| 2285 | |||
| 2286 | return dec; | ||
| 2287 | } | ||
| 2288 | #define atomic64_dec_if_positive atomic64_dec_if_positive | ||
| 2289 | #endif | ||
| 2290 | |||
| 2291 | #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
| 2292 | #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
| 2293 | |||
| 2294 | #endif /* _LINUX_ATOMIC_FALLBACK_H */ | ||
| 2295 | // 25de4a2804d70f57e994fe3b419148658bb5378a | ||
diff --git a/include/linux/atomic.h b/include/linux/atomic.h index 1e8e88bdaf09..4c0d009a46f0 100644 --- a/include/linux/atomic.h +++ b/include/linux/atomic.h | |||
| @@ -25,14 +25,6 @@ | |||
| 25 | * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. | 25 | * See Documentation/memory-barriers.txt for ACQUIRE/RELEASE definitions. |
| 26 | */ | 26 | */ |
| 27 | 27 | ||
| 28 | #ifndef atomic_read_acquire | ||
| 29 | #define atomic_read_acquire(v) smp_load_acquire(&(v)->counter) | ||
| 30 | #endif | ||
| 31 | |||
| 32 | #ifndef atomic_set_release | ||
| 33 | #define atomic_set_release(v, i) smp_store_release(&(v)->counter, (i)) | ||
| 34 | #endif | ||
| 35 | |||
| 36 | /* | 28 | /* |
| 37 | * The idea here is to build acquire/release variants by adding explicit | 29 | * The idea here is to build acquire/release variants by adding explicit |
| 38 | * barriers on top of the relaxed variant. In the case where the relaxed | 30 | * barriers on top of the relaxed variant. In the case where the relaxed |
| @@ -79,1238 +71,7 @@ | |||
| 79 | __ret; \ | 71 | __ret; \ |
| 80 | }) | 72 | }) |
| 81 | 73 | ||
| 82 | /* atomic_add_return_relaxed */ | 74 | #include <linux/atomic-fallback.h> |
| 83 | #ifndef atomic_add_return_relaxed | ||
| 84 | #define atomic_add_return_relaxed atomic_add_return | ||
| 85 | #define atomic_add_return_acquire atomic_add_return | ||
| 86 | #define atomic_add_return_release atomic_add_return | ||
| 87 | |||
| 88 | #else /* atomic_add_return_relaxed */ | ||
| 89 | |||
| 90 | #ifndef atomic_add_return_acquire | ||
| 91 | #define atomic_add_return_acquire(...) \ | ||
| 92 | __atomic_op_acquire(atomic_add_return, __VA_ARGS__) | ||
| 93 | #endif | ||
| 94 | |||
| 95 | #ifndef atomic_add_return_release | ||
| 96 | #define atomic_add_return_release(...) \ | ||
| 97 | __atomic_op_release(atomic_add_return, __VA_ARGS__) | ||
| 98 | #endif | ||
| 99 | |||
| 100 | #ifndef atomic_add_return | ||
| 101 | #define atomic_add_return(...) \ | ||
| 102 | __atomic_op_fence(atomic_add_return, __VA_ARGS__) | ||
| 103 | #endif | ||
| 104 | #endif /* atomic_add_return_relaxed */ | ||
| 105 | |||
| 106 | #ifndef atomic_inc | ||
| 107 | #define atomic_inc(v) atomic_add(1, (v)) | ||
| 108 | #endif | ||
| 109 | |||
| 110 | /* atomic_inc_return_relaxed */ | ||
| 111 | #ifndef atomic_inc_return_relaxed | ||
| 112 | |||
| 113 | #ifndef atomic_inc_return | ||
| 114 | #define atomic_inc_return(v) atomic_add_return(1, (v)) | ||
| 115 | #define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v)) | ||
| 116 | #define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v)) | ||
| 117 | #define atomic_inc_return_release(v) atomic_add_return_release(1, (v)) | ||
| 118 | #else /* atomic_inc_return */ | ||
| 119 | #define atomic_inc_return_relaxed atomic_inc_return | ||
| 120 | #define atomic_inc_return_acquire atomic_inc_return | ||
| 121 | #define atomic_inc_return_release atomic_inc_return | ||
| 122 | #endif /* atomic_inc_return */ | ||
| 123 | |||
| 124 | #else /* atomic_inc_return_relaxed */ | ||
| 125 | |||
| 126 | #ifndef atomic_inc_return_acquire | ||
| 127 | #define atomic_inc_return_acquire(...) \ | ||
| 128 | __atomic_op_acquire(atomic_inc_return, __VA_ARGS__) | ||
| 129 | #endif | ||
| 130 | |||
| 131 | #ifndef atomic_inc_return_release | ||
| 132 | #define atomic_inc_return_release(...) \ | ||
| 133 | __atomic_op_release(atomic_inc_return, __VA_ARGS__) | ||
| 134 | #endif | ||
| 135 | |||
| 136 | #ifndef atomic_inc_return | ||
| 137 | #define atomic_inc_return(...) \ | ||
| 138 | __atomic_op_fence(atomic_inc_return, __VA_ARGS__) | ||
| 139 | #endif | ||
| 140 | #endif /* atomic_inc_return_relaxed */ | ||
| 141 | |||
| 142 | /* atomic_sub_return_relaxed */ | ||
| 143 | #ifndef atomic_sub_return_relaxed | ||
| 144 | #define atomic_sub_return_relaxed atomic_sub_return | ||
| 145 | #define atomic_sub_return_acquire atomic_sub_return | ||
| 146 | #define atomic_sub_return_release atomic_sub_return | ||
| 147 | |||
| 148 | #else /* atomic_sub_return_relaxed */ | ||
| 149 | |||
| 150 | #ifndef atomic_sub_return_acquire | ||
| 151 | #define atomic_sub_return_acquire(...) \ | ||
| 152 | __atomic_op_acquire(atomic_sub_return, __VA_ARGS__) | ||
| 153 | #endif | ||
| 154 | |||
| 155 | #ifndef atomic_sub_return_release | ||
| 156 | #define atomic_sub_return_release(...) \ | ||
| 157 | __atomic_op_release(atomic_sub_return, __VA_ARGS__) | ||
| 158 | #endif | ||
| 159 | |||
| 160 | #ifndef atomic_sub_return | ||
| 161 | #define atomic_sub_return(...) \ | ||
| 162 | __atomic_op_fence(atomic_sub_return, __VA_ARGS__) | ||
| 163 | #endif | ||
| 164 | #endif /* atomic_sub_return_relaxed */ | ||
| 165 | |||
| 166 | #ifndef atomic_dec | ||
| 167 | #define atomic_dec(v) atomic_sub(1, (v)) | ||
| 168 | #endif | ||
| 169 | |||
| 170 | /* atomic_dec_return_relaxed */ | ||
| 171 | #ifndef atomic_dec_return_relaxed | ||
| 172 | |||
| 173 | #ifndef atomic_dec_return | ||
| 174 | #define atomic_dec_return(v) atomic_sub_return(1, (v)) | ||
| 175 | #define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v)) | ||
| 176 | #define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v)) | ||
| 177 | #define atomic_dec_return_release(v) atomic_sub_return_release(1, (v)) | ||
| 178 | #else /* atomic_dec_return */ | ||
| 179 | #define atomic_dec_return_relaxed atomic_dec_return | ||
| 180 | #define atomic_dec_return_acquire atomic_dec_return | ||
| 181 | #define atomic_dec_return_release atomic_dec_return | ||
| 182 | #endif /* atomic_dec_return */ | ||
| 183 | |||
| 184 | #else /* atomic_dec_return_relaxed */ | ||
| 185 | |||
| 186 | #ifndef atomic_dec_return_acquire | ||
| 187 | #define atomic_dec_return_acquire(...) \ | ||
| 188 | __atomic_op_acquire(atomic_dec_return, __VA_ARGS__) | ||
| 189 | #endif | ||
| 190 | |||
| 191 | #ifndef atomic_dec_return_release | ||
| 192 | #define atomic_dec_return_release(...) \ | ||
| 193 | __atomic_op_release(atomic_dec_return, __VA_ARGS__) | ||
| 194 | #endif | ||
| 195 | |||
| 196 | #ifndef atomic_dec_return | ||
| 197 | #define atomic_dec_return(...) \ | ||
| 198 | __atomic_op_fence(atomic_dec_return, __VA_ARGS__) | ||
| 199 | #endif | ||
| 200 | #endif /* atomic_dec_return_relaxed */ | ||
| 201 | |||
| 202 | |||
| 203 | /* atomic_fetch_add_relaxed */ | ||
| 204 | #ifndef atomic_fetch_add_relaxed | ||
| 205 | #define atomic_fetch_add_relaxed atomic_fetch_add | ||
| 206 | #define atomic_fetch_add_acquire atomic_fetch_add | ||
| 207 | #define atomic_fetch_add_release atomic_fetch_add | ||
| 208 | |||
| 209 | #else /* atomic_fetch_add_relaxed */ | ||
| 210 | |||
| 211 | #ifndef atomic_fetch_add_acquire | ||
| 212 | #define atomic_fetch_add_acquire(...) \ | ||
| 213 | __atomic_op_acquire(atomic_fetch_add, __VA_ARGS__) | ||
| 214 | #endif | ||
| 215 | |||
| 216 | #ifndef atomic_fetch_add_release | ||
| 217 | #define atomic_fetch_add_release(...) \ | ||
| 218 | __atomic_op_release(atomic_fetch_add, __VA_ARGS__) | ||
| 219 | #endif | ||
| 220 | |||
| 221 | #ifndef atomic_fetch_add | ||
| 222 | #define atomic_fetch_add(...) \ | ||
| 223 | __atomic_op_fence(atomic_fetch_add, __VA_ARGS__) | ||
| 224 | #endif | ||
| 225 | #endif /* atomic_fetch_add_relaxed */ | ||
| 226 | |||
| 227 | /* atomic_fetch_inc_relaxed */ | ||
| 228 | #ifndef atomic_fetch_inc_relaxed | ||
| 229 | |||
| 230 | #ifndef atomic_fetch_inc | ||
| 231 | #define atomic_fetch_inc(v) atomic_fetch_add(1, (v)) | ||
| 232 | #define atomic_fetch_inc_relaxed(v) atomic_fetch_add_relaxed(1, (v)) | ||
| 233 | #define atomic_fetch_inc_acquire(v) atomic_fetch_add_acquire(1, (v)) | ||
| 234 | #define atomic_fetch_inc_release(v) atomic_fetch_add_release(1, (v)) | ||
| 235 | #else /* atomic_fetch_inc */ | ||
| 236 | #define atomic_fetch_inc_relaxed atomic_fetch_inc | ||
| 237 | #define atomic_fetch_inc_acquire atomic_fetch_inc | ||
| 238 | #define atomic_fetch_inc_release atomic_fetch_inc | ||
| 239 | #endif /* atomic_fetch_inc */ | ||
| 240 | |||
| 241 | #else /* atomic_fetch_inc_relaxed */ | ||
| 242 | |||
| 243 | #ifndef atomic_fetch_inc_acquire | ||
| 244 | #define atomic_fetch_inc_acquire(...) \ | ||
| 245 | __atomic_op_acquire(atomic_fetch_inc, __VA_ARGS__) | ||
| 246 | #endif | ||
| 247 | |||
| 248 | #ifndef atomic_fetch_inc_release | ||
| 249 | #define atomic_fetch_inc_release(...) \ | ||
| 250 | __atomic_op_release(atomic_fetch_inc, __VA_ARGS__) | ||
| 251 | #endif | ||
| 252 | |||
| 253 | #ifndef atomic_fetch_inc | ||
| 254 | #define atomic_fetch_inc(...) \ | ||
| 255 | __atomic_op_fence(atomic_fetch_inc, __VA_ARGS__) | ||
| 256 | #endif | ||
| 257 | #endif /* atomic_fetch_inc_relaxed */ | ||
| 258 | |||
| 259 | /* atomic_fetch_sub_relaxed */ | ||
| 260 | #ifndef atomic_fetch_sub_relaxed | ||
| 261 | #define atomic_fetch_sub_relaxed atomic_fetch_sub | ||
| 262 | #define atomic_fetch_sub_acquire atomic_fetch_sub | ||
| 263 | #define atomic_fetch_sub_release atomic_fetch_sub | ||
| 264 | |||
| 265 | #else /* atomic_fetch_sub_relaxed */ | ||
| 266 | |||
| 267 | #ifndef atomic_fetch_sub_acquire | ||
| 268 | #define atomic_fetch_sub_acquire(...) \ | ||
| 269 | __atomic_op_acquire(atomic_fetch_sub, __VA_ARGS__) | ||
| 270 | #endif | ||
| 271 | |||
| 272 | #ifndef atomic_fetch_sub_release | ||
| 273 | #define atomic_fetch_sub_release(...) \ | ||
| 274 | __atomic_op_release(atomic_fetch_sub, __VA_ARGS__) | ||
| 275 | #endif | ||
| 276 | |||
| 277 | #ifndef atomic_fetch_sub | ||
| 278 | #define atomic_fetch_sub(...) \ | ||
| 279 | __atomic_op_fence(atomic_fetch_sub, __VA_ARGS__) | ||
| 280 | #endif | ||
| 281 | #endif /* atomic_fetch_sub_relaxed */ | ||
| 282 | |||
| 283 | /* atomic_fetch_dec_relaxed */ | ||
| 284 | #ifndef atomic_fetch_dec_relaxed | ||
| 285 | |||
| 286 | #ifndef atomic_fetch_dec | ||
| 287 | #define atomic_fetch_dec(v) atomic_fetch_sub(1, (v)) | ||
| 288 | #define atomic_fetch_dec_relaxed(v) atomic_fetch_sub_relaxed(1, (v)) | ||
| 289 | #define atomic_fetch_dec_acquire(v) atomic_fetch_sub_acquire(1, (v)) | ||
| 290 | #define atomic_fetch_dec_release(v) atomic_fetch_sub_release(1, (v)) | ||
| 291 | #else /* atomic_fetch_dec */ | ||
| 292 | #define atomic_fetch_dec_relaxed atomic_fetch_dec | ||
| 293 | #define atomic_fetch_dec_acquire atomic_fetch_dec | ||
| 294 | #define atomic_fetch_dec_release atomic_fetch_dec | ||
| 295 | #endif /* atomic_fetch_dec */ | ||
| 296 | |||
| 297 | #else /* atomic_fetch_dec_relaxed */ | ||
| 298 | |||
| 299 | #ifndef atomic_fetch_dec_acquire | ||
| 300 | #define atomic_fetch_dec_acquire(...) \ | ||
| 301 | __atomic_op_acquire(atomic_fetch_dec, __VA_ARGS__) | ||
| 302 | #endif | ||
| 303 | |||
| 304 | #ifndef atomic_fetch_dec_release | ||
| 305 | #define atomic_fetch_dec_release(...) \ | ||
| 306 | __atomic_op_release(atomic_fetch_dec, __VA_ARGS__) | ||
| 307 | #endif | ||
| 308 | |||
| 309 | #ifndef atomic_fetch_dec | ||
| 310 | #define atomic_fetch_dec(...) \ | ||
| 311 | __atomic_op_fence(atomic_fetch_dec, __VA_ARGS__) | ||
| 312 | #endif | ||
| 313 | #endif /* atomic_fetch_dec_relaxed */ | ||
| 314 | |||
| 315 | /* atomic_fetch_or_relaxed */ | ||
| 316 | #ifndef atomic_fetch_or_relaxed | ||
| 317 | #define atomic_fetch_or_relaxed atomic_fetch_or | ||
| 318 | #define atomic_fetch_or_acquire atomic_fetch_or | ||
| 319 | #define atomic_fetch_or_release atomic_fetch_or | ||
| 320 | |||
| 321 | #else /* atomic_fetch_or_relaxed */ | ||
| 322 | |||
| 323 | #ifndef atomic_fetch_or_acquire | ||
| 324 | #define atomic_fetch_or_acquire(...) \ | ||
| 325 | __atomic_op_acquire(atomic_fetch_or, __VA_ARGS__) | ||
| 326 | #endif | ||
| 327 | |||
| 328 | #ifndef atomic_fetch_or_release | ||
| 329 | #define atomic_fetch_or_release(...) \ | ||
| 330 | __atomic_op_release(atomic_fetch_or, __VA_ARGS__) | ||
| 331 | #endif | ||
| 332 | |||
| 333 | #ifndef atomic_fetch_or | ||
| 334 | #define atomic_fetch_or(...) \ | ||
| 335 | __atomic_op_fence(atomic_fetch_or, __VA_ARGS__) | ||
| 336 | #endif | ||
| 337 | #endif /* atomic_fetch_or_relaxed */ | ||
| 338 | |||
| 339 | /* atomic_fetch_and_relaxed */ | ||
| 340 | #ifndef atomic_fetch_and_relaxed | ||
| 341 | #define atomic_fetch_and_relaxed atomic_fetch_and | ||
| 342 | #define atomic_fetch_and_acquire atomic_fetch_and | ||
| 343 | #define atomic_fetch_and_release atomic_fetch_and | ||
| 344 | |||
| 345 | #else /* atomic_fetch_and_relaxed */ | ||
| 346 | |||
| 347 | #ifndef atomic_fetch_and_acquire | ||
| 348 | #define atomic_fetch_and_acquire(...) \ | ||
| 349 | __atomic_op_acquire(atomic_fetch_and, __VA_ARGS__) | ||
| 350 | #endif | ||
| 351 | |||
| 352 | #ifndef atomic_fetch_and_release | ||
| 353 | #define atomic_fetch_and_release(...) \ | ||
| 354 | __atomic_op_release(atomic_fetch_and, __VA_ARGS__) | ||
| 355 | #endif | ||
| 356 | |||
| 357 | #ifndef atomic_fetch_and | ||
| 358 | #define atomic_fetch_and(...) \ | ||
| 359 | __atomic_op_fence(atomic_fetch_and, __VA_ARGS__) | ||
| 360 | #endif | ||
| 361 | #endif /* atomic_fetch_and_relaxed */ | ||
| 362 | |||
| 363 | #ifndef atomic_andnot | ||
| 364 | #define atomic_andnot(i, v) atomic_and(~(int)(i), (v)) | ||
| 365 | #endif | ||
| 366 | |||
| 367 | #ifndef atomic_fetch_andnot_relaxed | ||
| 368 | |||
| 369 | #ifndef atomic_fetch_andnot | ||
| 370 | #define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v)) | ||
| 371 | #define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v)) | ||
| 372 | #define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v)) | ||
| 373 | #define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v)) | ||
| 374 | #else /* atomic_fetch_andnot */ | ||
| 375 | #define atomic_fetch_andnot_relaxed atomic_fetch_andnot | ||
| 376 | #define atomic_fetch_andnot_acquire atomic_fetch_andnot | ||
| 377 | #define atomic_fetch_andnot_release atomic_fetch_andnot | ||
| 378 | #endif /* atomic_fetch_andnot */ | ||
| 379 | |||
| 380 | #else /* atomic_fetch_andnot_relaxed */ | ||
| 381 | |||
| 382 | #ifndef atomic_fetch_andnot_acquire | ||
| 383 | #define atomic_fetch_andnot_acquire(...) \ | ||
| 384 | __atomic_op_acquire(atomic_fetch_andnot, __VA_ARGS__) | ||
| 385 | #endif | ||
| 386 | |||
| 387 | #ifndef atomic_fetch_andnot_release | ||
| 388 | #define atomic_fetch_andnot_release(...) \ | ||
| 389 | __atomic_op_release(atomic_fetch_andnot, __VA_ARGS__) | ||
| 390 | #endif | ||
| 391 | |||
| 392 | #ifndef atomic_fetch_andnot | ||
| 393 | #define atomic_fetch_andnot(...) \ | ||
| 394 | __atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__) | ||
| 395 | #endif | ||
| 396 | #endif /* atomic_fetch_andnot_relaxed */ | ||
| 397 | |||
| 398 | /* atomic_fetch_xor_relaxed */ | ||
| 399 | #ifndef atomic_fetch_xor_relaxed | ||
| 400 | #define atomic_fetch_xor_relaxed atomic_fetch_xor | ||
| 401 | #define atomic_fetch_xor_acquire atomic_fetch_xor | ||
| 402 | #define atomic_fetch_xor_release atomic_fetch_xor | ||
| 403 | |||
| 404 | #else /* atomic_fetch_xor_relaxed */ | ||
| 405 | |||
| 406 | #ifndef atomic_fetch_xor_acquire | ||
| 407 | #define atomic_fetch_xor_acquire(...) \ | ||
| 408 | __atomic_op_acquire(atomic_fetch_xor, __VA_ARGS__) | ||
| 409 | #endif | ||
| 410 | |||
| 411 | #ifndef atomic_fetch_xor_release | ||
| 412 | #define atomic_fetch_xor_release(...) \ | ||
| 413 | __atomic_op_release(atomic_fetch_xor, __VA_ARGS__) | ||
| 414 | #endif | ||
| 415 | |||
| 416 | #ifndef atomic_fetch_xor | ||
| 417 | #define atomic_fetch_xor(...) \ | ||
| 418 | __atomic_op_fence(atomic_fetch_xor, __VA_ARGS__) | ||
| 419 | #endif | ||
| 420 | #endif /* atomic_fetch_xor_relaxed */ | ||
| 421 | |||
| 422 | |||
| 423 | /* atomic_xchg_relaxed */ | ||
| 424 | #ifndef atomic_xchg_relaxed | ||
| 425 | #define atomic_xchg_relaxed atomic_xchg | ||
| 426 | #define atomic_xchg_acquire atomic_xchg | ||
| 427 | #define atomic_xchg_release atomic_xchg | ||
| 428 | |||
| 429 | #else /* atomic_xchg_relaxed */ | ||
| 430 | |||
| 431 | #ifndef atomic_xchg_acquire | ||
| 432 | #define atomic_xchg_acquire(...) \ | ||
| 433 | __atomic_op_acquire(atomic_xchg, __VA_ARGS__) | ||
| 434 | #endif | ||
| 435 | |||
| 436 | #ifndef atomic_xchg_release | ||
| 437 | #define atomic_xchg_release(...) \ | ||
| 438 | __atomic_op_release(atomic_xchg, __VA_ARGS__) | ||
| 439 | #endif | ||
| 440 | |||
| 441 | #ifndef atomic_xchg | ||
| 442 | #define atomic_xchg(...) \ | ||
| 443 | __atomic_op_fence(atomic_xchg, __VA_ARGS__) | ||
| 444 | #endif | ||
| 445 | #endif /* atomic_xchg_relaxed */ | ||
| 446 | |||
| 447 | /* atomic_cmpxchg_relaxed */ | ||
| 448 | #ifndef atomic_cmpxchg_relaxed | ||
| 449 | #define atomic_cmpxchg_relaxed atomic_cmpxchg | ||
| 450 | #define atomic_cmpxchg_acquire atomic_cmpxchg | ||
| 451 | #define atomic_cmpxchg_release atomic_cmpxchg | ||
| 452 | |||
| 453 | #else /* atomic_cmpxchg_relaxed */ | ||
| 454 | |||
| 455 | #ifndef atomic_cmpxchg_acquire | ||
| 456 | #define atomic_cmpxchg_acquire(...) \ | ||
| 457 | __atomic_op_acquire(atomic_cmpxchg, __VA_ARGS__) | ||
| 458 | #endif | ||
| 459 | |||
| 460 | #ifndef atomic_cmpxchg_release | ||
| 461 | #define atomic_cmpxchg_release(...) \ | ||
| 462 | __atomic_op_release(atomic_cmpxchg, __VA_ARGS__) | ||
| 463 | #endif | ||
| 464 | |||
| 465 | #ifndef atomic_cmpxchg | ||
| 466 | #define atomic_cmpxchg(...) \ | ||
| 467 | __atomic_op_fence(atomic_cmpxchg, __VA_ARGS__) | ||
| 468 | #endif | ||
| 469 | #endif /* atomic_cmpxchg_relaxed */ | ||
| 470 | |||
| 471 | #ifndef atomic_try_cmpxchg | ||
| 472 | |||
| 473 | #define __atomic_try_cmpxchg(type, _p, _po, _n) \ | ||
| 474 | ({ \ | ||
| 475 | typeof(_po) __po = (_po); \ | ||
| 476 | typeof(*(_po)) __r, __o = *__po; \ | ||
| 477 | __r = atomic_cmpxchg##type((_p), __o, (_n)); \ | ||
| 478 | if (unlikely(__r != __o)) \ | ||
| 479 | *__po = __r; \ | ||
| 480 | likely(__r == __o); \ | ||
| 481 | }) | ||
| 482 | |||
| 483 | #define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n) | ||
| 484 | #define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n) | ||
| 485 | #define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n) | ||
| 486 | #define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n) | ||
| 487 | |||
| 488 | #else /* atomic_try_cmpxchg */ | ||
| 489 | #define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg | ||
| 490 | #define atomic_try_cmpxchg_acquire atomic_try_cmpxchg | ||
| 491 | #define atomic_try_cmpxchg_release atomic_try_cmpxchg | ||
| 492 | #endif /* atomic_try_cmpxchg */ | ||
| 493 | |||
| 494 | /* cmpxchg_relaxed */ | ||
| 495 | #ifndef cmpxchg_relaxed | ||
| 496 | #define cmpxchg_relaxed cmpxchg | ||
| 497 | #define cmpxchg_acquire cmpxchg | ||
| 498 | #define cmpxchg_release cmpxchg | ||
| 499 | |||
| 500 | #else /* cmpxchg_relaxed */ | ||
| 501 | |||
| 502 | #ifndef cmpxchg_acquire | ||
| 503 | #define cmpxchg_acquire(...) \ | ||
| 504 | __atomic_op_acquire(cmpxchg, __VA_ARGS__) | ||
| 505 | #endif | ||
| 506 | |||
| 507 | #ifndef cmpxchg_release | ||
| 508 | #define cmpxchg_release(...) \ | ||
| 509 | __atomic_op_release(cmpxchg, __VA_ARGS__) | ||
| 510 | #endif | ||
| 511 | |||
| 512 | #ifndef cmpxchg | ||
| 513 | #define cmpxchg(...) \ | ||
| 514 | __atomic_op_fence(cmpxchg, __VA_ARGS__) | ||
| 515 | #endif | ||
| 516 | #endif /* cmpxchg_relaxed */ | ||
| 517 | |||
| 518 | /* cmpxchg64_relaxed */ | ||
| 519 | #ifndef cmpxchg64_relaxed | ||
| 520 | #define cmpxchg64_relaxed cmpxchg64 | ||
| 521 | #define cmpxchg64_acquire cmpxchg64 | ||
| 522 | #define cmpxchg64_release cmpxchg64 | ||
| 523 | |||
| 524 | #else /* cmpxchg64_relaxed */ | ||
| 525 | |||
| 526 | #ifndef cmpxchg64_acquire | ||
| 527 | #define cmpxchg64_acquire(...) \ | ||
| 528 | __atomic_op_acquire(cmpxchg64, __VA_ARGS__) | ||
| 529 | #endif | ||
| 530 | |||
| 531 | #ifndef cmpxchg64_release | ||
| 532 | #define cmpxchg64_release(...) \ | ||
| 533 | __atomic_op_release(cmpxchg64, __VA_ARGS__) | ||
| 534 | #endif | ||
| 535 | |||
| 536 | #ifndef cmpxchg64 | ||
| 537 | #define cmpxchg64(...) \ | ||
| 538 | __atomic_op_fence(cmpxchg64, __VA_ARGS__) | ||
| 539 | #endif | ||
| 540 | #endif /* cmpxchg64_relaxed */ | ||
| 541 | |||
| 542 | /* xchg_relaxed */ | ||
| 543 | #ifndef xchg_relaxed | ||
| 544 | #define xchg_relaxed xchg | ||
| 545 | #define xchg_acquire xchg | ||
| 546 | #define xchg_release xchg | ||
| 547 | |||
| 548 | #else /* xchg_relaxed */ | ||
| 549 | |||
| 550 | #ifndef xchg_acquire | ||
| 551 | #define xchg_acquire(...) __atomic_op_acquire(xchg, __VA_ARGS__) | ||
| 552 | #endif | ||
| 553 | |||
| 554 | #ifndef xchg_release | ||
| 555 | #define xchg_release(...) __atomic_op_release(xchg, __VA_ARGS__) | ||
| 556 | #endif | ||
| 557 | |||
| 558 | #ifndef xchg | ||
| 559 | #define xchg(...) __atomic_op_fence(xchg, __VA_ARGS__) | ||
| 560 | #endif | ||
| 561 | #endif /* xchg_relaxed */ | ||
| 562 | |||
| 563 | /** | ||
| 564 | * atomic_fetch_add_unless - add unless the number is already a given value | ||
| 565 | * @v: pointer of type atomic_t | ||
| 566 | * @a: the amount to add to v... | ||
| 567 | * @u: ...unless v is equal to u. | ||
| 568 | * | ||
| 569 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 570 | * Returns the original value of @v. | ||
| 571 | */ | ||
| 572 | #ifndef atomic_fetch_add_unless | ||
| 573 | static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u) | ||
| 574 | { | ||
| 575 | int c = atomic_read(v); | ||
| 576 | |||
| 577 | do { | ||
| 578 | if (unlikely(c == u)) | ||
| 579 | break; | ||
| 580 | } while (!atomic_try_cmpxchg(v, &c, c + a)); | ||
| 581 | |||
| 582 | return c; | ||
| 583 | } | ||
| 584 | #endif | ||
| 585 | |||
| 586 | /** | ||
| 587 | * atomic_add_unless - add unless the number is already a given value | ||
| 588 | * @v: pointer of type atomic_t | ||
| 589 | * @a: the amount to add to v... | ||
| 590 | * @u: ...unless v is equal to u. | ||
| 591 | * | ||
| 592 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 593 | * Returns true if the addition was done. | ||
| 594 | */ | ||
| 595 | static inline bool atomic_add_unless(atomic_t *v, int a, int u) | ||
| 596 | { | ||
| 597 | return atomic_fetch_add_unless(v, a, u) != u; | ||
| 598 | } | ||
| 599 | |||
| 600 | /** | ||
| 601 | * atomic_inc_not_zero - increment unless the number is zero | ||
| 602 | * @v: pointer of type atomic_t | ||
| 603 | * | ||
| 604 | * Atomically increments @v by 1, if @v is non-zero. | ||
| 605 | * Returns true if the increment was done. | ||
| 606 | */ | ||
| 607 | #ifndef atomic_inc_not_zero | ||
| 608 | #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) | ||
| 609 | #endif | ||
| 610 | |||
| 611 | /** | ||
| 612 | * atomic_inc_and_test - increment and test | ||
| 613 | * @v: pointer of type atomic_t | ||
| 614 | * | ||
| 615 | * Atomically increments @v by 1 | ||
| 616 | * and returns true if the result is zero, or false for all | ||
| 617 | * other cases. | ||
| 618 | */ | ||
| 619 | #ifndef atomic_inc_and_test | ||
| 620 | static inline bool atomic_inc_and_test(atomic_t *v) | ||
| 621 | { | ||
| 622 | return atomic_inc_return(v) == 0; | ||
| 623 | } | ||
| 624 | #endif | ||
| 625 | |||
| 626 | /** | ||
| 627 | * atomic_dec_and_test - decrement and test | ||
| 628 | * @v: pointer of type atomic_t | ||
| 629 | * | ||
| 630 | * Atomically decrements @v by 1 and | ||
| 631 | * returns true if the result is 0, or false for all other | ||
| 632 | * cases. | ||
| 633 | */ | ||
| 634 | #ifndef atomic_dec_and_test | ||
| 635 | static inline bool atomic_dec_and_test(atomic_t *v) | ||
| 636 | { | ||
| 637 | return atomic_dec_return(v) == 0; | ||
| 638 | } | ||
| 639 | #endif | ||
| 640 | |||
| 641 | /** | ||
| 642 | * atomic_sub_and_test - subtract value from variable and test result | ||
| 643 | * @i: integer value to subtract | ||
| 644 | * @v: pointer of type atomic_t | ||
| 645 | * | ||
| 646 | * Atomically subtracts @i from @v and returns | ||
| 647 | * true if the result is zero, or false for all | ||
| 648 | * other cases. | ||
| 649 | */ | ||
| 650 | #ifndef atomic_sub_and_test | ||
| 651 | static inline bool atomic_sub_and_test(int i, atomic_t *v) | ||
| 652 | { | ||
| 653 | return atomic_sub_return(i, v) == 0; | ||
| 654 | } | ||
| 655 | #endif | ||
| 656 | |||
| 657 | /** | ||
| 658 | * atomic_add_negative - add and test if negative | ||
| 659 | * @i: integer value to add | ||
| 660 | * @v: pointer of type atomic_t | ||
| 661 | * | ||
| 662 | * Atomically adds @i to @v and returns true | ||
| 663 | * if the result is negative, or false when | ||
| 664 | * result is greater than or equal to zero. | ||
| 665 | */ | ||
| 666 | #ifndef atomic_add_negative | ||
| 667 | static inline bool atomic_add_negative(int i, atomic_t *v) | ||
| 668 | { | ||
| 669 | return atomic_add_return(i, v) < 0; | ||
| 670 | } | ||
| 671 | #endif | ||
| 672 | |||
| 673 | #ifndef atomic_inc_unless_negative | ||
| 674 | static inline bool atomic_inc_unless_negative(atomic_t *v) | ||
| 675 | { | ||
| 676 | int c = atomic_read(v); | ||
| 677 | |||
| 678 | do { | ||
| 679 | if (unlikely(c < 0)) | ||
| 680 | return false; | ||
| 681 | } while (!atomic_try_cmpxchg(v, &c, c + 1)); | ||
| 682 | |||
| 683 | return true; | ||
| 684 | } | ||
| 685 | #endif | ||
| 686 | |||
| 687 | #ifndef atomic_dec_unless_positive | ||
| 688 | static inline bool atomic_dec_unless_positive(atomic_t *v) | ||
| 689 | { | ||
| 690 | int c = atomic_read(v); | ||
| 691 | |||
| 692 | do { | ||
| 693 | if (unlikely(c > 0)) | ||
| 694 | return false; | ||
| 695 | } while (!atomic_try_cmpxchg(v, &c, c - 1)); | ||
| 696 | |||
| 697 | return true; | ||
| 698 | } | ||
| 699 | #endif | ||
| 700 | |||
| 701 | /* | ||
| 702 | * atomic_dec_if_positive - decrement by 1 if old value positive | ||
| 703 | * @v: pointer of type atomic_t | ||
| 704 | * | ||
| 705 | * The function returns the old value of *v minus 1, even if | ||
| 706 | * the atomic variable, v, was not decremented. | ||
| 707 | */ | ||
| 708 | #ifndef atomic_dec_if_positive | ||
| 709 | static inline int atomic_dec_if_positive(atomic_t *v) | ||
| 710 | { | ||
| 711 | int dec, c = atomic_read(v); | ||
| 712 | |||
| 713 | do { | ||
| 714 | dec = c - 1; | ||
| 715 | if (unlikely(dec < 0)) | ||
| 716 | break; | ||
| 717 | } while (!atomic_try_cmpxchg(v, &c, dec)); | ||
| 718 | |||
| 719 | return dec; | ||
| 720 | } | ||
| 721 | #endif | ||
| 722 | |||
| 723 | #define atomic_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
| 724 | #define atomic_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
| 725 | |||
| 726 | #ifdef CONFIG_GENERIC_ATOMIC64 | ||
| 727 | #include <asm-generic/atomic64.h> | ||
| 728 | #endif | ||
| 729 | |||
| 730 | #ifndef atomic64_read_acquire | ||
| 731 | #define atomic64_read_acquire(v) smp_load_acquire(&(v)->counter) | ||
| 732 | #endif | ||
| 733 | |||
| 734 | #ifndef atomic64_set_release | ||
| 735 | #define atomic64_set_release(v, i) smp_store_release(&(v)->counter, (i)) | ||
| 736 | #endif | ||
| 737 | |||
| 738 | /* atomic64_add_return_relaxed */ | ||
| 739 | #ifndef atomic64_add_return_relaxed | ||
| 740 | #define atomic64_add_return_relaxed atomic64_add_return | ||
| 741 | #define atomic64_add_return_acquire atomic64_add_return | ||
| 742 | #define atomic64_add_return_release atomic64_add_return | ||
| 743 | |||
| 744 | #else /* atomic64_add_return_relaxed */ | ||
| 745 | |||
| 746 | #ifndef atomic64_add_return_acquire | ||
| 747 | #define atomic64_add_return_acquire(...) \ | ||
| 748 | __atomic_op_acquire(atomic64_add_return, __VA_ARGS__) | ||
| 749 | #endif | ||
| 750 | |||
| 751 | #ifndef atomic64_add_return_release | ||
| 752 | #define atomic64_add_return_release(...) \ | ||
| 753 | __atomic_op_release(atomic64_add_return, __VA_ARGS__) | ||
| 754 | #endif | ||
| 755 | |||
| 756 | #ifndef atomic64_add_return | ||
| 757 | #define atomic64_add_return(...) \ | ||
| 758 | __atomic_op_fence(atomic64_add_return, __VA_ARGS__) | ||
| 759 | #endif | ||
| 760 | #endif /* atomic64_add_return_relaxed */ | ||
| 761 | |||
| 762 | #ifndef atomic64_inc | ||
| 763 | #define atomic64_inc(v) atomic64_add(1, (v)) | ||
| 764 | #endif | ||
| 765 | |||
| 766 | /* atomic64_inc_return_relaxed */ | ||
| 767 | #ifndef atomic64_inc_return_relaxed | ||
| 768 | |||
| 769 | #ifndef atomic64_inc_return | ||
| 770 | #define atomic64_inc_return(v) atomic64_add_return(1, (v)) | ||
| 771 | #define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v)) | ||
| 772 | #define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v)) | ||
| 773 | #define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v)) | ||
| 774 | #else /* atomic64_inc_return */ | ||
| 775 | #define atomic64_inc_return_relaxed atomic64_inc_return | ||
| 776 | #define atomic64_inc_return_acquire atomic64_inc_return | ||
| 777 | #define atomic64_inc_return_release atomic64_inc_return | ||
| 778 | #endif /* atomic64_inc_return */ | ||
| 779 | |||
| 780 | #else /* atomic64_inc_return_relaxed */ | ||
| 781 | |||
| 782 | #ifndef atomic64_inc_return_acquire | ||
| 783 | #define atomic64_inc_return_acquire(...) \ | ||
| 784 | __atomic_op_acquire(atomic64_inc_return, __VA_ARGS__) | ||
| 785 | #endif | ||
| 786 | |||
| 787 | #ifndef atomic64_inc_return_release | ||
| 788 | #define atomic64_inc_return_release(...) \ | ||
| 789 | __atomic_op_release(atomic64_inc_return, __VA_ARGS__) | ||
| 790 | #endif | ||
| 791 | |||
| 792 | #ifndef atomic64_inc_return | ||
| 793 | #define atomic64_inc_return(...) \ | ||
| 794 | __atomic_op_fence(atomic64_inc_return, __VA_ARGS__) | ||
| 795 | #endif | ||
| 796 | #endif /* atomic64_inc_return_relaxed */ | ||
| 797 | |||
| 798 | |||
| 799 | /* atomic64_sub_return_relaxed */ | ||
| 800 | #ifndef atomic64_sub_return_relaxed | ||
| 801 | #define atomic64_sub_return_relaxed atomic64_sub_return | ||
| 802 | #define atomic64_sub_return_acquire atomic64_sub_return | ||
| 803 | #define atomic64_sub_return_release atomic64_sub_return | ||
| 804 | |||
| 805 | #else /* atomic64_sub_return_relaxed */ | ||
| 806 | |||
| 807 | #ifndef atomic64_sub_return_acquire | ||
| 808 | #define atomic64_sub_return_acquire(...) \ | ||
| 809 | __atomic_op_acquire(atomic64_sub_return, __VA_ARGS__) | ||
| 810 | #endif | ||
| 811 | |||
| 812 | #ifndef atomic64_sub_return_release | ||
| 813 | #define atomic64_sub_return_release(...) \ | ||
| 814 | __atomic_op_release(atomic64_sub_return, __VA_ARGS__) | ||
| 815 | #endif | ||
| 816 | |||
| 817 | #ifndef atomic64_sub_return | ||
| 818 | #define atomic64_sub_return(...) \ | ||
| 819 | __atomic_op_fence(atomic64_sub_return, __VA_ARGS__) | ||
| 820 | #endif | ||
| 821 | #endif /* atomic64_sub_return_relaxed */ | ||
| 822 | |||
| 823 | #ifndef atomic64_dec | ||
| 824 | #define atomic64_dec(v) atomic64_sub(1, (v)) | ||
| 825 | #endif | ||
| 826 | |||
| 827 | /* atomic64_dec_return_relaxed */ | ||
| 828 | #ifndef atomic64_dec_return_relaxed | ||
| 829 | |||
| 830 | #ifndef atomic64_dec_return | ||
| 831 | #define atomic64_dec_return(v) atomic64_sub_return(1, (v)) | ||
| 832 | #define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v)) | ||
| 833 | #define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v)) | ||
| 834 | #define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v)) | ||
| 835 | #else /* atomic64_dec_return */ | ||
| 836 | #define atomic64_dec_return_relaxed atomic64_dec_return | ||
| 837 | #define atomic64_dec_return_acquire atomic64_dec_return | ||
| 838 | #define atomic64_dec_return_release atomic64_dec_return | ||
| 839 | #endif /* atomic64_dec_return */ | ||
| 840 | |||
| 841 | #else /* atomic64_dec_return_relaxed */ | ||
| 842 | |||
| 843 | #ifndef atomic64_dec_return_acquire | ||
| 844 | #define atomic64_dec_return_acquire(...) \ | ||
| 845 | __atomic_op_acquire(atomic64_dec_return, __VA_ARGS__) | ||
| 846 | #endif | ||
| 847 | |||
| 848 | #ifndef atomic64_dec_return_release | ||
| 849 | #define atomic64_dec_return_release(...) \ | ||
| 850 | __atomic_op_release(atomic64_dec_return, __VA_ARGS__) | ||
| 851 | #endif | ||
| 852 | |||
| 853 | #ifndef atomic64_dec_return | ||
| 854 | #define atomic64_dec_return(...) \ | ||
| 855 | __atomic_op_fence(atomic64_dec_return, __VA_ARGS__) | ||
| 856 | #endif | ||
| 857 | #endif /* atomic64_dec_return_relaxed */ | ||
| 858 | |||
| 859 | |||
| 860 | /* atomic64_fetch_add_relaxed */ | ||
| 861 | #ifndef atomic64_fetch_add_relaxed | ||
| 862 | #define atomic64_fetch_add_relaxed atomic64_fetch_add | ||
| 863 | #define atomic64_fetch_add_acquire atomic64_fetch_add | ||
| 864 | #define atomic64_fetch_add_release atomic64_fetch_add | ||
| 865 | |||
| 866 | #else /* atomic64_fetch_add_relaxed */ | ||
| 867 | |||
| 868 | #ifndef atomic64_fetch_add_acquire | ||
| 869 | #define atomic64_fetch_add_acquire(...) \ | ||
| 870 | __atomic_op_acquire(atomic64_fetch_add, __VA_ARGS__) | ||
| 871 | #endif | ||
| 872 | |||
| 873 | #ifndef atomic64_fetch_add_release | ||
| 874 | #define atomic64_fetch_add_release(...) \ | ||
| 875 | __atomic_op_release(atomic64_fetch_add, __VA_ARGS__) | ||
| 876 | #endif | ||
| 877 | |||
| 878 | #ifndef atomic64_fetch_add | ||
| 879 | #define atomic64_fetch_add(...) \ | ||
| 880 | __atomic_op_fence(atomic64_fetch_add, __VA_ARGS__) | ||
| 881 | #endif | ||
| 882 | #endif /* atomic64_fetch_add_relaxed */ | ||
| 883 | |||
| 884 | /* atomic64_fetch_inc_relaxed */ | ||
| 885 | #ifndef atomic64_fetch_inc_relaxed | ||
| 886 | |||
| 887 | #ifndef atomic64_fetch_inc | ||
| 888 | #define atomic64_fetch_inc(v) atomic64_fetch_add(1, (v)) | ||
| 889 | #define atomic64_fetch_inc_relaxed(v) atomic64_fetch_add_relaxed(1, (v)) | ||
| 890 | #define atomic64_fetch_inc_acquire(v) atomic64_fetch_add_acquire(1, (v)) | ||
| 891 | #define atomic64_fetch_inc_release(v) atomic64_fetch_add_release(1, (v)) | ||
| 892 | #else /* atomic64_fetch_inc */ | ||
| 893 | #define atomic64_fetch_inc_relaxed atomic64_fetch_inc | ||
| 894 | #define atomic64_fetch_inc_acquire atomic64_fetch_inc | ||
| 895 | #define atomic64_fetch_inc_release atomic64_fetch_inc | ||
| 896 | #endif /* atomic64_fetch_inc */ | ||
| 897 | |||
| 898 | #else /* atomic64_fetch_inc_relaxed */ | ||
| 899 | |||
| 900 | #ifndef atomic64_fetch_inc_acquire | ||
| 901 | #define atomic64_fetch_inc_acquire(...) \ | ||
| 902 | __atomic_op_acquire(atomic64_fetch_inc, __VA_ARGS__) | ||
| 903 | #endif | ||
| 904 | |||
| 905 | #ifndef atomic64_fetch_inc_release | ||
| 906 | #define atomic64_fetch_inc_release(...) \ | ||
| 907 | __atomic_op_release(atomic64_fetch_inc, __VA_ARGS__) | ||
| 908 | #endif | ||
| 909 | |||
| 910 | #ifndef atomic64_fetch_inc | ||
| 911 | #define atomic64_fetch_inc(...) \ | ||
| 912 | __atomic_op_fence(atomic64_fetch_inc, __VA_ARGS__) | ||
| 913 | #endif | ||
| 914 | #endif /* atomic64_fetch_inc_relaxed */ | ||
| 915 | |||
| 916 | /* atomic64_fetch_sub_relaxed */ | ||
| 917 | #ifndef atomic64_fetch_sub_relaxed | ||
| 918 | #define atomic64_fetch_sub_relaxed atomic64_fetch_sub | ||
| 919 | #define atomic64_fetch_sub_acquire atomic64_fetch_sub | ||
| 920 | #define atomic64_fetch_sub_release atomic64_fetch_sub | ||
| 921 | |||
| 922 | #else /* atomic64_fetch_sub_relaxed */ | ||
| 923 | |||
| 924 | #ifndef atomic64_fetch_sub_acquire | ||
| 925 | #define atomic64_fetch_sub_acquire(...) \ | ||
| 926 | __atomic_op_acquire(atomic64_fetch_sub, __VA_ARGS__) | ||
| 927 | #endif | ||
| 928 | |||
| 929 | #ifndef atomic64_fetch_sub_release | ||
| 930 | #define atomic64_fetch_sub_release(...) \ | ||
| 931 | __atomic_op_release(atomic64_fetch_sub, __VA_ARGS__) | ||
| 932 | #endif | ||
| 933 | |||
| 934 | #ifndef atomic64_fetch_sub | ||
| 935 | #define atomic64_fetch_sub(...) \ | ||
| 936 | __atomic_op_fence(atomic64_fetch_sub, __VA_ARGS__) | ||
| 937 | #endif | ||
| 938 | #endif /* atomic64_fetch_sub_relaxed */ | ||
| 939 | |||
| 940 | /* atomic64_fetch_dec_relaxed */ | ||
| 941 | #ifndef atomic64_fetch_dec_relaxed | ||
| 942 | |||
| 943 | #ifndef atomic64_fetch_dec | ||
| 944 | #define atomic64_fetch_dec(v) atomic64_fetch_sub(1, (v)) | ||
| 945 | #define atomic64_fetch_dec_relaxed(v) atomic64_fetch_sub_relaxed(1, (v)) | ||
| 946 | #define atomic64_fetch_dec_acquire(v) atomic64_fetch_sub_acquire(1, (v)) | ||
| 947 | #define atomic64_fetch_dec_release(v) atomic64_fetch_sub_release(1, (v)) | ||
| 948 | #else /* atomic64_fetch_dec */ | ||
| 949 | #define atomic64_fetch_dec_relaxed atomic64_fetch_dec | ||
| 950 | #define atomic64_fetch_dec_acquire atomic64_fetch_dec | ||
| 951 | #define atomic64_fetch_dec_release atomic64_fetch_dec | ||
| 952 | #endif /* atomic64_fetch_dec */ | ||
| 953 | |||
| 954 | #else /* atomic64_fetch_dec_relaxed */ | ||
| 955 | |||
| 956 | #ifndef atomic64_fetch_dec_acquire | ||
| 957 | #define atomic64_fetch_dec_acquire(...) \ | ||
| 958 | __atomic_op_acquire(atomic64_fetch_dec, __VA_ARGS__) | ||
| 959 | #endif | ||
| 960 | |||
| 961 | #ifndef atomic64_fetch_dec_release | ||
| 962 | #define atomic64_fetch_dec_release(...) \ | ||
| 963 | __atomic_op_release(atomic64_fetch_dec, __VA_ARGS__) | ||
| 964 | #endif | ||
| 965 | |||
| 966 | #ifndef atomic64_fetch_dec | ||
| 967 | #define atomic64_fetch_dec(...) \ | ||
| 968 | __atomic_op_fence(atomic64_fetch_dec, __VA_ARGS__) | ||
| 969 | #endif | ||
| 970 | #endif /* atomic64_fetch_dec_relaxed */ | ||
| 971 | |||
| 972 | /* atomic64_fetch_or_relaxed */ | ||
| 973 | #ifndef atomic64_fetch_or_relaxed | ||
| 974 | #define atomic64_fetch_or_relaxed atomic64_fetch_or | ||
| 975 | #define atomic64_fetch_or_acquire atomic64_fetch_or | ||
| 976 | #define atomic64_fetch_or_release atomic64_fetch_or | ||
| 977 | |||
| 978 | #else /* atomic64_fetch_or_relaxed */ | ||
| 979 | |||
| 980 | #ifndef atomic64_fetch_or_acquire | ||
| 981 | #define atomic64_fetch_or_acquire(...) \ | ||
| 982 | __atomic_op_acquire(atomic64_fetch_or, __VA_ARGS__) | ||
| 983 | #endif | ||
| 984 | |||
| 985 | #ifndef atomic64_fetch_or_release | ||
| 986 | #define atomic64_fetch_or_release(...) \ | ||
| 987 | __atomic_op_release(atomic64_fetch_or, __VA_ARGS__) | ||
| 988 | #endif | ||
| 989 | |||
| 990 | #ifndef atomic64_fetch_or | ||
| 991 | #define atomic64_fetch_or(...) \ | ||
| 992 | __atomic_op_fence(atomic64_fetch_or, __VA_ARGS__) | ||
| 993 | #endif | ||
| 994 | #endif /* atomic64_fetch_or_relaxed */ | ||
| 995 | |||
| 996 | /* atomic64_fetch_and_relaxed */ | ||
| 997 | #ifndef atomic64_fetch_and_relaxed | ||
| 998 | #define atomic64_fetch_and_relaxed atomic64_fetch_and | ||
| 999 | #define atomic64_fetch_and_acquire atomic64_fetch_and | ||
| 1000 | #define atomic64_fetch_and_release atomic64_fetch_and | ||
| 1001 | |||
| 1002 | #else /* atomic64_fetch_and_relaxed */ | ||
| 1003 | |||
| 1004 | #ifndef atomic64_fetch_and_acquire | ||
| 1005 | #define atomic64_fetch_and_acquire(...) \ | ||
| 1006 | __atomic_op_acquire(atomic64_fetch_and, __VA_ARGS__) | ||
| 1007 | #endif | ||
| 1008 | |||
| 1009 | #ifndef atomic64_fetch_and_release | ||
| 1010 | #define atomic64_fetch_and_release(...) \ | ||
| 1011 | __atomic_op_release(atomic64_fetch_and, __VA_ARGS__) | ||
| 1012 | #endif | ||
| 1013 | |||
| 1014 | #ifndef atomic64_fetch_and | ||
| 1015 | #define atomic64_fetch_and(...) \ | ||
| 1016 | __atomic_op_fence(atomic64_fetch_and, __VA_ARGS__) | ||
| 1017 | #endif | ||
| 1018 | #endif /* atomic64_fetch_and_relaxed */ | ||
| 1019 | |||
| 1020 | #ifndef atomic64_andnot | ||
| 1021 | #define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v)) | ||
| 1022 | #endif | ||
| 1023 | |||
| 1024 | #ifndef atomic64_fetch_andnot_relaxed | ||
| 1025 | |||
| 1026 | #ifndef atomic64_fetch_andnot | ||
| 1027 | #define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v)) | ||
| 1028 | #define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v)) | ||
| 1029 | #define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v)) | ||
| 1030 | #define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v)) | ||
| 1031 | #else /* atomic64_fetch_andnot */ | ||
| 1032 | #define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot | ||
| 1033 | #define atomic64_fetch_andnot_acquire atomic64_fetch_andnot | ||
| 1034 | #define atomic64_fetch_andnot_release atomic64_fetch_andnot | ||
| 1035 | #endif /* atomic64_fetch_andnot */ | ||
| 1036 | |||
| 1037 | #else /* atomic64_fetch_andnot_relaxed */ | ||
| 1038 | |||
| 1039 | #ifndef atomic64_fetch_andnot_acquire | ||
| 1040 | #define atomic64_fetch_andnot_acquire(...) \ | ||
| 1041 | __atomic_op_acquire(atomic64_fetch_andnot, __VA_ARGS__) | ||
| 1042 | #endif | ||
| 1043 | |||
| 1044 | #ifndef atomic64_fetch_andnot_release | ||
| 1045 | #define atomic64_fetch_andnot_release(...) \ | ||
| 1046 | __atomic_op_release(atomic64_fetch_andnot, __VA_ARGS__) | ||
| 1047 | #endif | ||
| 1048 | |||
| 1049 | #ifndef atomic64_fetch_andnot | ||
| 1050 | #define atomic64_fetch_andnot(...) \ | ||
| 1051 | __atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__) | ||
| 1052 | #endif | ||
| 1053 | #endif /* atomic64_fetch_andnot_relaxed */ | ||
| 1054 | |||
| 1055 | /* atomic64_fetch_xor_relaxed */ | ||
| 1056 | #ifndef atomic64_fetch_xor_relaxed | ||
| 1057 | #define atomic64_fetch_xor_relaxed atomic64_fetch_xor | ||
| 1058 | #define atomic64_fetch_xor_acquire atomic64_fetch_xor | ||
| 1059 | #define atomic64_fetch_xor_release atomic64_fetch_xor | ||
| 1060 | |||
| 1061 | #else /* atomic64_fetch_xor_relaxed */ | ||
| 1062 | |||
| 1063 | #ifndef atomic64_fetch_xor_acquire | ||
| 1064 | #define atomic64_fetch_xor_acquire(...) \ | ||
| 1065 | __atomic_op_acquire(atomic64_fetch_xor, __VA_ARGS__) | ||
| 1066 | #endif | ||
| 1067 | |||
| 1068 | #ifndef atomic64_fetch_xor_release | ||
| 1069 | #define atomic64_fetch_xor_release(...) \ | ||
| 1070 | __atomic_op_release(atomic64_fetch_xor, __VA_ARGS__) | ||
| 1071 | #endif | ||
| 1072 | |||
| 1073 | #ifndef atomic64_fetch_xor | ||
| 1074 | #define atomic64_fetch_xor(...) \ | ||
| 1075 | __atomic_op_fence(atomic64_fetch_xor, __VA_ARGS__) | ||
| 1076 | #endif | ||
| 1077 | #endif /* atomic64_fetch_xor_relaxed */ | ||
| 1078 | |||
| 1079 | |||
| 1080 | /* atomic64_xchg_relaxed */ | ||
| 1081 | #ifndef atomic64_xchg_relaxed | ||
| 1082 | #define atomic64_xchg_relaxed atomic64_xchg | ||
| 1083 | #define atomic64_xchg_acquire atomic64_xchg | ||
| 1084 | #define atomic64_xchg_release atomic64_xchg | ||
| 1085 | |||
| 1086 | #else /* atomic64_xchg_relaxed */ | ||
| 1087 | |||
| 1088 | #ifndef atomic64_xchg_acquire | ||
| 1089 | #define atomic64_xchg_acquire(...) \ | ||
| 1090 | __atomic_op_acquire(atomic64_xchg, __VA_ARGS__) | ||
| 1091 | #endif | ||
| 1092 | |||
| 1093 | #ifndef atomic64_xchg_release | ||
| 1094 | #define atomic64_xchg_release(...) \ | ||
| 1095 | __atomic_op_release(atomic64_xchg, __VA_ARGS__) | ||
| 1096 | #endif | ||
| 1097 | |||
| 1098 | #ifndef atomic64_xchg | ||
| 1099 | #define atomic64_xchg(...) \ | ||
| 1100 | __atomic_op_fence(atomic64_xchg, __VA_ARGS__) | ||
| 1101 | #endif | ||
| 1102 | #endif /* atomic64_xchg_relaxed */ | ||
| 1103 | |||
| 1104 | /* atomic64_cmpxchg_relaxed */ | ||
| 1105 | #ifndef atomic64_cmpxchg_relaxed | ||
| 1106 | #define atomic64_cmpxchg_relaxed atomic64_cmpxchg | ||
| 1107 | #define atomic64_cmpxchg_acquire atomic64_cmpxchg | ||
| 1108 | #define atomic64_cmpxchg_release atomic64_cmpxchg | ||
| 1109 | |||
| 1110 | #else /* atomic64_cmpxchg_relaxed */ | ||
| 1111 | |||
| 1112 | #ifndef atomic64_cmpxchg_acquire | ||
| 1113 | #define atomic64_cmpxchg_acquire(...) \ | ||
| 1114 | __atomic_op_acquire(atomic64_cmpxchg, __VA_ARGS__) | ||
| 1115 | #endif | ||
| 1116 | |||
| 1117 | #ifndef atomic64_cmpxchg_release | ||
| 1118 | #define atomic64_cmpxchg_release(...) \ | ||
| 1119 | __atomic_op_release(atomic64_cmpxchg, __VA_ARGS__) | ||
| 1120 | #endif | ||
| 1121 | |||
| 1122 | #ifndef atomic64_cmpxchg | ||
| 1123 | #define atomic64_cmpxchg(...) \ | ||
| 1124 | __atomic_op_fence(atomic64_cmpxchg, __VA_ARGS__) | ||
| 1125 | #endif | ||
| 1126 | #endif /* atomic64_cmpxchg_relaxed */ | ||
| 1127 | |||
| 1128 | #ifndef atomic64_try_cmpxchg | ||
| 1129 | |||
| 1130 | #define __atomic64_try_cmpxchg(type, _p, _po, _n) \ | ||
| 1131 | ({ \ | ||
| 1132 | typeof(_po) __po = (_po); \ | ||
| 1133 | typeof(*(_po)) __r, __o = *__po; \ | ||
| 1134 | __r = atomic64_cmpxchg##type((_p), __o, (_n)); \ | ||
| 1135 | if (unlikely(__r != __o)) \ | ||
| 1136 | *__po = __r; \ | ||
| 1137 | likely(__r == __o); \ | ||
| 1138 | }) | ||
| 1139 | |||
| 1140 | #define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n) | ||
| 1141 | #define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n) | ||
| 1142 | #define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n) | ||
| 1143 | #define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n) | ||
| 1144 | |||
| 1145 | #else /* atomic64_try_cmpxchg */ | ||
| 1146 | #define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg | ||
| 1147 | #define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg | ||
| 1148 | #define atomic64_try_cmpxchg_release atomic64_try_cmpxchg | ||
| 1149 | #endif /* atomic64_try_cmpxchg */ | ||
| 1150 | |||
| 1151 | /** | ||
| 1152 | * atomic64_fetch_add_unless - add unless the number is already a given value | ||
| 1153 | * @v: pointer of type atomic64_t | ||
| 1154 | * @a: the amount to add to v... | ||
| 1155 | * @u: ...unless v is equal to u. | ||
| 1156 | * | ||
| 1157 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 1158 | * Returns the original value of @v. | ||
| 1159 | */ | ||
| 1160 | #ifndef atomic64_fetch_add_unless | ||
| 1161 | static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a, | ||
| 1162 | long long u) | ||
| 1163 | { | ||
| 1164 | long long c = atomic64_read(v); | ||
| 1165 | |||
| 1166 | do { | ||
| 1167 | if (unlikely(c == u)) | ||
| 1168 | break; | ||
| 1169 | } while (!atomic64_try_cmpxchg(v, &c, c + a)); | ||
| 1170 | |||
| 1171 | return c; | ||
| 1172 | } | ||
| 1173 | #endif | ||
| 1174 | |||
| 1175 | /** | ||
| 1176 | * atomic64_add_unless - add unless the number is already a given value | ||
| 1177 | * @v: pointer of type atomic_t | ||
| 1178 | * @a: the amount to add to v... | ||
| 1179 | * @u: ...unless v is equal to u. | ||
| 1180 | * | ||
| 1181 | * Atomically adds @a to @v, if @v was not already @u. | ||
| 1182 | * Returns true if the addition was done. | ||
| 1183 | */ | ||
| 1184 | static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u) | ||
| 1185 | { | ||
| 1186 | return atomic64_fetch_add_unless(v, a, u) != u; | ||
| 1187 | } | ||
| 1188 | |||
| 1189 | /** | ||
| 1190 | * atomic64_inc_not_zero - increment unless the number is zero | ||
| 1191 | * @v: pointer of type atomic64_t | ||
| 1192 | * | ||
| 1193 | * Atomically increments @v by 1, if @v is non-zero. | ||
| 1194 | * Returns true if the increment was done. | ||
| 1195 | */ | ||
| 1196 | #ifndef atomic64_inc_not_zero | ||
| 1197 | #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) | ||
| 1198 | #endif | ||
| 1199 | |||
| 1200 | /** | ||
| 1201 | * atomic64_inc_and_test - increment and test | ||
| 1202 | * @v: pointer of type atomic64_t | ||
| 1203 | * | ||
| 1204 | * Atomically increments @v by 1 | ||
| 1205 | * and returns true if the result is zero, or false for all | ||
| 1206 | * other cases. | ||
| 1207 | */ | ||
| 1208 | #ifndef atomic64_inc_and_test | ||
| 1209 | static inline bool atomic64_inc_and_test(atomic64_t *v) | ||
| 1210 | { | ||
| 1211 | return atomic64_inc_return(v) == 0; | ||
| 1212 | } | ||
| 1213 | #endif | ||
| 1214 | |||
| 1215 | /** | ||
| 1216 | * atomic64_dec_and_test - decrement and test | ||
| 1217 | * @v: pointer of type atomic64_t | ||
| 1218 | * | ||
| 1219 | * Atomically decrements @v by 1 and | ||
| 1220 | * returns true if the result is 0, or false for all other | ||
| 1221 | * cases. | ||
| 1222 | */ | ||
| 1223 | #ifndef atomic64_dec_and_test | ||
| 1224 | static inline bool atomic64_dec_and_test(atomic64_t *v) | ||
| 1225 | { | ||
| 1226 | return atomic64_dec_return(v) == 0; | ||
| 1227 | } | ||
| 1228 | #endif | ||
| 1229 | |||
| 1230 | /** | ||
| 1231 | * atomic64_sub_and_test - subtract value from variable and test result | ||
| 1232 | * @i: integer value to subtract | ||
| 1233 | * @v: pointer of type atomic64_t | ||
| 1234 | * | ||
| 1235 | * Atomically subtracts @i from @v and returns | ||
| 1236 | * true if the result is zero, or false for all | ||
| 1237 | * other cases. | ||
| 1238 | */ | ||
| 1239 | #ifndef atomic64_sub_and_test | ||
| 1240 | static inline bool atomic64_sub_and_test(long long i, atomic64_t *v) | ||
| 1241 | { | ||
| 1242 | return atomic64_sub_return(i, v) == 0; | ||
| 1243 | } | ||
| 1244 | #endif | ||
| 1245 | |||
| 1246 | /** | ||
| 1247 | * atomic64_add_negative - add and test if negative | ||
| 1248 | * @i: integer value to add | ||
| 1249 | * @v: pointer of type atomic64_t | ||
| 1250 | * | ||
| 1251 | * Atomically adds @i to @v and returns true | ||
| 1252 | * if the result is negative, or false when | ||
| 1253 | * result is greater than or equal to zero. | ||
| 1254 | */ | ||
| 1255 | #ifndef atomic64_add_negative | ||
| 1256 | static inline bool atomic64_add_negative(long long i, atomic64_t *v) | ||
| 1257 | { | ||
| 1258 | return atomic64_add_return(i, v) < 0; | ||
| 1259 | } | ||
| 1260 | #endif | ||
| 1261 | |||
| 1262 | #ifndef atomic64_inc_unless_negative | ||
| 1263 | static inline bool atomic64_inc_unless_negative(atomic64_t *v) | ||
| 1264 | { | ||
| 1265 | long long c = atomic64_read(v); | ||
| 1266 | |||
| 1267 | do { | ||
| 1268 | if (unlikely(c < 0)) | ||
| 1269 | return false; | ||
| 1270 | } while (!atomic64_try_cmpxchg(v, &c, c + 1)); | ||
| 1271 | |||
| 1272 | return true; | ||
| 1273 | } | ||
| 1274 | #endif | ||
| 1275 | |||
| 1276 | #ifndef atomic64_dec_unless_positive | ||
| 1277 | static inline bool atomic64_dec_unless_positive(atomic64_t *v) | ||
| 1278 | { | ||
| 1279 | long long c = atomic64_read(v); | ||
| 1280 | |||
| 1281 | do { | ||
| 1282 | if (unlikely(c > 0)) | ||
| 1283 | return false; | ||
| 1284 | } while (!atomic64_try_cmpxchg(v, &c, c - 1)); | ||
| 1285 | |||
| 1286 | return true; | ||
| 1287 | } | ||
| 1288 | #endif | ||
| 1289 | |||
| 1290 | /* | ||
| 1291 | * atomic64_dec_if_positive - decrement by 1 if old value positive | ||
| 1292 | * @v: pointer of type atomic64_t | ||
| 1293 | * | ||
| 1294 | * The function returns the old value of *v minus 1, even if | ||
| 1295 | * the atomic64 variable, v, was not decremented. | ||
| 1296 | */ | ||
| 1297 | #ifndef atomic64_dec_if_positive | ||
| 1298 | static inline long long atomic64_dec_if_positive(atomic64_t *v) | ||
| 1299 | { | ||
| 1300 | long long dec, c = atomic64_read(v); | ||
| 1301 | |||
| 1302 | do { | ||
| 1303 | dec = c - 1; | ||
| 1304 | if (unlikely(dec < 0)) | ||
| 1305 | break; | ||
| 1306 | } while (!atomic64_try_cmpxchg(v, &c, dec)); | ||
| 1307 | |||
| 1308 | return dec; | ||
| 1309 | } | ||
| 1310 | #endif | ||
| 1311 | |||
| 1312 | #define atomic64_cond_read_relaxed(v, c) smp_cond_load_relaxed(&(v)->counter, (c)) | ||
| 1313 | #define atomic64_cond_read_acquire(v, c) smp_cond_load_acquire(&(v)->counter, (c)) | ||
| 1314 | 75 | ||
| 1315 | #include <asm-generic/atomic-long.h> | 76 | #include <asm-generic/atomic-long.h> |
| 1316 | 77 | ||
diff --git a/include/linux/audit.h b/include/linux/audit.h index a625c29a2ea2..1e69d9fe16da 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | 25 | ||
| 26 | #include <linux/sched.h> | 26 | #include <linux/sched.h> |
| 27 | #include <linux/ptrace.h> | 27 | #include <linux/ptrace.h> |
| 28 | #include <linux/namei.h> /* LOOKUP_* */ | ||
| 28 | #include <uapi/linux/audit.h> | 29 | #include <uapi/linux/audit.h> |
| 29 | 30 | ||
| 30 | #define AUDIT_INO_UNSET ((unsigned long)-1) | 31 | #define AUDIT_INO_UNSET ((unsigned long)-1) |
| @@ -159,6 +160,18 @@ extern int audit_update_lsm_rules(void); | |||
| 159 | extern int audit_rule_change(int type, int seq, void *data, size_t datasz); | 160 | extern int audit_rule_change(int type, int seq, void *data, size_t datasz); |
| 160 | extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); | 161 | extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); |
| 161 | 162 | ||
| 163 | extern int audit_set_loginuid(kuid_t loginuid); | ||
| 164 | |||
| 165 | static inline kuid_t audit_get_loginuid(struct task_struct *tsk) | ||
| 166 | { | ||
| 167 | return tsk->loginuid; | ||
| 168 | } | ||
| 169 | |||
| 170 | static inline unsigned int audit_get_sessionid(struct task_struct *tsk) | ||
| 171 | { | ||
| 172 | return tsk->sessionid; | ||
| 173 | } | ||
| 174 | |||
| 162 | extern u32 audit_enabled; | 175 | extern u32 audit_enabled; |
| 163 | #else /* CONFIG_AUDIT */ | 176 | #else /* CONFIG_AUDIT */ |
| 164 | static inline __printf(4, 5) | 177 | static inline __printf(4, 5) |
| @@ -201,6 +214,17 @@ static inline int audit_log_task_context(struct audit_buffer *ab) | |||
| 201 | } | 214 | } |
| 202 | static inline void audit_log_task_info(struct audit_buffer *ab) | 215 | static inline void audit_log_task_info(struct audit_buffer *ab) |
| 203 | { } | 216 | { } |
| 217 | |||
| 218 | static inline kuid_t audit_get_loginuid(struct task_struct *tsk) | ||
| 219 | { | ||
| 220 | return INVALID_UID; | ||
| 221 | } | ||
| 222 | |||
| 223 | static inline unsigned int audit_get_sessionid(struct task_struct *tsk) | ||
| 224 | { | ||
| 225 | return AUDIT_SID_UNSET; | ||
| 226 | } | ||
| 227 | |||
| 204 | #define audit_enabled AUDIT_OFF | 228 | #define audit_enabled AUDIT_OFF |
| 205 | #endif /* CONFIG_AUDIT */ | 229 | #endif /* CONFIG_AUDIT */ |
| 206 | 230 | ||
| @@ -225,6 +249,7 @@ extern void __audit_getname(struct filename *name); | |||
| 225 | 249 | ||
| 226 | #define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ | 250 | #define AUDIT_INODE_PARENT 1 /* dentry represents the parent */ |
| 227 | #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ | 251 | #define AUDIT_INODE_HIDDEN 2 /* audit record should be hidden */ |
| 252 | #define AUDIT_INODE_NOEVAL 4 /* audit record incomplete */ | ||
| 228 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, | 253 | extern void __audit_inode(struct filename *name, const struct dentry *dentry, |
| 229 | unsigned int flags); | 254 | unsigned int flags); |
| 230 | extern void __audit_file(const struct file *); | 255 | extern void __audit_file(const struct file *); |
| @@ -285,12 +310,15 @@ static inline void audit_getname(struct filename *name) | |||
| 285 | } | 310 | } |
| 286 | static inline void audit_inode(struct filename *name, | 311 | static inline void audit_inode(struct filename *name, |
| 287 | const struct dentry *dentry, | 312 | const struct dentry *dentry, |
| 288 | unsigned int parent) { | 313 | unsigned int flags) { |
| 289 | if (unlikely(!audit_dummy_context())) { | 314 | if (unlikely(!audit_dummy_context())) { |
| 290 | unsigned int flags = 0; | 315 | unsigned int aflags = 0; |
| 291 | if (parent) | 316 | |
| 292 | flags |= AUDIT_INODE_PARENT; | 317 | if (flags & LOOKUP_PARENT) |
| 293 | __audit_inode(name, dentry, flags); | 318 | aflags |= AUDIT_INODE_PARENT; |
| 319 | if (flags & LOOKUP_NO_EVAL) | ||
| 320 | aflags |= AUDIT_INODE_NOEVAL; | ||
| 321 | __audit_inode(name, dentry, aflags); | ||
| 294 | } | 322 | } |
| 295 | } | 323 | } |
| 296 | static inline void audit_file(struct file *file) | 324 | static inline void audit_file(struct file *file) |
| @@ -320,21 +348,6 @@ static inline void audit_ptrace(struct task_struct *t) | |||
| 320 | } | 348 | } |
| 321 | 349 | ||
| 322 | /* Private API (for audit.c only) */ | 350 | /* Private API (for audit.c only) */ |
| 323 | extern unsigned int audit_serial(void); | ||
| 324 | extern int auditsc_get_stamp(struct audit_context *ctx, | ||
| 325 | struct timespec64 *t, unsigned int *serial); | ||
| 326 | extern int audit_set_loginuid(kuid_t loginuid); | ||
| 327 | |||
| 328 | static inline kuid_t audit_get_loginuid(struct task_struct *tsk) | ||
| 329 | { | ||
| 330 | return tsk->loginuid; | ||
| 331 | } | ||
| 332 | |||
| 333 | static inline unsigned int audit_get_sessionid(struct task_struct *tsk) | ||
| 334 | { | ||
| 335 | return tsk->sessionid; | ||
| 336 | } | ||
| 337 | |||
| 338 | extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); | 351 | extern void __audit_ipc_obj(struct kern_ipc_perm *ipcp); |
| 339 | extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); | 352 | extern void __audit_ipc_set_perm(unsigned long qbytes, uid_t uid, gid_t gid, umode_t mode); |
| 340 | extern void __audit_bprm(struct linux_binprm *bprm); | 353 | extern void __audit_bprm(struct linux_binprm *bprm); |
| @@ -514,19 +527,6 @@ static inline void audit_seccomp(unsigned long syscall, long signr, int code) | |||
| 514 | static inline void audit_seccomp_actions_logged(const char *names, | 527 | static inline void audit_seccomp_actions_logged(const char *names, |
| 515 | const char *old_names, int res) | 528 | const char *old_names, int res) |
| 516 | { } | 529 | { } |
| 517 | static inline int auditsc_get_stamp(struct audit_context *ctx, | ||
| 518 | struct timespec64 *t, unsigned int *serial) | ||
| 519 | { | ||
| 520 | return 0; | ||
| 521 | } | ||
| 522 | static inline kuid_t audit_get_loginuid(struct task_struct *tsk) | ||
| 523 | { | ||
| 524 | return INVALID_UID; | ||
| 525 | } | ||
| 526 | static inline unsigned int audit_get_sessionid(struct task_struct *tsk) | ||
| 527 | { | ||
| 528 | return AUDIT_SID_UNSET; | ||
| 529 | } | ||
| 530 | static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) | 530 | static inline void audit_ipc_obj(struct kern_ipc_perm *ipcp) |
| 531 | { } | 531 | { } |
| 532 | static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, | 532 | static inline void audit_ipc_set_perm(unsigned long qbytes, uid_t uid, |
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h index c31157135598..07e02d6df5ad 100644 --- a/include/linux/backing-dev-defs.h +++ b/include/linux/backing-dev-defs.h | |||
| @@ -190,6 +190,7 @@ struct backing_dev_info { | |||
| 190 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ | 190 | struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */ |
| 191 | struct rb_root cgwb_congested_tree; /* their congested states */ | 191 | struct rb_root cgwb_congested_tree; /* their congested states */ |
| 192 | struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ | 192 | struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */ |
| 193 | struct rw_semaphore wb_switch_rwsem; /* no cgwb switch while syncing */ | ||
| 193 | #else | 194 | #else |
| 194 | struct bdi_writeback_congested *wb_congested; | 195 | struct bdi_writeback_congested *wb_congested; |
| 195 | #endif | 196 | #endif |
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h index c28a47cbe355..f9b029180241 100644 --- a/include/linux/backing-dev.h +++ b/include/linux/backing-dev.h | |||
| @@ -365,7 +365,7 @@ unlocked_inode_to_wb_begin(struct inode *inode, struct wb_lock_cookie *cookie) | |||
| 365 | rcu_read_lock(); | 365 | rcu_read_lock(); |
| 366 | 366 | ||
| 367 | /* | 367 | /* |
| 368 | * Paired with store_release in inode_switch_wb_work_fn() and | 368 | * Paired with store_release in inode_switch_wbs_work_fn() and |
| 369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. | 369 | * ensures that we see the new wb if we see cleared I_WB_SWITCH. |
| 370 | */ | 370 | */ |
| 371 | cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; | 371 | cookie->locked = smp_load_acquire(&inode->i_state) & I_WB_SWITCH; |
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h index 53051f3d8f25..f111c780ef1d 100644 --- a/include/linux/balloon_compaction.h +++ b/include/linux/balloon_compaction.h | |||
| @@ -4,15 +4,18 @@ | |||
| 4 | * | 4 | * |
| 5 | * Common interface definitions for making balloon pages movable by compaction. | 5 | * Common interface definitions for making balloon pages movable by compaction. |
| 6 | * | 6 | * |
| 7 | * Despite being perfectly possible to perform ballooned pages migration, they | 7 | * Balloon page migration makes use of the general non-lru movable page |
| 8 | * make a special corner case to compaction scans because balloon pages are not | 8 | * feature. |
| 9 | * enlisted at any LRU list like the other pages we do compact / migrate. | 9 | * |
| 10 | * page->private is used to reference the responsible balloon device. | ||
| 11 | * page->mapping is used in context of non-lru page migration to reference | ||
| 12 | * the address space operations for page isolation/migration/compaction. | ||
| 10 | * | 13 | * |
| 11 | * As the page isolation scanning step a compaction thread does is a lockless | 14 | * As the page isolation scanning step a compaction thread does is a lockless |
| 12 | * procedure (from a page standpoint), it might bring some racy situations while | 15 | * procedure (from a page standpoint), it might bring some racy situations while |
| 13 | * performing balloon page compaction. In order to sort out these racy scenarios | 16 | * performing balloon page compaction. In order to sort out these racy scenarios |
| 14 | * and safely perform balloon's page compaction and migration we must, always, | 17 | * and safely perform balloon's page compaction and migration we must, always, |
| 15 | * ensure following these three simple rules: | 18 | * ensure following these simple rules: |
| 16 | * | 19 | * |
| 17 | * i. when updating a balloon's page ->mapping element, strictly do it under | 20 | * i. when updating a balloon's page ->mapping element, strictly do it under |
| 18 | * the following lock order, independently of the far superior | 21 | * the following lock order, independently of the far superior |
| @@ -21,19 +24,8 @@ | |||
| 21 | * +--spin_lock_irq(&b_dev_info->pages_lock); | 24 | * +--spin_lock_irq(&b_dev_info->pages_lock); |
| 22 | * ... page->mapping updates here ... | 25 | * ... page->mapping updates here ... |
| 23 | * | 26 | * |
| 24 | * ii. before isolating or dequeueing a balloon page from the balloon device | 27 | * ii. isolation or dequeueing procedure must remove the page from balloon |
| 25 | * pages list, the page reference counter must be raised by one and the | 28 | * device page list under b_dev_info->pages_lock. |
| 26 | * extra refcount must be dropped when the page is enqueued back into | ||
| 27 | * the balloon device page list, thus a balloon page keeps its reference | ||
| 28 | * counter raised only while it is under our special handling; | ||
| 29 | * | ||
| 30 | * iii. after the lockless scan step have selected a potential balloon page for | ||
| 31 | * isolation, re-test the PageBalloon mark and the PagePrivate flag | ||
| 32 | * under the proper page lock, to ensure isolating a valid balloon page | ||
| 33 | * (not yet isolated, nor under release procedure) | ||
| 34 | * | ||
| 35 | * iv. isolation or dequeueing procedure must clear PagePrivate flag under | ||
| 36 | * page lock together with removing page from balloon device page list. | ||
| 37 | * | 29 | * |
| 38 | * The functions provided by this interface are placed to help on coping with | 30 | * The functions provided by this interface are placed to help on coping with |
| 39 | * the aforementioned balloon page corner case, as well as to ensure the simple | 31 | * the aforementioned balloon page corner case, as well as to ensure the simple |
| @@ -103,7 +95,7 @@ extern int balloon_page_migrate(struct address_space *mapping, | |||
| 103 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, | 95 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, |
| 104 | struct page *page) | 96 | struct page *page) |
| 105 | { | 97 | { |
| 106 | __SetPageBalloon(page); | 98 | __SetPageOffline(page); |
| 107 | __SetPageMovable(page, balloon->inode->i_mapping); | 99 | __SetPageMovable(page, balloon->inode->i_mapping); |
| 108 | set_page_private(page, (unsigned long)balloon); | 100 | set_page_private(page, (unsigned long)balloon); |
| 109 | list_add(&page->lru, &balloon->pages); | 101 | list_add(&page->lru, &balloon->pages); |
| @@ -119,7 +111,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon, | |||
| 119 | */ | 111 | */ |
| 120 | static inline void balloon_page_delete(struct page *page) | 112 | static inline void balloon_page_delete(struct page *page) |
| 121 | { | 113 | { |
| 122 | __ClearPageBalloon(page); | 114 | __ClearPageOffline(page); |
| 123 | __ClearPageMovable(page); | 115 | __ClearPageMovable(page); |
| 124 | set_page_private(page, 0); | 116 | set_page_private(page, 0); |
| 125 | /* | 117 | /* |
| @@ -149,13 +141,13 @@ static inline gfp_t balloon_mapping_gfp_mask(void) | |||
| 149 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, | 141 | static inline void balloon_page_insert(struct balloon_dev_info *balloon, |
| 150 | struct page *page) | 142 | struct page *page) |
| 151 | { | 143 | { |
| 152 | __SetPageBalloon(page); | 144 | __SetPageOffline(page); |
| 153 | list_add(&page->lru, &balloon->pages); | 145 | list_add(&page->lru, &balloon->pages); |
| 154 | } | 146 | } |
| 155 | 147 | ||
| 156 | static inline void balloon_page_delete(struct page *page) | 148 | static inline void balloon_page_delete(struct page *page) |
| 157 | { | 149 | { |
| 158 | __ClearPageBalloon(page); | 150 | __ClearPageOffline(page); |
| 159 | list_del(&page->lru); | 151 | list_del(&page->lru); |
| 160 | } | 152 | } |
| 161 | 153 | ||
diff --git a/include/linux/bcma/bcma.h b/include/linux/bcma/bcma.h index ef61f3607e99..60b94b944e9f 100644 --- a/include/linux/bcma/bcma.h +++ b/include/linux/bcma/bcma.h | |||
| @@ -332,6 +332,8 @@ extern int bcma_arch_register_fallback_sprom( | |||
| 332 | struct ssb_sprom *out)); | 332 | struct ssb_sprom *out)); |
| 333 | 333 | ||
| 334 | struct bcma_bus { | 334 | struct bcma_bus { |
| 335 | struct device *dev; | ||
| 336 | |||
| 335 | /* The MMIO area. */ | 337 | /* The MMIO area. */ |
| 336 | void __iomem *mmio; | 338 | void __iomem *mmio; |
| 337 | 339 | ||
| @@ -339,14 +341,7 @@ struct bcma_bus { | |||
| 339 | 341 | ||
| 340 | enum bcma_hosttype hosttype; | 342 | enum bcma_hosttype hosttype; |
| 341 | bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ | 343 | bool host_is_pcie2; /* Used for BCMA_HOSTTYPE_PCI only */ |
| 342 | union { | 344 | struct pci_dev *host_pci; /* PCI bus pointer (BCMA_HOSTTYPE_PCI only) */ |
| 343 | /* Pointer to the PCI bus (only for BCMA_HOSTTYPE_PCI) */ | ||
| 344 | struct pci_dev *host_pci; | ||
| 345 | /* Pointer to the SDIO device (only for BCMA_HOSTTYPE_SDIO) */ | ||
| 346 | struct sdio_func *host_sdio; | ||
| 347 | /* Pointer to platform device (only for BCMA_HOSTTYPE_SOC) */ | ||
| 348 | struct platform_device *host_pdev; | ||
| 349 | }; | ||
| 350 | 345 | ||
| 351 | struct bcma_chipinfo chipinfo; | 346 | struct bcma_chipinfo chipinfo; |
| 352 | 347 | ||
diff --git a/include/linux/bcma/bcma_soc.h b/include/linux/bcma/bcma_soc.h index 7cca5f859a90..f3c43519baa7 100644 --- a/include/linux/bcma/bcma_soc.h +++ b/include/linux/bcma/bcma_soc.h | |||
| @@ -6,6 +6,7 @@ | |||
| 6 | 6 | ||
| 7 | struct bcma_soc { | 7 | struct bcma_soc { |
| 8 | struct bcma_bus bus; | 8 | struct bcma_bus bus; |
| 9 | struct device *dev; | ||
| 9 | }; | 10 | }; |
| 10 | 11 | ||
| 11 | int __init bcma_host_soc_register(struct bcma_soc *soc); | 12 | int __init bcma_host_soc_register(struct bcma_soc *soc); |
diff --git a/include/linux/bio.h b/include/linux/bio.h index 7380b094dcca..e584673c1881 100644 --- a/include/linux/bio.h +++ b/include/linux/bio.h | |||
| @@ -34,15 +34,7 @@ | |||
| 34 | #define BIO_BUG_ON | 34 | #define BIO_BUG_ON |
| 35 | #endif | 35 | #endif |
| 36 | 36 | ||
| 37 | #ifdef CONFIG_THP_SWAP | ||
| 38 | #if HPAGE_PMD_NR > 256 | ||
| 39 | #define BIO_MAX_PAGES HPAGE_PMD_NR | ||
| 40 | #else | ||
| 41 | #define BIO_MAX_PAGES 256 | ||
| 42 | #endif | ||
| 43 | #else | ||
| 44 | #define BIO_MAX_PAGES 256 | 37 | #define BIO_MAX_PAGES 256 |
| 45 | #endif | ||
| 46 | 38 | ||
| 47 | #define bio_prio(bio) (bio)->bi_ioprio | 39 | #define bio_prio(bio) (bio)->bi_ioprio |
| 48 | #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) | 40 | #define bio_set_prio(bio, prio) ((bio)->bi_ioprio = prio) |
| @@ -128,12 +120,23 @@ static inline bool bio_full(struct bio *bio) | |||
| 128 | return bio->bi_vcnt >= bio->bi_max_vecs; | 120 | return bio->bi_vcnt >= bio->bi_max_vecs; |
| 129 | } | 121 | } |
| 130 | 122 | ||
| 123 | static inline bool bio_next_segment(const struct bio *bio, | ||
| 124 | struct bvec_iter_all *iter) | ||
| 125 | { | ||
| 126 | if (iter->idx >= bio->bi_vcnt) | ||
| 127 | return false; | ||
| 128 | |||
| 129 | bvec_advance(&bio->bi_io_vec[iter->idx], iter); | ||
| 130 | return true; | ||
| 131 | } | ||
| 132 | |||
| 131 | /* | 133 | /* |
| 132 | * drivers should _never_ use the all version - the bio may have been split | 134 | * drivers should _never_ use the all version - the bio may have been split |
| 133 | * before it got to the driver and the driver won't own all of it | 135 | * before it got to the driver and the driver won't own all of it |
| 134 | */ | 136 | */ |
| 135 | #define bio_for_each_segment_all(bvl, bio, i) \ | 137 | #define bio_for_each_segment_all(bvl, bio, i, iter) \ |
| 136 | for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++) | 138 | for (i = 0, bvl = bvec_init_iter_all(&iter); \ |
| 139 | bio_next_segment((bio), &iter); i++) | ||
| 137 | 140 | ||
| 138 | static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, | 141 | static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, |
| 139 | unsigned bytes) | 142 | unsigned bytes) |
| @@ -156,6 +159,16 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter, | |||
| 156 | #define bio_for_each_segment(bvl, bio, iter) \ | 159 | #define bio_for_each_segment(bvl, bio, iter) \ |
| 157 | __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) | 160 | __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter) |
| 158 | 161 | ||
| 162 | #define __bio_for_each_bvec(bvl, bio, iter, start) \ | ||
| 163 | for (iter = (start); \ | ||
| 164 | (iter).bi_size && \ | ||
| 165 | ((bvl = mp_bvec_iter_bvec((bio)->bi_io_vec, (iter))), 1); \ | ||
| 166 | bio_advance_iter((bio), &(iter), (bvl).bv_len)) | ||
| 167 | |||
| 168 | /* iterate over multi-page bvec */ | ||
| 169 | #define bio_for_each_bvec(bvl, bio, iter) \ | ||
| 170 | __bio_for_each_bvec(bvl, bio, iter, (bio)->bi_iter) | ||
| 171 | |||
| 159 | #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) | 172 | #define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len) |
| 160 | 173 | ||
| 161 | static inline unsigned bio_segments(struct bio *bio) | 174 | static inline unsigned bio_segments(struct bio *bio) |
| @@ -263,12 +276,6 @@ static inline void bio_get_last_bvec(struct bio *bio, struct bio_vec *bv) | |||
| 263 | bv->bv_len = iter.bi_bvec_done; | 276 | bv->bv_len = iter.bi_bvec_done; |
| 264 | } | 277 | } |
| 265 | 278 | ||
| 266 | static inline unsigned bio_pages_all(struct bio *bio) | ||
| 267 | { | ||
| 268 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); | ||
| 269 | return bio->bi_vcnt; | ||
| 270 | } | ||
| 271 | |||
| 272 | static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) | 279 | static inline struct bio_vec *bio_first_bvec_all(struct bio *bio) |
| 273 | { | 280 | { |
| 274 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); | 281 | WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)); |
| @@ -430,7 +437,7 @@ extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); | |||
| 430 | extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, | 437 | extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, |
| 431 | unsigned int, unsigned int); | 438 | unsigned int, unsigned int); |
| 432 | bool __bio_try_merge_page(struct bio *bio, struct page *page, | 439 | bool __bio_try_merge_page(struct bio *bio, struct page *page, |
| 433 | unsigned int len, unsigned int off); | 440 | unsigned int len, unsigned int off, bool same_page); |
| 434 | void __bio_add_page(struct bio *bio, struct page *page, | 441 | void __bio_add_page(struct bio *bio, struct page *page, |
| 435 | unsigned int len, unsigned int off); | 442 | unsigned int len, unsigned int off); |
| 436 | int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); | 443 | int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter); |
| @@ -823,5 +830,19 @@ static inline int bio_integrity_add_page(struct bio *bio, struct page *page, | |||
| 823 | 830 | ||
| 824 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | 831 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ |
| 825 | 832 | ||
| 833 | /* | ||
| 834 | * Mark a bio as polled. Note that for async polled IO, the caller must | ||
| 835 | * expect -EWOULDBLOCK if we cannot allocate a request (or other resources). | ||
| 836 | * We cannot block waiting for requests on polled IO, as those completions | ||
| 837 | * must be found by the caller. This is different than IRQ driven IO, where | ||
| 838 | * it's safe to wait for IO to complete. | ||
| 839 | */ | ||
| 840 | static inline void bio_set_polled(struct bio *bio, struct kiocb *kiocb) | ||
| 841 | { | ||
| 842 | bio->bi_opf |= REQ_HIPRI; | ||
| 843 | if (!is_sync_kiocb(kiocb)) | ||
| 844 | bio->bi_opf |= REQ_NOWAIT; | ||
| 845 | } | ||
| 846 | |||
| 826 | #endif /* CONFIG_BLOCK */ | 847 | #endif /* CONFIG_BLOCK */ |
| 827 | #endif /* __LINUX_BIO_H */ | 848 | #endif /* __LINUX_BIO_H */ |
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 705f7c442691..602af23b98c7 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
| @@ -246,7 +246,7 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr, | |||
| 246 | new__ = (old__ & ~mask__) | bits__; \ | 246 | new__ = (old__ & ~mask__) | bits__; \ |
| 247 | } while (cmpxchg(ptr, old__, new__) != old__); \ | 247 | } while (cmpxchg(ptr, old__, new__) != old__); \ |
| 248 | \ | 248 | \ |
| 249 | new__; \ | 249 | old__; \ |
| 250 | }) | 250 | }) |
| 251 | #endif | 251 | #endif |
| 252 | 252 | ||
diff --git a/include/linux/bitrev.h b/include/linux/bitrev.h index 50fb0dee23e8..d35b8ec1c485 100644 --- a/include/linux/bitrev.h +++ b/include/linux/bitrev.h | |||
| @@ -34,41 +34,41 @@ static inline u32 __bitrev32(u32 x) | |||
| 34 | 34 | ||
| 35 | #define __constant_bitrev32(x) \ | 35 | #define __constant_bitrev32(x) \ |
| 36 | ({ \ | 36 | ({ \ |
| 37 | u32 __x = x; \ | 37 | u32 ___x = x; \ |
| 38 | __x = (__x >> 16) | (__x << 16); \ | 38 | ___x = (___x >> 16) | (___x << 16); \ |
| 39 | __x = ((__x & (u32)0xFF00FF00UL) >> 8) | ((__x & (u32)0x00FF00FFUL) << 8); \ | 39 | ___x = ((___x & (u32)0xFF00FF00UL) >> 8) | ((___x & (u32)0x00FF00FFUL) << 8); \ |
| 40 | __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ | 40 | ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ |
| 41 | __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ | 41 | ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ |
| 42 | __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ | 42 | ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ |
| 43 | __x; \ | 43 | ___x; \ |
| 44 | }) | 44 | }) |
| 45 | 45 | ||
| 46 | #define __constant_bitrev16(x) \ | 46 | #define __constant_bitrev16(x) \ |
| 47 | ({ \ | 47 | ({ \ |
| 48 | u16 __x = x; \ | 48 | u16 ___x = x; \ |
| 49 | __x = (__x >> 8) | (__x << 8); \ | 49 | ___x = (___x >> 8) | (___x << 8); \ |
| 50 | __x = ((__x & (u16)0xF0F0U) >> 4) | ((__x & (u16)0x0F0FU) << 4); \ | 50 | ___x = ((___x & (u16)0xF0F0U) >> 4) | ((___x & (u16)0x0F0FU) << 4); \ |
| 51 | __x = ((__x & (u16)0xCCCCU) >> 2) | ((__x & (u16)0x3333U) << 2); \ | 51 | ___x = ((___x & (u16)0xCCCCU) >> 2) | ((___x & (u16)0x3333U) << 2); \ |
| 52 | __x = ((__x & (u16)0xAAAAU) >> 1) | ((__x & (u16)0x5555U) << 1); \ | 52 | ___x = ((___x & (u16)0xAAAAU) >> 1) | ((___x & (u16)0x5555U) << 1); \ |
| 53 | __x; \ | 53 | ___x; \ |
| 54 | }) | 54 | }) |
| 55 | 55 | ||
| 56 | #define __constant_bitrev8x4(x) \ | 56 | #define __constant_bitrev8x4(x) \ |
| 57 | ({ \ | 57 | ({ \ |
| 58 | u32 __x = x; \ | 58 | u32 ___x = x; \ |
| 59 | __x = ((__x & (u32)0xF0F0F0F0UL) >> 4) | ((__x & (u32)0x0F0F0F0FUL) << 4); \ | 59 | ___x = ((___x & (u32)0xF0F0F0F0UL) >> 4) | ((___x & (u32)0x0F0F0F0FUL) << 4); \ |
| 60 | __x = ((__x & (u32)0xCCCCCCCCUL) >> 2) | ((__x & (u32)0x33333333UL) << 2); \ | 60 | ___x = ((___x & (u32)0xCCCCCCCCUL) >> 2) | ((___x & (u32)0x33333333UL) << 2); \ |
| 61 | __x = ((__x & (u32)0xAAAAAAAAUL) >> 1) | ((__x & (u32)0x55555555UL) << 1); \ | 61 | ___x = ((___x & (u32)0xAAAAAAAAUL) >> 1) | ((___x & (u32)0x55555555UL) << 1); \ |
| 62 | __x; \ | 62 | ___x; \ |
| 63 | }) | 63 | }) |
| 64 | 64 | ||
| 65 | #define __constant_bitrev8(x) \ | 65 | #define __constant_bitrev8(x) \ |
| 66 | ({ \ | 66 | ({ \ |
| 67 | u8 __x = x; \ | 67 | u8 ___x = x; \ |
| 68 | __x = (__x >> 4) | (__x << 4); \ | 68 | ___x = (___x >> 4) | (___x << 4); \ |
| 69 | __x = ((__x & (u8)0xCCU) >> 2) | ((__x & (u8)0x33U) << 2); \ | 69 | ___x = ((___x & (u8)0xCCU) >> 2) | ((___x & (u8)0x33U) << 2); \ |
| 70 | __x = ((__x & (u8)0xAAU) >> 1) | ((__x & (u8)0x55U) << 1); \ | 70 | ___x = ((___x & (u8)0xAAU) >> 1) | ((___x & (u8)0x55U) << 1); \ |
| 71 | __x; \ | 71 | ___x; \ |
| 72 | }) | 72 | }) |
| 73 | 73 | ||
| 74 | #define bitrev32(x) \ | 74 | #define bitrev32(x) \ |
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 0e030f5f76b6..db29928de467 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h | |||
| @@ -57,7 +57,6 @@ struct blk_mq_hw_ctx { | |||
| 57 | unsigned int queue_num; | 57 | unsigned int queue_num; |
| 58 | 58 | ||
| 59 | atomic_t nr_active; | 59 | atomic_t nr_active; |
| 60 | unsigned int nr_expired; | ||
| 61 | 60 | ||
| 62 | struct hlist_node cpuhp_dead; | 61 | struct hlist_node cpuhp_dead; |
| 63 | struct kobject kobj; | 62 | struct kobject kobj; |
| @@ -218,7 +217,6 @@ struct blk_mq_ops { | |||
| 218 | enum { | 217 | enum { |
| 219 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, | 218 | BLK_MQ_F_SHOULD_MERGE = 1 << 0, |
| 220 | BLK_MQ_F_TAG_SHARED = 1 << 1, | 219 | BLK_MQ_F_TAG_SHARED = 1 << 1, |
| 221 | BLK_MQ_F_SG_MERGE = 1 << 2, | ||
| 222 | BLK_MQ_F_BLOCKING = 1 << 5, | 220 | BLK_MQ_F_BLOCKING = 1 << 5, |
| 223 | BLK_MQ_F_NO_SCHED = 1 << 6, | 221 | BLK_MQ_F_NO_SCHED = 1 << 6, |
| 224 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, | 222 | BLK_MQ_F_ALLOC_POLICY_START_BIT = 8, |
| @@ -301,11 +299,10 @@ void blk_mq_end_request(struct request *rq, blk_status_t error); | |||
| 301 | void __blk_mq_end_request(struct request *rq, blk_status_t error); | 299 | void __blk_mq_end_request(struct request *rq, blk_status_t error); |
| 302 | 300 | ||
| 303 | void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); | 301 | void blk_mq_requeue_request(struct request *rq, bool kick_requeue_list); |
| 304 | void blk_mq_add_to_requeue_list(struct request *rq, bool at_head, | ||
| 305 | bool kick_requeue_list); | ||
| 306 | void blk_mq_kick_requeue_list(struct request_queue *q); | 302 | void blk_mq_kick_requeue_list(struct request_queue *q); |
| 307 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); | 303 | void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs); |
| 308 | bool blk_mq_complete_request(struct request *rq); | 304 | bool blk_mq_complete_request(struct request *rq); |
| 305 | void blk_mq_complete_request_sync(struct request *rq); | ||
| 309 | bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, | 306 | bool blk_mq_bio_list_merge(struct request_queue *q, struct list_head *list, |
| 310 | struct bio *bio); | 307 | struct bio *bio); |
| 311 | bool blk_mq_queue_stopped(struct request_queue *q); | 308 | bool blk_mq_queue_stopped(struct request_queue *q); |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 5c7e7f859a24..791fee35df88 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -215,6 +215,7 @@ struct bio { | |||
| 215 | /* | 215 | /* |
| 216 | * bio flags | 216 | * bio flags |
| 217 | */ | 217 | */ |
| 218 | #define BIO_NO_PAGE_REF 0 /* don't put release vec pages */ | ||
| 218 | #define BIO_SEG_VALID 1 /* bi_phys_segments valid */ | 219 | #define BIO_SEG_VALID 1 /* bi_phys_segments valid */ |
| 219 | #define BIO_CLONED 2 /* doesn't own data */ | 220 | #define BIO_CLONED 2 /* doesn't own data */ |
| 220 | #define BIO_BOUNCED 3 /* bio is a bounce bio */ | 221 | #define BIO_BOUNCED 3 /* bio is a bounce bio */ |
| @@ -287,7 +288,7 @@ enum req_opf { | |||
| 287 | REQ_OP_DISCARD = 3, | 288 | REQ_OP_DISCARD = 3, |
| 288 | /* securely erase sectors */ | 289 | /* securely erase sectors */ |
| 289 | REQ_OP_SECURE_ERASE = 5, | 290 | REQ_OP_SECURE_ERASE = 5, |
| 290 | /* seset a zone write pointer */ | 291 | /* reset a zone write pointer */ |
| 291 | REQ_OP_ZONE_RESET = 6, | 292 | REQ_OP_ZONE_RESET = 6, |
| 292 | /* write the same sector many times */ | 293 | /* write the same sector many times */ |
| 293 | REQ_OP_WRITE_SAME = 7, | 294 | REQ_OP_WRITE_SAME = 7, |
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 338604dff7d0..317ab30d2904 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
| @@ -50,6 +50,9 @@ struct blk_stat_callback; | |||
| 50 | /* Must be consistent with blk_mq_poll_stats_bkt() */ | 50 | /* Must be consistent with blk_mq_poll_stats_bkt() */ |
| 51 | #define BLK_MQ_POLL_STATS_BKTS 16 | 51 | #define BLK_MQ_POLL_STATS_BKTS 16 |
| 52 | 52 | ||
| 53 | /* Doing classic polling */ | ||
| 54 | #define BLK_MQ_POLL_CLASSIC -1 | ||
| 55 | |||
| 53 | /* | 56 | /* |
| 54 | * Maximum number of blkcg policies allowed to be registered concurrently. | 57 | * Maximum number of blkcg policies allowed to be registered concurrently. |
| 55 | * Defined here to simplify include dependency. | 58 | * Defined here to simplify include dependency. |
| @@ -216,8 +219,6 @@ struct request { | |||
| 216 | unsigned short write_hint; | 219 | unsigned short write_hint; |
| 217 | unsigned short ioprio; | 220 | unsigned short ioprio; |
| 218 | 221 | ||
| 219 | void *special; /* opaque pointer available for LLD use */ | ||
| 220 | |||
| 221 | unsigned int extra_len; /* length of alignment and padding */ | 222 | unsigned int extra_len; /* length of alignment and padding */ |
| 222 | 223 | ||
| 223 | enum mq_rq_state state; | 224 | enum mq_rq_state state; |
| @@ -236,9 +237,6 @@ struct request { | |||
| 236 | */ | 237 | */ |
| 237 | rq_end_io_fn *end_io; | 238 | rq_end_io_fn *end_io; |
| 238 | void *end_io_data; | 239 | void *end_io_data; |
| 239 | |||
| 240 | /* for bidi */ | ||
| 241 | struct request *next_rq; | ||
| 242 | }; | 240 | }; |
| 243 | 241 | ||
| 244 | static inline bool blk_op_is_scsi(unsigned int op) | 242 | static inline bool blk_op_is_scsi(unsigned int op) |
| @@ -550,7 +548,6 @@ struct request_queue { | |||
| 550 | struct rcu_head rcu_head; | 548 | struct rcu_head rcu_head; |
| 551 | wait_queue_head_t mq_freeze_wq; | 549 | wait_queue_head_t mq_freeze_wq; |
| 552 | struct percpu_ref q_usage_counter; | 550 | struct percpu_ref q_usage_counter; |
| 553 | struct list_head all_q_node; | ||
| 554 | 551 | ||
| 555 | struct blk_mq_tag_set *tag_set; | 552 | struct blk_mq_tag_set *tag_set; |
| 556 | struct list_head tag_set_list; | 553 | struct list_head tag_set_list; |
| @@ -572,38 +569,31 @@ struct request_queue { | |||
| 572 | u64 write_hints[BLK_MAX_WRITE_HINTS]; | 569 | u64 write_hints[BLK_MAX_WRITE_HINTS]; |
| 573 | }; | 570 | }; |
| 574 | 571 | ||
| 575 | #define QUEUE_FLAG_STOPPED 1 /* queue is stopped */ | 572 | #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */ |
| 576 | #define QUEUE_FLAG_DYING 2 /* queue being torn down */ | 573 | #define QUEUE_FLAG_DYING 1 /* queue being torn down */ |
| 577 | #define QUEUE_FLAG_BIDI 4 /* queue supports bidi requests */ | 574 | #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */ |
| 578 | #define QUEUE_FLAG_NOMERGES 5 /* disable merge attempts */ | 575 | #define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */ |
| 579 | #define QUEUE_FLAG_SAME_COMP 6 /* complete on same CPU-group */ | 576 | #define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */ |
| 580 | #define QUEUE_FLAG_FAIL_IO 7 /* fake timeout */ | 577 | #define QUEUE_FLAG_NONROT 6 /* non-rotational device (SSD) */ |
| 581 | #define QUEUE_FLAG_NONROT 9 /* non-rotational device (SSD) */ | 578 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ |
| 582 | #define QUEUE_FLAG_VIRT QUEUE_FLAG_NONROT /* paravirt device */ | 579 | #define QUEUE_FLAG_IO_STAT 7 /* do disk/partitions IO accounting */ |
| 583 | #define QUEUE_FLAG_IO_STAT 10 /* do disk/partitions IO accounting */ | 580 | #define QUEUE_FLAG_DISCARD 8 /* supports DISCARD */ |
| 584 | #define QUEUE_FLAG_DISCARD 11 /* supports DISCARD */ | 581 | #define QUEUE_FLAG_NOXMERGES 9 /* No extended merges */ |
| 585 | #define QUEUE_FLAG_NOXMERGES 12 /* No extended merges */ | 582 | #define QUEUE_FLAG_ADD_RANDOM 10 /* Contributes to random pool */ |
| 586 | #define QUEUE_FLAG_ADD_RANDOM 13 /* Contributes to random pool */ | 583 | #define QUEUE_FLAG_SECERASE 11 /* supports secure erase */ |
| 587 | #define QUEUE_FLAG_SECERASE 14 /* supports secure erase */ | 584 | #define QUEUE_FLAG_SAME_FORCE 12 /* force complete on same CPU */ |
| 588 | #define QUEUE_FLAG_SAME_FORCE 15 /* force complete on same CPU */ | 585 | #define QUEUE_FLAG_DEAD 13 /* queue tear-down finished */ |
| 589 | #define QUEUE_FLAG_DEAD 16 /* queue tear-down finished */ | 586 | #define QUEUE_FLAG_INIT_DONE 14 /* queue is initialized */ |
| 590 | #define QUEUE_FLAG_INIT_DONE 17 /* queue is initialized */ | 587 | #define QUEUE_FLAG_POLL 16 /* IO polling enabled if set */ |
| 591 | #define QUEUE_FLAG_NO_SG_MERGE 18 /* don't attempt to merge SG segments*/ | 588 | #define QUEUE_FLAG_WC 17 /* Write back caching */ |
| 592 | #define QUEUE_FLAG_POLL 19 /* IO polling enabled if set */ | 589 | #define QUEUE_FLAG_FUA 18 /* device supports FUA writes */ |
| 593 | #define QUEUE_FLAG_WC 20 /* Write back caching */ | 590 | #define QUEUE_FLAG_DAX 19 /* device supports DAX */ |
| 594 | #define QUEUE_FLAG_FUA 21 /* device supports FUA writes */ | 591 | #define QUEUE_FLAG_STATS 20 /* track IO start and completion times */ |
| 595 | #define QUEUE_FLAG_FLUSH_NQ 22 /* flush not queueuable */ | 592 | #define QUEUE_FLAG_POLL_STATS 21 /* collecting stats for hybrid polling */ |
| 596 | #define QUEUE_FLAG_DAX 23 /* device supports DAX */ | 593 | #define QUEUE_FLAG_REGISTERED 22 /* queue has been registered to a disk */ |
| 597 | #define QUEUE_FLAG_STATS 24 /* track IO start and completion times */ | 594 | #define QUEUE_FLAG_SCSI_PASSTHROUGH 23 /* queue supports SCSI commands */ |
| 598 | #define QUEUE_FLAG_POLL_STATS 25 /* collecting stats for hybrid polling */ | 595 | #define QUEUE_FLAG_QUIESCED 24 /* queue has been quiesced */ |
| 599 | #define QUEUE_FLAG_REGISTERED 26 /* queue has been registered to a disk */ | 596 | #define QUEUE_FLAG_PCI_P2PDMA 25 /* device supports PCI p2p requests */ |
| 600 | #define QUEUE_FLAG_SCSI_PASSTHROUGH 27 /* queue supports SCSI commands */ | ||
| 601 | #define QUEUE_FLAG_QUIESCED 28 /* queue has been quiesced */ | ||
| 602 | #define QUEUE_FLAG_PCI_P2PDMA 29 /* device supports PCI p2p requests */ | ||
| 603 | |||
| 604 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | ||
| 605 | (1 << QUEUE_FLAG_SAME_COMP) | \ | ||
| 606 | (1 << QUEUE_FLAG_ADD_RANDOM)) | ||
| 607 | 597 | ||
| 608 | #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 598 | #define QUEUE_FLAG_MQ_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
| 609 | (1 << QUEUE_FLAG_SAME_COMP)) | 599 | (1 << QUEUE_FLAG_SAME_COMP)) |
| @@ -646,8 +636,6 @@ static inline bool blk_account_rq(struct request *rq) | |||
| 646 | return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); | 636 | return (rq->rq_flags & RQF_STARTED) && !blk_rq_is_passthrough(rq); |
| 647 | } | 637 | } |
| 648 | 638 | ||
| 649 | #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) | ||
| 650 | |||
| 651 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) | 639 | #define list_entry_rq(ptr) list_entry((ptr), struct request, queuelist) |
| 652 | 640 | ||
| 653 | #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) | 641 | #define rq_data_dir(rq) (op_is_write(req_op(rq)) ? WRITE : READ) |
| @@ -797,6 +785,10 @@ struct req_iterator { | |||
| 797 | __rq_for_each_bio(_iter.bio, _rq) \ | 785 | __rq_for_each_bio(_iter.bio, _rq) \ |
| 798 | bio_for_each_segment(bvl, _iter.bio, _iter.iter) | 786 | bio_for_each_segment(bvl, _iter.bio, _iter.iter) |
| 799 | 787 | ||
| 788 | #define rq_for_each_bvec(bvl, _rq, _iter) \ | ||
| 789 | __rq_for_each_bio(_iter.bio, _rq) \ | ||
| 790 | bio_for_each_bvec(bvl, _iter.bio, _iter.iter) | ||
| 791 | |||
| 800 | #define rq_iter_last(bvec, _iter) \ | 792 | #define rq_iter_last(bvec, _iter) \ |
| 801 | (_iter.bio->bi_next == NULL && \ | 793 | (_iter.bio->bi_next == NULL && \ |
| 802 | bio_iter_last(bvec, _iter.iter)) | 794 | bio_iter_last(bvec, _iter.iter)) |
| @@ -1069,7 +1061,6 @@ extern void blk_queue_virt_boundary(struct request_queue *, unsigned long); | |||
| 1069 | extern void blk_queue_dma_alignment(struct request_queue *, int); | 1061 | extern void blk_queue_dma_alignment(struct request_queue *, int); |
| 1070 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); | 1062 | extern void blk_queue_update_dma_alignment(struct request_queue *, int); |
| 1071 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); | 1063 | extern void blk_queue_rq_timeout(struct request_queue *, unsigned int); |
| 1072 | extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable); | ||
| 1073 | extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); | 1064 | extern void blk_queue_write_cache(struct request_queue *q, bool enabled, bool fua); |
| 1074 | 1065 | ||
| 1075 | /* | 1066 | /* |
| @@ -1446,11 +1437,6 @@ static inline unsigned int block_size(struct block_device *bdev) | |||
| 1446 | return bdev->bd_block_size; | 1437 | return bdev->bd_block_size; |
| 1447 | } | 1438 | } |
| 1448 | 1439 | ||
| 1449 | static inline bool queue_flush_queueable(struct request_queue *q) | ||
| 1450 | { | ||
| 1451 | return !test_bit(QUEUE_FLAG_FLUSH_NQ, &q->queue_flags); | ||
| 1452 | } | ||
| 1453 | |||
| 1454 | typedef struct {struct page *v;} Sector; | 1440 | typedef struct {struct page *v;} Sector; |
| 1455 | 1441 | ||
| 1456 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); | 1442 | unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *); |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index 8804753805ac..7bb2d8de9f30 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -116,7 +116,13 @@ extern void blk_fill_rwbs(char *rwbs, unsigned int op, int bytes); | |||
| 116 | 116 | ||
| 117 | static inline sector_t blk_rq_trace_sector(struct request *rq) | 117 | static inline sector_t blk_rq_trace_sector(struct request *rq) |
| 118 | { | 118 | { |
| 119 | return blk_rq_is_passthrough(rq) ? 0 : blk_rq_pos(rq); | 119 | /* |
| 120 | * Tracing should ignore starting sector for passthrough requests and | ||
| 121 | * requests where starting sector didn't get set. | ||
| 122 | */ | ||
| 123 | if (blk_rq_is_passthrough(rq) || blk_rq_pos(rq) == (sector_t)-1) | ||
| 124 | return 0; | ||
| 125 | return blk_rq_pos(rq); | ||
| 120 | } | 126 | } |
| 121 | 127 | ||
| 122 | static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) | 128 | static inline unsigned int blk_rq_trace_nr_sectors(struct request *rq) |
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h index 588dd5f0bd85..a4c644c1c091 100644 --- a/include/linux/bpf-cgroup.h +++ b/include/linux/bpf-cgroup.h | |||
| @@ -78,7 +78,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp); | |||
| 78 | int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, | 78 | int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog, |
| 79 | enum bpf_attach_type type, u32 flags); | 79 | enum bpf_attach_type type, u32 flags); |
| 80 | int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, | 80 | int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog, |
| 81 | enum bpf_attach_type type, u32 flags); | 81 | enum bpf_attach_type type); |
| 82 | int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, | 82 | int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr, |
| 83 | union bpf_attr __user *uattr); | 83 | union bpf_attr __user *uattr); |
| 84 | 84 | ||
| @@ -292,7 +292,7 @@ static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog, | |||
| 292 | static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, | 292 | static inline void bpf_cgroup_storage_release(struct bpf_prog *prog, |
| 293 | struct bpf_map *map) {} | 293 | struct bpf_map *map) {} |
| 294 | static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( | 294 | static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc( |
| 295 | struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return 0; } | 295 | struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; } |
| 296 | static inline void bpf_cgroup_storage_free( | 296 | static inline void bpf_cgroup_storage_free( |
| 297 | struct bpf_cgroup_storage *storage) {} | 297 | struct bpf_cgroup_storage *storage) {} |
| 298 | static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, | 298 | static inline int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, |
diff --git a/include/linux/bpf.h b/include/linux/bpf.h index e734f163bd0b..944ccc310201 100644 --- a/include/linux/bpf.h +++ b/include/linux/bpf.h | |||
| @@ -16,6 +16,7 @@ | |||
| 16 | #include <linux/rbtree_latch.h> | 16 | #include <linux/rbtree_latch.h> |
| 17 | #include <linux/numa.h> | 17 | #include <linux/numa.h> |
| 18 | #include <linux/wait.h> | 18 | #include <linux/wait.h> |
| 19 | #include <linux/u64_stats_sync.h> | ||
| 19 | 20 | ||
| 20 | struct bpf_verifier_env; | 21 | struct bpf_verifier_env; |
| 21 | struct perf_event; | 22 | struct perf_event; |
| @@ -72,14 +73,15 @@ struct bpf_map { | |||
| 72 | u32 value_size; | 73 | u32 value_size; |
| 73 | u32 max_entries; | 74 | u32 max_entries; |
| 74 | u32 map_flags; | 75 | u32 map_flags; |
| 75 | u32 pages; | 76 | int spin_lock_off; /* >=0 valid offset, <0 error */ |
| 76 | u32 id; | 77 | u32 id; |
| 77 | int numa_node; | 78 | int numa_node; |
| 78 | u32 btf_key_type_id; | 79 | u32 btf_key_type_id; |
| 79 | u32 btf_value_type_id; | 80 | u32 btf_value_type_id; |
| 80 | struct btf *btf; | 81 | struct btf *btf; |
| 82 | u32 pages; | ||
| 81 | bool unpriv_array; | 83 | bool unpriv_array; |
| 82 | /* 55 bytes hole */ | 84 | /* 51 bytes hole */ |
| 83 | 85 | ||
| 84 | /* The 3rd and 4th cacheline with misc members to avoid false sharing | 86 | /* The 3rd and 4th cacheline with misc members to avoid false sharing |
| 85 | * particularly with refcounting. | 87 | * particularly with refcounting. |
| @@ -91,6 +93,36 @@ struct bpf_map { | |||
| 91 | char name[BPF_OBJ_NAME_LEN]; | 93 | char name[BPF_OBJ_NAME_LEN]; |
| 92 | }; | 94 | }; |
| 93 | 95 | ||
| 96 | static inline bool map_value_has_spin_lock(const struct bpf_map *map) | ||
| 97 | { | ||
| 98 | return map->spin_lock_off >= 0; | ||
| 99 | } | ||
| 100 | |||
| 101 | static inline void check_and_init_map_lock(struct bpf_map *map, void *dst) | ||
| 102 | { | ||
| 103 | if (likely(!map_value_has_spin_lock(map))) | ||
| 104 | return; | ||
| 105 | *(struct bpf_spin_lock *)(dst + map->spin_lock_off) = | ||
| 106 | (struct bpf_spin_lock){}; | ||
| 107 | } | ||
| 108 | |||
| 109 | /* copy everything but bpf_spin_lock */ | ||
| 110 | static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) | ||
| 111 | { | ||
| 112 | if (unlikely(map_value_has_spin_lock(map))) { | ||
| 113 | u32 off = map->spin_lock_off; | ||
| 114 | |||
| 115 | memcpy(dst, src, off); | ||
| 116 | memcpy(dst + off + sizeof(struct bpf_spin_lock), | ||
| 117 | src + off + sizeof(struct bpf_spin_lock), | ||
| 118 | map->value_size - off - sizeof(struct bpf_spin_lock)); | ||
| 119 | } else { | ||
| 120 | memcpy(dst, src, map->value_size); | ||
| 121 | } | ||
| 122 | } | ||
| 123 | void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, | ||
| 124 | bool lock_src); | ||
| 125 | |||
| 94 | struct bpf_offload_dev; | 126 | struct bpf_offload_dev; |
| 95 | struct bpf_offloaded_map; | 127 | struct bpf_offloaded_map; |
| 96 | 128 | ||
| @@ -161,7 +193,8 @@ enum bpf_arg_type { | |||
| 161 | 193 | ||
| 162 | ARG_PTR_TO_CTX, /* pointer to context */ | 194 | ARG_PTR_TO_CTX, /* pointer to context */ |
| 163 | ARG_ANYTHING, /* any (initialized) argument is ok */ | 195 | ARG_ANYTHING, /* any (initialized) argument is ok */ |
| 164 | ARG_PTR_TO_SOCKET, /* pointer to bpf_sock */ | 196 | ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ |
| 197 | ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ | ||
| 165 | }; | 198 | }; |
| 166 | 199 | ||
| 167 | /* type of values returned from helper functions */ | 200 | /* type of values returned from helper functions */ |
| @@ -171,6 +204,7 @@ enum bpf_return_type { | |||
| 171 | RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ | 204 | RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ |
| 172 | RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ | 205 | RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */ |
| 173 | RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ | 206 | RET_PTR_TO_SOCKET_OR_NULL, /* returns a pointer to a socket or NULL */ |
| 207 | RET_PTR_TO_TCP_SOCK_OR_NULL, /* returns a pointer to a tcp_sock or NULL */ | ||
| 174 | }; | 208 | }; |
| 175 | 209 | ||
| 176 | /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs | 210 | /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs |
| @@ -224,6 +258,10 @@ enum bpf_reg_type { | |||
| 224 | PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ | 258 | PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ |
| 225 | PTR_TO_SOCKET, /* reg points to struct bpf_sock */ | 259 | PTR_TO_SOCKET, /* reg points to struct bpf_sock */ |
| 226 | PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ | 260 | PTR_TO_SOCKET_OR_NULL, /* reg points to struct bpf_sock or NULL */ |
| 261 | PTR_TO_SOCK_COMMON, /* reg points to sock_common */ | ||
| 262 | PTR_TO_SOCK_COMMON_OR_NULL, /* reg points to sock_common or NULL */ | ||
| 263 | PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ | ||
| 264 | PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */ | ||
| 227 | }; | 265 | }; |
| 228 | 266 | ||
| 229 | /* The information passed from prog-specific *_is_valid_access | 267 | /* The information passed from prog-specific *_is_valid_access |
| @@ -268,9 +306,15 @@ struct bpf_verifier_ops { | |||
| 268 | }; | 306 | }; |
| 269 | 307 | ||
| 270 | struct bpf_prog_offload_ops { | 308 | struct bpf_prog_offload_ops { |
| 309 | /* verifier basic callbacks */ | ||
| 271 | int (*insn_hook)(struct bpf_verifier_env *env, | 310 | int (*insn_hook)(struct bpf_verifier_env *env, |
| 272 | int insn_idx, int prev_insn_idx); | 311 | int insn_idx, int prev_insn_idx); |
| 273 | int (*finalize)(struct bpf_verifier_env *env); | 312 | int (*finalize)(struct bpf_verifier_env *env); |
| 313 | /* verifier optimization callbacks (called after .finalize) */ | ||
| 314 | int (*replace_insn)(struct bpf_verifier_env *env, u32 off, | ||
| 315 | struct bpf_insn *insn); | ||
| 316 | int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); | ||
| 317 | /* program management callbacks */ | ||
| 274 | int (*prepare)(struct bpf_prog *prog); | 318 | int (*prepare)(struct bpf_prog *prog); |
| 275 | int (*translate)(struct bpf_prog *prog); | 319 | int (*translate)(struct bpf_prog *prog); |
| 276 | void (*destroy)(struct bpf_prog *prog); | 320 | void (*destroy)(struct bpf_prog *prog); |
| @@ -283,6 +327,7 @@ struct bpf_prog_offload { | |||
| 283 | void *dev_priv; | 327 | void *dev_priv; |
| 284 | struct list_head offloads; | 328 | struct list_head offloads; |
| 285 | bool dev_state; | 329 | bool dev_state; |
| 330 | bool opt_failed; | ||
| 286 | void *jited_image; | 331 | void *jited_image; |
| 287 | u32 jited_len; | 332 | u32 jited_len; |
| 288 | }; | 333 | }; |
| @@ -295,6 +340,12 @@ enum bpf_cgroup_storage_type { | |||
| 295 | 340 | ||
| 296 | #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX | 341 | #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX |
| 297 | 342 | ||
| 343 | struct bpf_prog_stats { | ||
| 344 | u64 cnt; | ||
| 345 | u64 nsecs; | ||
| 346 | struct u64_stats_sync syncp; | ||
| 347 | }; | ||
| 348 | |||
| 298 | struct bpf_prog_aux { | 349 | struct bpf_prog_aux { |
| 299 | atomic_t refcnt; | 350 | atomic_t refcnt; |
| 300 | u32 used_map_cnt; | 351 | u32 used_map_cnt; |
| @@ -344,6 +395,7 @@ struct bpf_prog_aux { | |||
| 344 | * main prog always has linfo_idx == 0 | 395 | * main prog always has linfo_idx == 0 |
| 345 | */ | 396 | */ |
| 346 | u32 linfo_idx; | 397 | u32 linfo_idx; |
| 398 | struct bpf_prog_stats __percpu *stats; | ||
| 347 | union { | 399 | union { |
| 348 | struct work_struct work; | 400 | struct work_struct work; |
| 349 | struct rcu_head rcu; | 401 | struct rcu_head rcu; |
| @@ -397,6 +449,9 @@ int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, | |||
| 397 | union bpf_attr __user *uattr); | 449 | union bpf_attr __user *uattr); |
| 398 | int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | 450 | int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, |
| 399 | union bpf_attr __user *uattr); | 451 | union bpf_attr __user *uattr); |
| 452 | int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, | ||
| 453 | const union bpf_attr *kattr, | ||
| 454 | union bpf_attr __user *uattr); | ||
| 400 | 455 | ||
| 401 | /* an array of programs to be executed under rcu_lock. | 456 | /* an array of programs to be executed under rcu_lock. |
| 402 | * | 457 | * |
| @@ -455,7 +510,7 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array, | |||
| 455 | } \ | 510 | } \ |
| 456 | _out: \ | 511 | _out: \ |
| 457 | rcu_read_unlock(); \ | 512 | rcu_read_unlock(); \ |
| 458 | preempt_enable_no_resched(); \ | 513 | preempt_enable(); \ |
| 459 | _ret; \ | 514 | _ret; \ |
| 460 | }) | 515 | }) |
| 461 | 516 | ||
| @@ -511,6 +566,7 @@ void bpf_map_area_free(void *base); | |||
| 511 | void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); | 566 | void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); |
| 512 | 567 | ||
| 513 | extern int sysctl_unprivileged_bpf_disabled; | 568 | extern int sysctl_unprivileged_bpf_disabled; |
| 569 | extern int sysctl_bpf_stats_enabled; | ||
| 514 | 570 | ||
| 515 | int bpf_map_new_fd(struct bpf_map *map, int flags); | 571 | int bpf_map_new_fd(struct bpf_map *map, int flags); |
| 516 | int bpf_prog_new_fd(struct bpf_prog *prog); | 572 | int bpf_prog_new_fd(struct bpf_prog *prog); |
| @@ -725,8 +781,9 @@ int bpf_map_offload_get_next_key(struct bpf_map *map, | |||
| 725 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); | 781 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); |
| 726 | 782 | ||
| 727 | struct bpf_offload_dev * | 783 | struct bpf_offload_dev * |
| 728 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops); | 784 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); |
| 729 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); | 785 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); |
| 786 | void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); | ||
| 730 | int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, | 787 | int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, |
| 731 | struct net_device *netdev); | 788 | struct net_device *netdev); |
| 732 | void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, | 789 | void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, |
| @@ -869,7 +926,8 @@ extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; | |||
| 869 | extern const struct bpf_func_proto bpf_msg_redirect_map_proto; | 926 | extern const struct bpf_func_proto bpf_msg_redirect_map_proto; |
| 870 | extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; | 927 | extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; |
| 871 | extern const struct bpf_func_proto bpf_sk_redirect_map_proto; | 928 | extern const struct bpf_func_proto bpf_sk_redirect_map_proto; |
| 872 | 929 | extern const struct bpf_func_proto bpf_spin_lock_proto; | |
| 930 | extern const struct bpf_func_proto bpf_spin_unlock_proto; | ||
| 873 | extern const struct bpf_func_proto bpf_get_local_storage_proto; | 931 | extern const struct bpf_func_proto bpf_get_local_storage_proto; |
| 874 | 932 | ||
| 875 | /* Shared helpers among cBPF and eBPF. */ | 933 | /* Shared helpers among cBPF and eBPF. */ |
| @@ -877,6 +935,9 @@ void bpf_user_rnd_init_once(void); | |||
| 877 | u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | 935 | u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
| 878 | 936 | ||
| 879 | #if defined(CONFIG_NET) | 937 | #if defined(CONFIG_NET) |
| 938 | bool bpf_sock_common_is_valid_access(int off, int size, | ||
| 939 | enum bpf_access_type type, | ||
| 940 | struct bpf_insn_access_aux *info); | ||
| 880 | bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, | 941 | bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
| 881 | struct bpf_insn_access_aux *info); | 942 | struct bpf_insn_access_aux *info); |
| 882 | u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | 943 | u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, |
| @@ -885,6 +946,12 @@ u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |||
| 885 | struct bpf_prog *prog, | 946 | struct bpf_prog *prog, |
| 886 | u32 *target_size); | 947 | u32 *target_size); |
| 887 | #else | 948 | #else |
| 949 | static inline bool bpf_sock_common_is_valid_access(int off, int size, | ||
| 950 | enum bpf_access_type type, | ||
| 951 | struct bpf_insn_access_aux *info) | ||
| 952 | { | ||
| 953 | return false; | ||
| 954 | } | ||
| 888 | static inline bool bpf_sock_is_valid_access(int off, int size, | 955 | static inline bool bpf_sock_is_valid_access(int off, int size, |
| 889 | enum bpf_access_type type, | 956 | enum bpf_access_type type, |
| 890 | struct bpf_insn_access_aux *info) | 957 | struct bpf_insn_access_aux *info) |
| @@ -901,4 +968,31 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |||
| 901 | } | 968 | } |
| 902 | #endif | 969 | #endif |
| 903 | 970 | ||
| 971 | #ifdef CONFIG_INET | ||
| 972 | bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, | ||
| 973 | struct bpf_insn_access_aux *info); | ||
| 974 | |||
| 975 | u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | ||
| 976 | const struct bpf_insn *si, | ||
| 977 | struct bpf_insn *insn_buf, | ||
| 978 | struct bpf_prog *prog, | ||
| 979 | u32 *target_size); | ||
| 980 | #else | ||
| 981 | static inline bool bpf_tcp_sock_is_valid_access(int off, int size, | ||
| 982 | enum bpf_access_type type, | ||
| 983 | struct bpf_insn_access_aux *info) | ||
| 984 | { | ||
| 985 | return false; | ||
| 986 | } | ||
| 987 | |||
| 988 | static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | ||
| 989 | const struct bpf_insn *si, | ||
| 990 | struct bpf_insn *insn_buf, | ||
| 991 | struct bpf_prog *prog, | ||
| 992 | u32 *target_size) | ||
| 993 | { | ||
| 994 | return 0; | ||
| 995 | } | ||
| 996 | #endif /* CONFIG_INET */ | ||
| 997 | |||
| 904 | #endif /* _LINUX_BPF_H */ | 998 | #endif /* _LINUX_BPF_H */ |
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h index 44d9ab4809bd..08bf2f1fe553 100644 --- a/include/linux/bpf_types.h +++ b/include/linux/bpf_types.h | |||
| @@ -6,9 +6,11 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter) | |||
| 6 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) | 6 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act) |
| 7 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) | 7 | BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act) |
| 8 | BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) | 8 | BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp) |
| 9 | #ifdef CONFIG_CGROUP_BPF | ||
| 9 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) | 10 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb) |
| 10 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) | 11 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock) |
| 11 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) | 12 | BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr) |
| 13 | #endif | ||
| 12 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) | 14 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in) |
| 13 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) | 15 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out) |
| 14 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) | 16 | BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit) |
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h index 27b74947cd2b..7d8228d1c898 100644 --- a/include/linux/bpf_verifier.h +++ b/include/linux/bpf_verifier.h | |||
| @@ -66,6 +66,46 @@ struct bpf_reg_state { | |||
| 66 | * same reference to the socket, to determine proper reference freeing. | 66 | * same reference to the socket, to determine proper reference freeing. |
| 67 | */ | 67 | */ |
| 68 | u32 id; | 68 | u32 id; |
| 69 | /* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned | ||
| 70 | * from a pointer-cast helper, bpf_sk_fullsock() and | ||
| 71 | * bpf_tcp_sock(). | ||
| 72 | * | ||
| 73 | * Consider the following where "sk" is a reference counted | ||
| 74 | * pointer returned from "sk = bpf_sk_lookup_tcp();": | ||
| 75 | * | ||
| 76 | * 1: sk = bpf_sk_lookup_tcp(); | ||
| 77 | * 2: if (!sk) { return 0; } | ||
| 78 | * 3: fullsock = bpf_sk_fullsock(sk); | ||
| 79 | * 4: if (!fullsock) { bpf_sk_release(sk); return 0; } | ||
| 80 | * 5: tp = bpf_tcp_sock(fullsock); | ||
| 81 | * 6: if (!tp) { bpf_sk_release(sk); return 0; } | ||
| 82 | * 7: bpf_sk_release(sk); | ||
| 83 | * 8: snd_cwnd = tp->snd_cwnd; // verifier will complain | ||
| 84 | * | ||
| 85 | * After bpf_sk_release(sk) at line 7, both "fullsock" ptr and | ||
| 86 | * "tp" ptr should be invalidated also. In order to do that, | ||
| 87 | * the reg holding "fullsock" and "sk" need to remember | ||
| 88 | * the original refcounted ptr id (i.e. sk_reg->id) in ref_obj_id | ||
| 89 | * such that the verifier can reset all regs which have | ||
| 90 | * ref_obj_id matching the sk_reg->id. | ||
| 91 | * | ||
| 92 | * sk_reg->ref_obj_id is set to sk_reg->id at line 1. | ||
| 93 | * sk_reg->id will stay as NULL-marking purpose only. | ||
| 94 | * After NULL-marking is done, sk_reg->id can be reset to 0. | ||
| 95 | * | ||
| 96 | * After "fullsock = bpf_sk_fullsock(sk);" at line 3, | ||
| 97 | * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id. | ||
| 98 | * | ||
| 99 | * After "tp = bpf_tcp_sock(fullsock);" at line 5, | ||
| 100 | * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id | ||
| 101 | * which is the same as sk_reg->ref_obj_id. | ||
| 102 | * | ||
| 103 | * From the verifier perspective, if sk, fullsock and tp | ||
| 104 | * are not NULL, they are the same ptr with different | ||
| 105 | * reg->type. In particular, bpf_sk_release(tp) is also | ||
| 106 | * allowed and has the same effect as bpf_sk_release(sk). | ||
| 107 | */ | ||
| 108 | u32 ref_obj_id; | ||
| 69 | /* For scalar types (SCALAR_VALUE), this represents our knowledge of | 109 | /* For scalar types (SCALAR_VALUE), this represents our knowledge of |
| 70 | * the actual value. | 110 | * the actual value. |
| 71 | * For pointer types, this represents the variable part of the offset | 111 | * For pointer types, this represents the variable part of the offset |
| @@ -148,6 +188,7 @@ struct bpf_verifier_state { | |||
| 148 | /* call stack tracking */ | 188 | /* call stack tracking */ |
| 149 | struct bpf_func_state *frame[MAX_CALL_FRAMES]; | 189 | struct bpf_func_state *frame[MAX_CALL_FRAMES]; |
| 150 | u32 curframe; | 190 | u32 curframe; |
| 191 | u32 active_spin_lock; | ||
| 151 | bool speculative; | 192 | bool speculative; |
| 152 | }; | 193 | }; |
| 153 | 194 | ||
| @@ -172,6 +213,7 @@ struct bpf_verifier_state_list { | |||
| 172 | #define BPF_ALU_SANITIZE_SRC 1U | 213 | #define BPF_ALU_SANITIZE_SRC 1U |
| 173 | #define BPF_ALU_SANITIZE_DST 2U | 214 | #define BPF_ALU_SANITIZE_DST 2U |
| 174 | #define BPF_ALU_NEG_VALUE (1U << 2) | 215 | #define BPF_ALU_NEG_VALUE (1U << 2) |
| 216 | #define BPF_ALU_NON_POINTER (1U << 3) | ||
| 175 | #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ | 217 | #define BPF_ALU_SANITIZE (BPF_ALU_SANITIZE_SRC | \ |
| 176 | BPF_ALU_SANITIZE_DST) | 218 | BPF_ALU_SANITIZE_DST) |
| 177 | 219 | ||
| @@ -186,6 +228,7 @@ struct bpf_insn_aux_data { | |||
| 186 | int sanitize_stack_off; /* stack slot to be cleared */ | 228 | int sanitize_stack_off; /* stack slot to be cleared */ |
| 187 | bool seen; /* this insn was processed by the verifier */ | 229 | bool seen; /* this insn was processed by the verifier */ |
| 188 | u8 alu_state; /* used in combination with alu_limit */ | 230 | u8 alu_state; /* used in combination with alu_limit */ |
| 231 | unsigned int orig_idx; /* original instruction index */ | ||
| 189 | }; | 232 | }; |
| 190 | 233 | ||
| 191 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ | 234 | #define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */ |
| @@ -264,5 +307,10 @@ int bpf_prog_offload_verifier_prep(struct bpf_prog *prog); | |||
| 264 | int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, | 307 | int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env, |
| 265 | int insn_idx, int prev_insn_idx); | 308 | int insn_idx, int prev_insn_idx); |
| 266 | int bpf_prog_offload_finalize(struct bpf_verifier_env *env); | 309 | int bpf_prog_offload_finalize(struct bpf_verifier_env *env); |
| 310 | void | ||
| 311 | bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off, | ||
| 312 | struct bpf_insn *insn); | ||
| 313 | void | ||
| 314 | bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt); | ||
| 267 | 315 | ||
| 268 | #endif /* _LINUX_BPF_VERIFIER_H */ | 316 | #endif /* _LINUX_BPF_VERIFIER_H */ |
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h index f02cee0225d4..d815622cd31e 100644 --- a/include/linux/bpfilter.h +++ b/include/linux/bpfilter.h | |||
| @@ -3,13 +3,22 @@ | |||
| 3 | #define _LINUX_BPFILTER_H | 3 | #define _LINUX_BPFILTER_H |
| 4 | 4 | ||
| 5 | #include <uapi/linux/bpfilter.h> | 5 | #include <uapi/linux/bpfilter.h> |
| 6 | #include <linux/umh.h> | ||
| 6 | 7 | ||
| 7 | struct sock; | 8 | struct sock; |
| 8 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, | 9 | int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval, |
| 9 | unsigned int optlen); | 10 | unsigned int optlen); |
| 10 | int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, | 11 | int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval, |
| 11 | int __user *optlen); | 12 | int __user *optlen); |
| 12 | extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname, | 13 | struct bpfilter_umh_ops { |
| 13 | char __user *optval, | 14 | struct umh_info info; |
| 14 | unsigned int optlen, bool is_set); | 15 | /* since ip_getsockopt() can run in parallel, serialize access to umh */ |
| 16 | struct mutex lock; | ||
| 17 | int (*sockopt)(struct sock *sk, int optname, | ||
| 18 | char __user *optval, | ||
| 19 | unsigned int optlen, bool is_set); | ||
| 20 | int (*start)(void); | ||
| 21 | bool stop; | ||
| 22 | }; | ||
| 23 | extern struct bpfilter_umh_ops bpfilter_ops; | ||
| 15 | #endif | 24 | #endif |
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h index 9cd00a37b8d3..6db2d9a6e503 100644 --- a/include/linux/brcmphy.h +++ b/include/linux/brcmphy.h | |||
| @@ -148,6 +148,22 @@ | |||
| 148 | #define BCM_LED_SRC_OFF 0xe /* Tied high */ | 148 | #define BCM_LED_SRC_OFF 0xe /* Tied high */ |
| 149 | #define BCM_LED_SRC_ON 0xf /* Tied low */ | 149 | #define BCM_LED_SRC_ON 0xf /* Tied low */ |
| 150 | 150 | ||
| 151 | /* | ||
| 152 | * Broadcom Multicolor LED configurations (expansion register 4) | ||
| 153 | */ | ||
| 154 | #define BCM_EXP_MULTICOLOR (MII_BCM54XX_EXP_SEL_ER + 0x04) | ||
| 155 | #define BCM_LED_MULTICOLOR_IN_PHASE BIT(8) | ||
| 156 | #define BCM_LED_MULTICOLOR_LINK_ACT 0x0 | ||
| 157 | #define BCM_LED_MULTICOLOR_SPEED 0x1 | ||
| 158 | #define BCM_LED_MULTICOLOR_ACT_FLASH 0x2 | ||
| 159 | #define BCM_LED_MULTICOLOR_FDX 0x3 | ||
| 160 | #define BCM_LED_MULTICOLOR_OFF 0x4 | ||
| 161 | #define BCM_LED_MULTICOLOR_ON 0x5 | ||
| 162 | #define BCM_LED_MULTICOLOR_ALT 0x6 | ||
| 163 | #define BCM_LED_MULTICOLOR_FLASH 0x7 | ||
| 164 | #define BCM_LED_MULTICOLOR_LINK 0x8 | ||
| 165 | #define BCM_LED_MULTICOLOR_ACT 0x9 | ||
| 166 | #define BCM_LED_MULTICOLOR_PROGRAM 0xa | ||
| 151 | 167 | ||
| 152 | /* | 168 | /* |
| 153 | * BCM5482: Shadow registers | 169 | * BCM5482: Shadow registers |
diff --git a/include/linux/bsg-lib.h b/include/linux/bsg-lib.h index b356e0006731..7f14517a559b 100644 --- a/include/linux/bsg-lib.h +++ b/include/linux/bsg-lib.h | |||
| @@ -69,6 +69,10 @@ struct bsg_job { | |||
| 69 | int result; | 69 | int result; |
| 70 | unsigned int reply_payload_rcv_len; | 70 | unsigned int reply_payload_rcv_len; |
| 71 | 71 | ||
| 72 | /* BIDI support */ | ||
| 73 | struct request *bidi_rq; | ||
| 74 | struct bio *bidi_bio; | ||
| 75 | |||
| 72 | void *dd_data; /* Used for driver-specific storage */ | 76 | void *dd_data; /* Used for driver-specific storage */ |
| 73 | }; | 77 | }; |
| 74 | 78 | ||
diff --git a/include/linux/btf.h b/include/linux/btf.h index 12502e25e767..455d31b55828 100644 --- a/include/linux/btf.h +++ b/include/linux/btf.h | |||
| @@ -50,6 +50,7 @@ u32 btf_id(const struct btf *btf); | |||
| 50 | bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, | 50 | bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s, |
| 51 | const struct btf_member *m, | 51 | const struct btf_member *m, |
| 52 | u32 expected_offset, u32 expected_size); | 52 | u32 expected_offset, u32 expected_size); |
| 53 | int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t); | ||
| 53 | 54 | ||
| 54 | #ifdef CONFIG_BPF_SYSCALL | 55 | #ifdef CONFIG_BPF_SYSCALL |
| 55 | const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); | 56 | const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id); |
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h index faeec7433aab..0fe5426f2bdc 100644 --- a/include/linux/build_bug.h +++ b/include/linux/build_bug.h | |||
| @@ -58,4 +58,23 @@ | |||
| 58 | */ | 58 | */ |
| 59 | #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") | 59 | #define BUILD_BUG() BUILD_BUG_ON_MSG(1, "BUILD_BUG failed") |
| 60 | 60 | ||
| 61 | /** | ||
| 62 | * static_assert - check integer constant expression at build time | ||
| 63 | * | ||
| 64 | * static_assert() is a wrapper for the C11 _Static_assert, with a | ||
| 65 | * little macro magic to make the message optional (defaulting to the | ||
| 66 | * stringification of the tested expression). | ||
| 67 | * | ||
| 68 | * Contrary to BUILD_BUG_ON(), static_assert() can be used at global | ||
| 69 | * scope, but requires the expression to be an integer constant | ||
| 70 | * expression (i.e., it is not enough that __builtin_constant_p() is | ||
| 71 | * true for expr). | ||
| 72 | * | ||
| 73 | * Also note that BUILD_BUG_ON() fails the build if the condition is | ||
| 74 | * true, while static_assert() fails the build if the expression is | ||
| 75 | * false. | ||
| 76 | */ | ||
| 77 | #define static_assert(expr, ...) __static_assert(expr, ##__VA_ARGS__, #expr) | ||
| 78 | #define __static_assert(expr, msg, ...) _Static_assert(expr, msg) | ||
| 79 | |||
| 61 | #endif /* _LINUX_BUILD_BUG_H */ | 80 | #endif /* _LINUX_BUILD_BUG_H */ |
diff --git a/include/linux/bvec.h b/include/linux/bvec.h index 02c73c6aa805..ff13cbc1887d 100644 --- a/include/linux/bvec.h +++ b/include/linux/bvec.h | |||
| @@ -23,6 +23,7 @@ | |||
| 23 | #include <linux/kernel.h> | 23 | #include <linux/kernel.h> |
| 24 | #include <linux/bug.h> | 24 | #include <linux/bug.h> |
| 25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
| 26 | #include <linux/mm.h> | ||
| 26 | 27 | ||
| 27 | /* | 28 | /* |
| 28 | * was unsigned short, but we might as well be ready for > 64kB I/O pages | 29 | * was unsigned short, but we might as well be ready for > 64kB I/O pages |
| @@ -44,22 +45,56 @@ struct bvec_iter { | |||
| 44 | current bvec */ | 45 | current bvec */ |
| 45 | }; | 46 | }; |
| 46 | 47 | ||
| 48 | struct bvec_iter_all { | ||
| 49 | struct bio_vec bv; | ||
| 50 | int idx; | ||
| 51 | unsigned done; | ||
| 52 | }; | ||
| 53 | |||
| 54 | static inline struct page *bvec_nth_page(struct page *page, int idx) | ||
| 55 | { | ||
| 56 | return idx == 0 ? page : nth_page(page, idx); | ||
| 57 | } | ||
| 58 | |||
| 47 | /* | 59 | /* |
| 48 | * various member access, note that bio_data should of course not be used | 60 | * various member access, note that bio_data should of course not be used |
| 49 | * on highmem page vectors | 61 | * on highmem page vectors |
| 50 | */ | 62 | */ |
| 51 | #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) | 63 | #define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx]) |
| 52 | 64 | ||
| 53 | #define bvec_iter_page(bvec, iter) \ | 65 | /* multi-page (mp_bvec) helpers */ |
| 66 | #define mp_bvec_iter_page(bvec, iter) \ | ||
| 54 | (__bvec_iter_bvec((bvec), (iter))->bv_page) | 67 | (__bvec_iter_bvec((bvec), (iter))->bv_page) |
| 55 | 68 | ||
| 56 | #define bvec_iter_len(bvec, iter) \ | 69 | #define mp_bvec_iter_len(bvec, iter) \ |
| 57 | min((iter).bi_size, \ | 70 | min((iter).bi_size, \ |
| 58 | __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) | 71 | __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done) |
| 59 | 72 | ||
| 60 | #define bvec_iter_offset(bvec, iter) \ | 73 | #define mp_bvec_iter_offset(bvec, iter) \ |
| 61 | (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) | 74 | (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done) |
| 62 | 75 | ||
| 76 | #define mp_bvec_iter_page_idx(bvec, iter) \ | ||
| 77 | (mp_bvec_iter_offset((bvec), (iter)) / PAGE_SIZE) | ||
| 78 | |||
| 79 | #define mp_bvec_iter_bvec(bvec, iter) \ | ||
| 80 | ((struct bio_vec) { \ | ||
| 81 | .bv_page = mp_bvec_iter_page((bvec), (iter)), \ | ||
| 82 | .bv_len = mp_bvec_iter_len((bvec), (iter)), \ | ||
| 83 | .bv_offset = mp_bvec_iter_offset((bvec), (iter)), \ | ||
| 84 | }) | ||
| 85 | |||
| 86 | /* For building single-page bvec in flight */ | ||
| 87 | #define bvec_iter_offset(bvec, iter) \ | ||
| 88 | (mp_bvec_iter_offset((bvec), (iter)) % PAGE_SIZE) | ||
| 89 | |||
| 90 | #define bvec_iter_len(bvec, iter) \ | ||
| 91 | min_t(unsigned, mp_bvec_iter_len((bvec), (iter)), \ | ||
| 92 | PAGE_SIZE - bvec_iter_offset((bvec), (iter))) | ||
| 93 | |||
| 94 | #define bvec_iter_page(bvec, iter) \ | ||
| 95 | bvec_nth_page(mp_bvec_iter_page((bvec), (iter)), \ | ||
| 96 | mp_bvec_iter_page_idx((bvec), (iter))) | ||
| 97 | |||
| 63 | #define bvec_iter_bvec(bvec, iter) \ | 98 | #define bvec_iter_bvec(bvec, iter) \ |
| 64 | ((struct bio_vec) { \ | 99 | ((struct bio_vec) { \ |
| 65 | .bv_page = bvec_iter_page((bvec), (iter)), \ | 100 | .bv_page = bvec_iter_page((bvec), (iter)), \ |
| @@ -77,14 +112,15 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, | |||
| 77 | } | 112 | } |
| 78 | 113 | ||
| 79 | while (bytes) { | 114 | while (bytes) { |
| 80 | unsigned iter_len = bvec_iter_len(bv, *iter); | 115 | const struct bio_vec *cur = bv + iter->bi_idx; |
| 81 | unsigned len = min(bytes, iter_len); | 116 | unsigned len = min3(bytes, iter->bi_size, |
| 117 | cur->bv_len - iter->bi_bvec_done); | ||
| 82 | 118 | ||
| 83 | bytes -= len; | 119 | bytes -= len; |
| 84 | iter->bi_size -= len; | 120 | iter->bi_size -= len; |
| 85 | iter->bi_bvec_done += len; | 121 | iter->bi_bvec_done += len; |
| 86 | 122 | ||
| 87 | if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) { | 123 | if (iter->bi_bvec_done == cur->bv_len) { |
| 88 | iter->bi_bvec_done = 0; | 124 | iter->bi_bvec_done = 0; |
| 89 | iter->bi_idx++; | 125 | iter->bi_idx++; |
| 90 | } | 126 | } |
| @@ -92,30 +128,6 @@ static inline bool bvec_iter_advance(const struct bio_vec *bv, | |||
| 92 | return true; | 128 | return true; |
| 93 | } | 129 | } |
| 94 | 130 | ||
| 95 | static inline bool bvec_iter_rewind(const struct bio_vec *bv, | ||
| 96 | struct bvec_iter *iter, | ||
| 97 | unsigned int bytes) | ||
| 98 | { | ||
| 99 | while (bytes) { | ||
| 100 | unsigned len = min(bytes, iter->bi_bvec_done); | ||
| 101 | |||
| 102 | if (iter->bi_bvec_done == 0) { | ||
| 103 | if (WARN_ONCE(iter->bi_idx == 0, | ||
| 104 | "Attempted to rewind iter beyond " | ||
| 105 | "bvec's boundaries\n")) { | ||
| 106 | return false; | ||
| 107 | } | ||
| 108 | iter->bi_idx--; | ||
| 109 | iter->bi_bvec_done = __bvec_iter_bvec(bv, *iter)->bv_len; | ||
| 110 | continue; | ||
| 111 | } | ||
| 112 | bytes -= len; | ||
| 113 | iter->bi_size += len; | ||
| 114 | iter->bi_bvec_done -= len; | ||
| 115 | } | ||
| 116 | return true; | ||
| 117 | } | ||
| 118 | |||
| 119 | #define for_each_bvec(bvl, bio_vec, iter, start) \ | 131 | #define for_each_bvec(bvl, bio_vec, iter, start) \ |
| 120 | for (iter = (start); \ | 132 | for (iter = (start); \ |
| 121 | (iter).bi_size && \ | 133 | (iter).bi_size && \ |
| @@ -131,4 +143,62 @@ static inline bool bvec_iter_rewind(const struct bio_vec *bv, | |||
| 131 | .bi_bvec_done = 0, \ | 143 | .bi_bvec_done = 0, \ |
| 132 | } | 144 | } |
| 133 | 145 | ||
| 146 | static inline struct bio_vec *bvec_init_iter_all(struct bvec_iter_all *iter_all) | ||
| 147 | { | ||
| 148 | iter_all->done = 0; | ||
| 149 | iter_all->idx = 0; | ||
| 150 | |||
| 151 | return &iter_all->bv; | ||
| 152 | } | ||
| 153 | |||
| 154 | static inline void bvec_advance(const struct bio_vec *bvec, | ||
| 155 | struct bvec_iter_all *iter_all) | ||
| 156 | { | ||
| 157 | struct bio_vec *bv = &iter_all->bv; | ||
| 158 | |||
| 159 | if (iter_all->done) { | ||
| 160 | bv->bv_page = nth_page(bv->bv_page, 1); | ||
| 161 | bv->bv_offset = 0; | ||
| 162 | } else { | ||
| 163 | bv->bv_page = bvec_nth_page(bvec->bv_page, bvec->bv_offset / | ||
| 164 | PAGE_SIZE); | ||
| 165 | bv->bv_offset = bvec->bv_offset & ~PAGE_MASK; | ||
| 166 | } | ||
| 167 | bv->bv_len = min_t(unsigned int, PAGE_SIZE - bv->bv_offset, | ||
| 168 | bvec->bv_len - iter_all->done); | ||
| 169 | iter_all->done += bv->bv_len; | ||
| 170 | |||
| 171 | if (iter_all->done == bvec->bv_len) { | ||
| 172 | iter_all->idx++; | ||
| 173 | iter_all->done = 0; | ||
| 174 | } | ||
| 175 | } | ||
| 176 | |||
| 177 | /* | ||
| 178 | * Get the last single-page segment from the multi-page bvec and store it | ||
| 179 | * in @seg | ||
| 180 | */ | ||
| 181 | static inline void mp_bvec_last_segment(const struct bio_vec *bvec, | ||
| 182 | struct bio_vec *seg) | ||
| 183 | { | ||
| 184 | unsigned total = bvec->bv_offset + bvec->bv_len; | ||
| 185 | unsigned last_page = (total - 1) / PAGE_SIZE; | ||
| 186 | |||
| 187 | seg->bv_page = bvec_nth_page(bvec->bv_page, last_page); | ||
| 188 | |||
| 189 | /* the whole segment is inside the last page */ | ||
| 190 | if (bvec->bv_offset >= last_page * PAGE_SIZE) { | ||
| 191 | seg->bv_offset = bvec->bv_offset % PAGE_SIZE; | ||
| 192 | seg->bv_len = bvec->bv_len; | ||
| 193 | } else { | ||
| 194 | seg->bv_offset = 0; | ||
| 195 | seg->bv_len = total - last_page * PAGE_SIZE; | ||
| 196 | } | ||
| 197 | } | ||
| 198 | |||
| 199 | #define mp_bvec_for_each_page(pg, bv, i) \ | ||
| 200 | for (i = (bv)->bv_offset / PAGE_SIZE; \ | ||
| 201 | (i <= (((bv)->bv_offset + (bv)->bv_len - 1) / PAGE_SIZE)) && \ | ||
| 202 | (pg = bvec_nth_page((bv)->bv_page, i)); i += 1) | ||
| 203 | |||
| 134 | #endif /* __LINUX_BVEC_ITER_H */ | 204 | #endif /* __LINUX_BVEC_ITER_H */ |
diff --git a/include/linux/capability.h b/include/linux/capability.h index f640dcbc880c..ecce0f43c73a 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #define _LINUX_CAPABILITY_H | 14 | #define _LINUX_CAPABILITY_H |
| 15 | 15 | ||
| 16 | #include <uapi/linux/capability.h> | 16 | #include <uapi/linux/capability.h> |
| 17 | 17 | #include <linux/uidgid.h> | |
| 18 | 18 | ||
| 19 | #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 | 19 | #define _KERNEL_CAPABILITY_VERSION _LINUX_CAPABILITY_VERSION_3 |
| 20 | #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 | 20 | #define _KERNEL_CAPABILITY_U32S _LINUX_CAPABILITY_U32S_3 |
| @@ -25,11 +25,12 @@ typedef struct kernel_cap_struct { | |||
| 25 | __u32 cap[_KERNEL_CAPABILITY_U32S]; | 25 | __u32 cap[_KERNEL_CAPABILITY_U32S]; |
| 26 | } kernel_cap_t; | 26 | } kernel_cap_t; |
| 27 | 27 | ||
| 28 | /* exact same as vfs_cap_data but in cpu endian and always filled completely */ | 28 | /* same as vfs_ns_cap_data but in cpu endian and always filled completely */ |
| 29 | struct cpu_vfs_cap_data { | 29 | struct cpu_vfs_cap_data { |
| 30 | __u32 magic_etc; | 30 | __u32 magic_etc; |
| 31 | kernel_cap_t permitted; | 31 | kernel_cap_t permitted; |
| 32 | kernel_cap_t inheritable; | 32 | kernel_cap_t inheritable; |
| 33 | kuid_t rootid; | ||
| 33 | }; | 34 | }; |
| 34 | 35 | ||
| 35 | #define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) | 36 | #define _USER_CAP_HEADER_SIZE (sizeof(struct __user_cap_header_struct)) |
| @@ -209,6 +210,7 @@ extern bool has_ns_capability_noaudit(struct task_struct *t, | |||
| 209 | extern bool capable(int cap); | 210 | extern bool capable(int cap); |
| 210 | extern bool ns_capable(struct user_namespace *ns, int cap); | 211 | extern bool ns_capable(struct user_namespace *ns, int cap); |
| 211 | extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); | 212 | extern bool ns_capable_noaudit(struct user_namespace *ns, int cap); |
| 213 | extern bool ns_capable_setid(struct user_namespace *ns, int cap); | ||
| 212 | #else | 214 | #else |
| 213 | static inline bool has_capability(struct task_struct *t, int cap) | 215 | static inline bool has_capability(struct task_struct *t, int cap) |
| 214 | { | 216 | { |
| @@ -240,6 +242,10 @@ static inline bool ns_capable_noaudit(struct user_namespace *ns, int cap) | |||
| 240 | { | 242 | { |
| 241 | return true; | 243 | return true; |
| 242 | } | 244 | } |
| 245 | static inline bool ns_capable_setid(struct user_namespace *ns, int cap) | ||
| 246 | { | ||
| 247 | return true; | ||
| 248 | } | ||
| 243 | #endif /* CONFIG_MULTIUSER */ | 249 | #endif /* CONFIG_MULTIUSER */ |
| 244 | extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); | 250 | extern bool privileged_wrt_inode_uidgid(struct user_namespace *ns, const struct inode *inode); |
| 245 | extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); | 251 | extern bool capable_wrt_inode_uidgid(const struct inode *inode, int cap); |
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h index 68bb09c29ce8..337d5049ff93 100644 --- a/include/linux/ceph/libceph.h +++ b/include/linux/ceph/libceph.h | |||
| @@ -35,6 +35,7 @@ | |||
| 35 | #define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ | 35 | #define CEPH_OPT_NOMSGAUTH (1<<4) /* don't require msg signing feat */ |
| 36 | #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ | 36 | #define CEPH_OPT_TCP_NODELAY (1<<5) /* TCP_NODELAY on TCP sockets */ |
| 37 | #define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ | 37 | #define CEPH_OPT_NOMSGSIGN (1<<6) /* don't sign msgs */ |
| 38 | #define CEPH_OPT_ABORT_ON_FULL (1<<7) /* abort w/ ENOSPC when full */ | ||
| 38 | 39 | ||
| 39 | #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) | 40 | #define CEPH_OPT_DEFAULT (CEPH_OPT_TCP_NODELAY) |
| 40 | 41 | ||
| @@ -53,7 +54,7 @@ struct ceph_options { | |||
| 53 | unsigned long osd_request_timeout; /* jiffies */ | 54 | unsigned long osd_request_timeout; /* jiffies */ |
| 54 | 55 | ||
| 55 | /* | 56 | /* |
| 56 | * any type that can't be simply compared or doesn't need need | 57 | * any type that can't be simply compared or doesn't need |
| 57 | * to be compared should go beyond this point, | 58 | * to be compared should go beyond this point, |
| 58 | * ceph_compare_options() should be updated accordingly | 59 | * ceph_compare_options() should be updated accordingly |
| 59 | */ | 60 | */ |
| @@ -281,7 +282,8 @@ extern struct ceph_options *ceph_parse_options(char *options, | |||
| 281 | const char *dev_name, const char *dev_name_end, | 282 | const char *dev_name, const char *dev_name_end, |
| 282 | int (*parse_extra_token)(char *c, void *private), | 283 | int (*parse_extra_token)(char *c, void *private), |
| 283 | void *private); | 284 | void *private); |
| 284 | int ceph_print_client_options(struct seq_file *m, struct ceph_client *client); | 285 | int ceph_print_client_options(struct seq_file *m, struct ceph_client *client, |
| 286 | bool show_all); | ||
| 285 | extern void ceph_destroy_options(struct ceph_options *opt); | 287 | extern void ceph_destroy_options(struct ceph_options *opt); |
| 286 | extern int ceph_compare_options(struct ceph_options *new_opt, | 288 | extern int ceph_compare_options(struct ceph_options *new_opt, |
| 287 | struct ceph_client *client); | 289 | struct ceph_client *client); |
| @@ -292,6 +294,8 @@ extern void ceph_destroy_client(struct ceph_client *client); | |||
| 292 | extern int __ceph_open_session(struct ceph_client *client, | 294 | extern int __ceph_open_session(struct ceph_client *client, |
| 293 | unsigned long started); | 295 | unsigned long started); |
| 294 | extern int ceph_open_session(struct ceph_client *client); | 296 | extern int ceph_open_session(struct ceph_client *client); |
| 297 | int ceph_wait_for_latest_osdmap(struct ceph_client *client, | ||
| 298 | unsigned long timeout); | ||
| 295 | 299 | ||
| 296 | /* pagevec.c */ | 300 | /* pagevec.c */ |
| 297 | extern void ceph_release_page_vector(struct page **pages, int num_pages); | 301 | extern void ceph_release_page_vector(struct page **pages, int num_pages); |
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 7a2af5034278..2294f963dab7 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h | |||
| @@ -354,7 +354,6 @@ struct ceph_osd_client { | |||
| 354 | struct rb_root linger_map_checks; | 354 | struct rb_root linger_map_checks; |
| 355 | atomic_t num_requests; | 355 | atomic_t num_requests; |
| 356 | atomic_t num_homeless; | 356 | atomic_t num_homeless; |
| 357 | bool abort_on_full; /* abort w/ ENOSPC when full */ | ||
| 358 | int abort_err; | 357 | int abort_err; |
| 359 | struct delayed_work timeout_work; | 358 | struct delayed_work timeout_work; |
| 360 | struct delayed_work osds_timeout_work; | 359 | struct delayed_work osds_timeout_work; |
diff --git a/include/linux/ceph/types.h b/include/linux/ceph/types.h index 27cd973d3881..bd3d532902d7 100644 --- a/include/linux/ceph/types.h +++ b/include/linux/ceph/types.h | |||
| @@ -24,6 +24,7 @@ struct ceph_vino { | |||
| 24 | /* context for the caps reservation mechanism */ | 24 | /* context for the caps reservation mechanism */ |
| 25 | struct ceph_cap_reservation { | 25 | struct ceph_cap_reservation { |
| 26 | int count; | 26 | int count; |
| 27 | int used; | ||
| 27 | }; | 28 | }; |
| 28 | 29 | ||
| 29 | 30 | ||
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h index 8fcbae1b8db0..1c70803e9f77 100644 --- a/include/linux/cgroup-defs.h +++ b/include/linux/cgroup-defs.h | |||
| @@ -32,6 +32,7 @@ struct kernfs_node; | |||
| 32 | struct kernfs_ops; | 32 | struct kernfs_ops; |
| 33 | struct kernfs_open_file; | 33 | struct kernfs_open_file; |
| 34 | struct seq_file; | 34 | struct seq_file; |
| 35 | struct poll_table_struct; | ||
| 35 | 36 | ||
| 36 | #define MAX_CGROUP_TYPE_NAMELEN 32 | 37 | #define MAX_CGROUP_TYPE_NAMELEN 32 |
| 37 | #define MAX_CGROUP_ROOT_NAMELEN 64 | 38 | #define MAX_CGROUP_ROOT_NAMELEN 64 |
| @@ -574,6 +575,9 @@ struct cftype { | |||
| 574 | ssize_t (*write)(struct kernfs_open_file *of, | 575 | ssize_t (*write)(struct kernfs_open_file *of, |
| 575 | char *buf, size_t nbytes, loff_t off); | 576 | char *buf, size_t nbytes, loff_t off); |
| 576 | 577 | ||
| 578 | __poll_t (*poll)(struct kernfs_open_file *of, | ||
| 579 | struct poll_table_struct *pt); | ||
| 580 | |||
| 577 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 581 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 578 | struct lock_class_key lockdep_key; | 582 | struct lock_class_key lockdep_key; |
| 579 | #endif | 583 | #endif |
| @@ -602,7 +606,7 @@ struct cgroup_subsys { | |||
| 602 | void (*cancel_fork)(struct task_struct *task); | 606 | void (*cancel_fork)(struct task_struct *task); |
| 603 | void (*fork)(struct task_struct *task); | 607 | void (*fork)(struct task_struct *task); |
| 604 | void (*exit)(struct task_struct *task); | 608 | void (*exit)(struct task_struct *task); |
| 605 | void (*free)(struct task_struct *task); | 609 | void (*release)(struct task_struct *task); |
| 606 | void (*bind)(struct cgroup_subsys_state *root_css); | 610 | void (*bind)(struct cgroup_subsys_state *root_css); |
| 607 | 611 | ||
| 608 | bool early_init:1; | 612 | bool early_init:1; |
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 9968332cceed..81f58b4a5418 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
| @@ -121,6 +121,7 @@ extern int cgroup_can_fork(struct task_struct *p); | |||
| 121 | extern void cgroup_cancel_fork(struct task_struct *p); | 121 | extern void cgroup_cancel_fork(struct task_struct *p); |
| 122 | extern void cgroup_post_fork(struct task_struct *p); | 122 | extern void cgroup_post_fork(struct task_struct *p); |
| 123 | void cgroup_exit(struct task_struct *p); | 123 | void cgroup_exit(struct task_struct *p); |
| 124 | void cgroup_release(struct task_struct *p); | ||
| 124 | void cgroup_free(struct task_struct *p); | 125 | void cgroup_free(struct task_struct *p); |
| 125 | 126 | ||
| 126 | int cgroup_init_early(void); | 127 | int cgroup_init_early(void); |
| @@ -697,6 +698,7 @@ static inline int cgroup_can_fork(struct task_struct *p) { return 0; } | |||
| 697 | static inline void cgroup_cancel_fork(struct task_struct *p) {} | 698 | static inline void cgroup_cancel_fork(struct task_struct *p) {} |
| 698 | static inline void cgroup_post_fork(struct task_struct *p) {} | 699 | static inline void cgroup_post_fork(struct task_struct *p) {} |
| 699 | static inline void cgroup_exit(struct task_struct *p) {} | 700 | static inline void cgroup_exit(struct task_struct *p) {} |
| 701 | static inline void cgroup_release(struct task_struct *p) {} | ||
| 700 | static inline void cgroup_free(struct task_struct *p) {} | 702 | static inline void cgroup_free(struct task_struct *p) {} |
| 701 | 703 | ||
| 702 | static inline int cgroup_init_early(void) { return 0; } | 704 | static inline int cgroup_init_early(void) { return 0; } |
diff --git a/include/linux/cgroup_rdma.h b/include/linux/cgroup_rdma.h index e94290b29e99..ef1bae2983f3 100644 --- a/include/linux/cgroup_rdma.h +++ b/include/linux/cgroup_rdma.h | |||
| @@ -39,7 +39,7 @@ struct rdmacg_device { | |||
| 39 | * APIs for RDMA/IB stack to publish when a device wants to | 39 | * APIs for RDMA/IB stack to publish when a device wants to |
| 40 | * participate in resource accounting | 40 | * participate in resource accounting |
| 41 | */ | 41 | */ |
| 42 | int rdmacg_register_device(struct rdmacg_device *device); | 42 | void rdmacg_register_device(struct rdmacg_device *device); |
| 43 | void rdmacg_unregister_device(struct rdmacg_device *device); | 43 | void rdmacg_unregister_device(struct rdmacg_device *device); |
| 44 | 44 | ||
| 45 | /* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ | 45 | /* APIs for RDMA/IB stack to charge/uncharge pool specific resources */ |
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 677df7865ac8..46d5fc3057b5 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h | |||
| @@ -348,6 +348,9 @@ void of_fixed_clk_setup(struct device_node *np); | |||
| 348 | * of this register, and mask of gate bits are in higher 16-bit of this | 348 | * of this register, and mask of gate bits are in higher 16-bit of this |
| 349 | * register. While setting the gate bits, higher 16-bit should also be | 349 | * register. While setting the gate bits, higher 16-bit should also be |
| 350 | * updated to indicate changing gate bits. | 350 | * updated to indicate changing gate bits. |
| 351 | * CLK_GATE_BIG_ENDIAN - by default little endian register accesses are used for | ||
| 352 | * the gate register. Setting this flag makes the register accesses big | ||
| 353 | * endian. | ||
| 351 | */ | 354 | */ |
| 352 | struct clk_gate { | 355 | struct clk_gate { |
| 353 | struct clk_hw hw; | 356 | struct clk_hw hw; |
| @@ -361,6 +364,7 @@ struct clk_gate { | |||
| 361 | 364 | ||
| 362 | #define CLK_GATE_SET_TO_DISABLE BIT(0) | 365 | #define CLK_GATE_SET_TO_DISABLE BIT(0) |
| 363 | #define CLK_GATE_HIWORD_MASK BIT(1) | 366 | #define CLK_GATE_HIWORD_MASK BIT(1) |
| 367 | #define CLK_GATE_BIG_ENDIAN BIT(2) | ||
| 364 | 368 | ||
| 365 | extern const struct clk_ops clk_gate_ops; | 369 | extern const struct clk_ops clk_gate_ops; |
| 366 | struct clk *clk_register_gate(struct device *dev, const char *name, | 370 | struct clk *clk_register_gate(struct device *dev, const char *name, |
| @@ -416,6 +420,9 @@ struct clk_div_table { | |||
| 416 | * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED | 420 | * CLK_DIVIDER_MAX_AT_ZERO - For dividers which are like CLK_DIVIDER_ONE_BASED |
| 417 | * except when the value read from the register is zero, the divisor is | 421 | * except when the value read from the register is zero, the divisor is |
| 418 | * 2^width of the field. | 422 | * 2^width of the field. |
| 423 | * CLK_DIVIDER_BIG_ENDIAN - By default little endian register accesses are used | ||
| 424 | * for the divider register. Setting this flag makes the register accesses | ||
| 425 | * big endian. | ||
| 419 | */ | 426 | */ |
| 420 | struct clk_divider { | 427 | struct clk_divider { |
| 421 | struct clk_hw hw; | 428 | struct clk_hw hw; |
| @@ -437,6 +444,7 @@ struct clk_divider { | |||
| 437 | #define CLK_DIVIDER_ROUND_CLOSEST BIT(4) | 444 | #define CLK_DIVIDER_ROUND_CLOSEST BIT(4) |
| 438 | #define CLK_DIVIDER_READ_ONLY BIT(5) | 445 | #define CLK_DIVIDER_READ_ONLY BIT(5) |
| 439 | #define CLK_DIVIDER_MAX_AT_ZERO BIT(6) | 446 | #define CLK_DIVIDER_MAX_AT_ZERO BIT(6) |
| 447 | #define CLK_DIVIDER_BIG_ENDIAN BIT(7) | ||
| 440 | 448 | ||
| 441 | extern const struct clk_ops clk_divider_ops; | 449 | extern const struct clk_ops clk_divider_ops; |
| 442 | extern const struct clk_ops clk_divider_ro_ops; | 450 | extern const struct clk_ops clk_divider_ro_ops; |
| @@ -502,6 +510,9 @@ void clk_hw_unregister_divider(struct clk_hw *hw); | |||
| 502 | * .get_parent clk_op. | 510 | * .get_parent clk_op. |
| 503 | * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired | 511 | * CLK_MUX_ROUND_CLOSEST - Use the parent rate that is closest to the desired |
| 504 | * frequency. | 512 | * frequency. |
| 513 | * CLK_MUX_BIG_ENDIAN - By default little endian register accesses are used for | ||
| 514 | * the mux register. Setting this flag makes the register accesses big | ||
| 515 | * endian. | ||
| 505 | */ | 516 | */ |
| 506 | struct clk_mux { | 517 | struct clk_mux { |
| 507 | struct clk_hw hw; | 518 | struct clk_hw hw; |
| @@ -520,6 +531,7 @@ struct clk_mux { | |||
| 520 | #define CLK_MUX_HIWORD_MASK BIT(2) | 531 | #define CLK_MUX_HIWORD_MASK BIT(2) |
| 521 | #define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */ | 532 | #define CLK_MUX_READ_ONLY BIT(3) /* mux can't be changed */ |
| 522 | #define CLK_MUX_ROUND_CLOSEST BIT(4) | 533 | #define CLK_MUX_ROUND_CLOSEST BIT(4) |
| 534 | #define CLK_MUX_BIG_ENDIAN BIT(5) | ||
| 523 | 535 | ||
| 524 | extern const struct clk_ops clk_mux_ops; | 536 | extern const struct clk_ops clk_mux_ops; |
| 525 | extern const struct clk_ops clk_mux_ro_ops; | 537 | extern const struct clk_ops clk_mux_ro_ops; |
| @@ -603,6 +615,9 @@ void clk_hw_unregister_fixed_factor(struct clk_hw *hw); | |||
| 603 | * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED | 615 | * is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED |
| 604 | * is set then the numerator and denominator are both the value read | 616 | * is set then the numerator and denominator are both the value read |
| 605 | * plus one. | 617 | * plus one. |
| 618 | * CLK_FRAC_DIVIDER_BIG_ENDIAN - By default little endian register accesses are | ||
| 619 | * used for the divider register. Setting this flag makes the register | ||
| 620 | * accesses big endian. | ||
| 606 | */ | 621 | */ |
| 607 | struct clk_fractional_divider { | 622 | struct clk_fractional_divider { |
| 608 | struct clk_hw hw; | 623 | struct clk_hw hw; |
| @@ -623,6 +638,7 @@ struct clk_fractional_divider { | |||
| 623 | #define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) | 638 | #define to_clk_fd(_hw) container_of(_hw, struct clk_fractional_divider, hw) |
| 624 | 639 | ||
| 625 | #define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0) | 640 | #define CLK_FRAC_DIVIDER_ZERO_BASED BIT(0) |
| 641 | #define CLK_FRAC_DIVIDER_BIG_ENDIAN BIT(1) | ||
| 626 | 642 | ||
| 627 | extern const struct clk_ops clk_fractional_divider_ops; | 643 | extern const struct clk_ops clk_fractional_divider_ops; |
| 628 | struct clk *clk_register_fractional_divider(struct device *dev, | 644 | struct clk *clk_register_fractional_divider(struct device *dev, |
| @@ -655,6 +671,9 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw); | |||
| 655 | * leaving the parent rate unmodified. | 671 | * leaving the parent rate unmodified. |
| 656 | * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be | 672 | * CLK_MULTIPLIER_ROUND_CLOSEST - Makes the best calculated divider to be |
| 657 | * rounded to the closest integer instead of the down one. | 673 | * rounded to the closest integer instead of the down one. |
| 674 | * CLK_MULTIPLIER_BIG_ENDIAN - By default little endian register accesses are | ||
| 675 | * used for the multiplier register. Setting this flag makes the register | ||
| 676 | * accesses big endian. | ||
| 658 | */ | 677 | */ |
| 659 | struct clk_multiplier { | 678 | struct clk_multiplier { |
| 660 | struct clk_hw hw; | 679 | struct clk_hw hw; |
| @@ -669,6 +688,7 @@ struct clk_multiplier { | |||
| 669 | 688 | ||
| 670 | #define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) | 689 | #define CLK_MULTIPLIER_ZERO_BYPASS BIT(0) |
| 671 | #define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) | 690 | #define CLK_MULTIPLIER_ROUND_CLOSEST BIT(1) |
| 691 | #define CLK_MULTIPLIER_BIG_ENDIAN BIT(2) | ||
| 672 | 692 | ||
| 673 | extern const struct clk_ops clk_multiplier_ops; | 693 | extern const struct clk_ops clk_multiplier_ops; |
| 674 | 694 | ||
| @@ -775,6 +795,9 @@ unsigned int __clk_get_enable_count(struct clk *clk); | |||
| 775 | unsigned long clk_hw_get_rate(const struct clk_hw *hw); | 795 | unsigned long clk_hw_get_rate(const struct clk_hw *hw); |
| 776 | unsigned long __clk_get_flags(struct clk *clk); | 796 | unsigned long __clk_get_flags(struct clk *clk); |
| 777 | unsigned long clk_hw_get_flags(const struct clk_hw *hw); | 797 | unsigned long clk_hw_get_flags(const struct clk_hw *hw); |
| 798 | #define clk_hw_can_set_rate_parent(hw) \ | ||
| 799 | (clk_hw_get_flags((hw)) & CLK_SET_RATE_PARENT) | ||
| 800 | |||
| 778 | bool clk_hw_is_prepared(const struct clk_hw *hw); | 801 | bool clk_hw_is_prepared(const struct clk_hw *hw); |
| 779 | bool clk_hw_rate_is_protected(const struct clk_hw *hw); | 802 | bool clk_hw_rate_is_protected(const struct clk_hw *hw); |
| 780 | bool clk_hw_is_enabled(const struct clk_hw *hw); | 803 | bool clk_hw_is_enabled(const struct clk_hw *hw); |
| @@ -973,37 +996,6 @@ static inline int of_clk_detect_critical(struct device_node *np, int index, | |||
| 973 | } | 996 | } |
| 974 | #endif /* CONFIG_OF */ | 997 | #endif /* CONFIG_OF */ |
| 975 | 998 | ||
| 976 | /* | ||
| 977 | * wrap access to peripherals in accessor routines | ||
| 978 | * for improved portability across platforms | ||
| 979 | */ | ||
| 980 | |||
| 981 | #if IS_ENABLED(CONFIG_PPC) | ||
| 982 | |||
| 983 | static inline u32 clk_readl(u32 __iomem *reg) | ||
| 984 | { | ||
| 985 | return ioread32be(reg); | ||
| 986 | } | ||
| 987 | |||
| 988 | static inline void clk_writel(u32 val, u32 __iomem *reg) | ||
| 989 | { | ||
| 990 | iowrite32be(val, reg); | ||
| 991 | } | ||
| 992 | |||
| 993 | #else /* platform dependent I/O accessors */ | ||
| 994 | |||
| 995 | static inline u32 clk_readl(u32 __iomem *reg) | ||
| 996 | { | ||
| 997 | return readl(reg); | ||
| 998 | } | ||
| 999 | |||
| 1000 | static inline void clk_writel(u32 val, u32 __iomem *reg) | ||
| 1001 | { | ||
| 1002 | writel(val, reg); | ||
| 1003 | } | ||
| 1004 | |||
| 1005 | #endif /* platform dependent I/O accessors */ | ||
| 1006 | |||
| 1007 | void clk_gate_restore_context(struct clk_hw *hw); | 999 | void clk_gate_restore_context(struct clk_hw *hw); |
| 1008 | 1000 | ||
| 1009 | #endif /* CONFIG_COMMON_CLK */ | 1001 | #endif /* CONFIG_COMMON_CLK */ |
diff --git a/include/linux/clk.h b/include/linux/clk.h index a7773b5c0b9f..f689fc58d7be 100644 --- a/include/linux/clk.h +++ b/include/linux/clk.h | |||
| @@ -384,6 +384,17 @@ int __must_check devm_clk_bulk_get_all(struct device *dev, | |||
| 384 | struct clk *devm_clk_get(struct device *dev, const char *id); | 384 | struct clk *devm_clk_get(struct device *dev, const char *id); |
| 385 | 385 | ||
| 386 | /** | 386 | /** |
| 387 | * devm_clk_get_optional - lookup and obtain a managed reference to an optional | ||
| 388 | * clock producer. | ||
| 389 | * @dev: device for clock "consumer" | ||
| 390 | * @id: clock consumer ID | ||
| 391 | * | ||
| 392 | * Behaves the same as devm_clk_get() except where there is no clock producer. | ||
| 393 | * In this case, instead of returning -ENOENT, the function returns NULL. | ||
| 394 | */ | ||
| 395 | struct clk *devm_clk_get_optional(struct device *dev, const char *id); | ||
| 396 | |||
| 397 | /** | ||
| 387 | * devm_get_clk_from_child - lookup and obtain a managed reference to a | 398 | * devm_get_clk_from_child - lookup and obtain a managed reference to a |
| 388 | * clock producer from child node. | 399 | * clock producer from child node. |
| 389 | * @dev: device for clock "consumer" | 400 | * @dev: device for clock "consumer" |
| @@ -718,6 +729,12 @@ static inline struct clk *devm_clk_get(struct device *dev, const char *id) | |||
| 718 | return NULL; | 729 | return NULL; |
| 719 | } | 730 | } |
| 720 | 731 | ||
| 732 | static inline struct clk *devm_clk_get_optional(struct device *dev, | ||
| 733 | const char *id) | ||
| 734 | { | ||
| 735 | return NULL; | ||
| 736 | } | ||
| 737 | |||
| 721 | static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, | 738 | static inline int __must_check devm_clk_bulk_get(struct device *dev, int num_clks, |
| 722 | struct clk_bulk_data *clks) | 739 | struct clk_bulk_data *clks) |
| 723 | { | 740 | { |
| @@ -794,6 +811,22 @@ static inline bool clk_has_parent(struct clk *clk, struct clk *parent) | |||
| 794 | return true; | 811 | return true; |
| 795 | } | 812 | } |
| 796 | 813 | ||
| 814 | static inline int clk_set_rate_range(struct clk *clk, unsigned long min, | ||
| 815 | unsigned long max) | ||
| 816 | { | ||
| 817 | return 0; | ||
| 818 | } | ||
| 819 | |||
| 820 | static inline int clk_set_min_rate(struct clk *clk, unsigned long rate) | ||
| 821 | { | ||
| 822 | return 0; | ||
| 823 | } | ||
| 824 | |||
| 825 | static inline int clk_set_max_rate(struct clk *clk, unsigned long rate) | ||
| 826 | { | ||
| 827 | return 0; | ||
| 828 | } | ||
| 829 | |||
| 797 | static inline int clk_set_parent(struct clk *clk, struct clk *parent) | 830 | static inline int clk_set_parent(struct clk *clk, struct clk *parent) |
| 798 | { | 831 | { |
| 799 | return 0; | 832 | return 0; |
| @@ -862,6 +895,25 @@ static inline void clk_bulk_disable_unprepare(int num_clks, | |||
| 862 | clk_bulk_unprepare(num_clks, clks); | 895 | clk_bulk_unprepare(num_clks, clks); |
| 863 | } | 896 | } |
| 864 | 897 | ||
| 898 | /** | ||
| 899 | * clk_get_optional - lookup and obtain a reference to an optional clock | ||
| 900 | * producer. | ||
| 901 | * @dev: device for clock "consumer" | ||
| 902 | * @id: clock consumer ID | ||
| 903 | * | ||
| 904 | * Behaves the same as clk_get() except where there is no clock producer. In | ||
| 905 | * this case, instead of returning -ENOENT, the function returns NULL. | ||
| 906 | */ | ||
| 907 | static inline struct clk *clk_get_optional(struct device *dev, const char *id) | ||
| 908 | { | ||
| 909 | struct clk *clk = clk_get(dev, id); | ||
| 910 | |||
| 911 | if (clk == ERR_PTR(-ENOENT)) | ||
| 912 | return NULL; | ||
| 913 | |||
| 914 | return clk; | ||
| 915 | } | ||
| 916 | |||
| 865 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) | 917 | #if defined(CONFIG_OF) && defined(CONFIG_COMMON_CLK) |
| 866 | struct clk *of_clk_get(struct device_node *np, int index); | 918 | struct clk *of_clk_get(struct device_node *np, int index); |
| 867 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name); | 919 | struct clk *of_clk_get_by_name(struct device_node *np, const char *name); |
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index eacc5df57b99..78872efc7be0 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h | |||
| @@ -160,6 +160,7 @@ struct clk_hw_omap { | |||
| 160 | struct clockdomain *clkdm; | 160 | struct clockdomain *clkdm; |
| 161 | const struct clk_hw_omap_ops *ops; | 161 | const struct clk_hw_omap_ops *ops; |
| 162 | u32 context; | 162 | u32 context; |
| 163 | int autoidle_count; | ||
| 163 | }; | 164 | }; |
| 164 | 165 | ||
| 165 | /* | 166 | /* |
diff --git a/include/linux/clkdev.h b/include/linux/clkdev.h index 4890ff033220..ccb32af5848b 100644 --- a/include/linux/clkdev.h +++ b/include/linux/clkdev.h | |||
| @@ -52,4 +52,8 @@ int clk_add_alias(const char *, const char *, const char *, struct device *); | |||
| 52 | int clk_register_clkdev(struct clk *, const char *, const char *); | 52 | int clk_register_clkdev(struct clk *, const char *, const char *); |
| 53 | int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); | 53 | int clk_hw_register_clkdev(struct clk_hw *, const char *, const char *); |
| 54 | 54 | ||
| 55 | int devm_clk_hw_register_clkdev(struct device *dev, struct clk_hw *hw, | ||
| 56 | const char *con_id, const char *dev_id); | ||
| 57 | void devm_clk_release_clkdev(struct device *dev, const char *con_id, | ||
| 58 | const char *dev_id); | ||
| 55 | #endif | 59 | #endif |
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 68250a57aace..9569e7c786d3 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
| @@ -88,14 +88,13 @@ extern int sysctl_compact_memory; | |||
| 88 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, | 88 | extern int sysctl_compaction_handler(struct ctl_table *table, int write, |
| 89 | void __user *buffer, size_t *length, loff_t *ppos); | 89 | void __user *buffer, size_t *length, loff_t *ppos); |
| 90 | extern int sysctl_extfrag_threshold; | 90 | extern int sysctl_extfrag_threshold; |
| 91 | extern int sysctl_extfrag_handler(struct ctl_table *table, int write, | ||
| 92 | void __user *buffer, size_t *length, loff_t *ppos); | ||
| 93 | extern int sysctl_compact_unevictable_allowed; | 91 | extern int sysctl_compact_unevictable_allowed; |
| 94 | 92 | ||
| 95 | extern int fragmentation_index(struct zone *zone, unsigned int order); | 93 | extern int fragmentation_index(struct zone *zone, unsigned int order); |
| 96 | extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, | 94 | extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, |
| 97 | unsigned int order, unsigned int alloc_flags, | 95 | unsigned int order, unsigned int alloc_flags, |
| 98 | const struct alloc_context *ac, enum compact_priority prio); | 96 | const struct alloc_context *ac, enum compact_priority prio, |
| 97 | struct page **page); | ||
| 99 | extern void reset_isolation_suitable(pg_data_t *pgdat); | 98 | extern void reset_isolation_suitable(pg_data_t *pgdat); |
| 100 | extern enum compact_result compaction_suitable(struct zone *zone, int order, | 99 | extern enum compact_result compaction_suitable(struct zone *zone, int order, |
| 101 | unsigned int alloc_flags, int classzone_idx); | 100 | unsigned int alloc_flags, int classzone_idx); |
| @@ -227,8 +226,8 @@ static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_i | |||
| 227 | 226 | ||
| 228 | #endif /* CONFIG_COMPACTION */ | 227 | #endif /* CONFIG_COMPACTION */ |
| 229 | 228 | ||
| 230 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | ||
| 231 | struct node; | 229 | struct node; |
| 230 | #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA) | ||
| 232 | extern int compaction_register_node(struct node *node); | 231 | extern int compaction_register_node(struct node *node); |
| 233 | extern void compaction_unregister_node(struct node *node); | 232 | extern void compaction_unregister_node(struct node *node); |
| 234 | 233 | ||
diff --git a/include/linux/compat.h b/include/linux/compat.h index 056be0d03722..ebddcb6cfcf8 100644 --- a/include/linux/compat.h +++ b/include/linux/compat.h | |||
| @@ -132,37 +132,6 @@ struct compat_tms { | |||
| 132 | compat_clock_t tms_cstime; | 132 | compat_clock_t tms_cstime; |
| 133 | }; | 133 | }; |
| 134 | 134 | ||
| 135 | struct compat_timex { | ||
| 136 | compat_uint_t modes; | ||
| 137 | compat_long_t offset; | ||
| 138 | compat_long_t freq; | ||
| 139 | compat_long_t maxerror; | ||
| 140 | compat_long_t esterror; | ||
| 141 | compat_int_t status; | ||
| 142 | compat_long_t constant; | ||
| 143 | compat_long_t precision; | ||
| 144 | compat_long_t tolerance; | ||
| 145 | struct old_timeval32 time; | ||
| 146 | compat_long_t tick; | ||
| 147 | compat_long_t ppsfreq; | ||
| 148 | compat_long_t jitter; | ||
| 149 | compat_int_t shift; | ||
| 150 | compat_long_t stabil; | ||
| 151 | compat_long_t jitcnt; | ||
| 152 | compat_long_t calcnt; | ||
| 153 | compat_long_t errcnt; | ||
| 154 | compat_long_t stbcnt; | ||
| 155 | compat_int_t tai; | ||
| 156 | |||
| 157 | compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; | ||
| 158 | compat_int_t:32; compat_int_t:32; compat_int_t:32; compat_int_t:32; | ||
| 159 | compat_int_t:32; compat_int_t:32; compat_int_t:32; | ||
| 160 | }; | ||
| 161 | |||
| 162 | struct timex; | ||
| 163 | int compat_get_timex(struct timex *, const struct compat_timex __user *); | ||
| 164 | int compat_put_timex(struct compat_timex __user *, const struct timex *); | ||
| 165 | |||
| 166 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) | 135 | #define _COMPAT_NSIG_WORDS (_COMPAT_NSIG / _COMPAT_NSIG_BPW) |
| 167 | 136 | ||
| 168 | typedef struct { | 137 | typedef struct { |
| @@ -551,11 +520,6 @@ int __compat_save_altstack(compat_stack_t __user *, unsigned long); | |||
| 551 | asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); | 520 | asmlinkage long compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p); |
| 552 | asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, | 521 | asmlinkage long compat_sys_io_submit(compat_aio_context_t ctx_id, int nr, |
| 553 | u32 __user *iocb); | 522 | u32 __user *iocb); |
| 554 | asmlinkage long compat_sys_io_getevents(compat_aio_context_t ctx_id, | ||
| 555 | compat_long_t min_nr, | ||
| 556 | compat_long_t nr, | ||
| 557 | struct io_event __user *events, | ||
| 558 | struct old_timespec32 __user *timeout); | ||
| 559 | asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, | 523 | asmlinkage long compat_sys_io_pgetevents(compat_aio_context_t ctx_id, |
| 560 | compat_long_t min_nr, | 524 | compat_long_t min_nr, |
| 561 | compat_long_t nr, | 525 | compat_long_t nr, |
| @@ -648,7 +612,7 @@ asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, | |||
| 648 | compat_loff_t __user *offset, compat_size_t count); | 612 | compat_loff_t __user *offset, compat_size_t count); |
| 649 | 613 | ||
| 650 | /* fs/select.c */ | 614 | /* fs/select.c */ |
| 651 | asmlinkage long compat_sys_pselect6(int n, compat_ulong_t __user *inp, | 615 | asmlinkage long compat_sys_pselect6_time32(int n, compat_ulong_t __user *inp, |
| 652 | compat_ulong_t __user *outp, | 616 | compat_ulong_t __user *outp, |
| 653 | compat_ulong_t __user *exp, | 617 | compat_ulong_t __user *exp, |
| 654 | struct old_timespec32 __user *tsp, | 618 | struct old_timespec32 __user *tsp, |
| @@ -658,7 +622,7 @@ asmlinkage long compat_sys_pselect6_time64(int n, compat_ulong_t __user *inp, | |||
| 658 | compat_ulong_t __user *exp, | 622 | compat_ulong_t __user *exp, |
| 659 | struct __kernel_timespec __user *tsp, | 623 | struct __kernel_timespec __user *tsp, |
| 660 | void __user *sig); | 624 | void __user *sig); |
| 661 | asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds, | 625 | asmlinkage long compat_sys_ppoll_time32(struct pollfd __user *ufds, |
| 662 | unsigned int nfds, | 626 | unsigned int nfds, |
| 663 | struct old_timespec32 __user *tsp, | 627 | struct old_timespec32 __user *tsp, |
| 664 | const compat_sigset_t __user *sigmask, | 628 | const compat_sigset_t __user *sigmask, |
| @@ -688,19 +652,6 @@ asmlinkage long compat_sys_newfstat(unsigned int fd, | |||
| 688 | 652 | ||
| 689 | /* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ | 653 | /* fs/sync.c: No generic prototype for sync_file_range and sync_file_range2 */ |
| 690 | 654 | ||
| 691 | /* fs/timerfd.c */ | ||
| 692 | asmlinkage long compat_sys_timerfd_gettime(int ufd, | ||
| 693 | struct old_itimerspec32 __user *otmr); | ||
| 694 | asmlinkage long compat_sys_timerfd_settime(int ufd, int flags, | ||
| 695 | const struct old_itimerspec32 __user *utmr, | ||
| 696 | struct old_itimerspec32 __user *otmr); | ||
| 697 | |||
| 698 | /* fs/utimes.c */ | ||
| 699 | asmlinkage long compat_sys_utimensat(unsigned int dfd, | ||
| 700 | const char __user *filename, | ||
| 701 | struct old_timespec32 __user *t, | ||
| 702 | int flags); | ||
| 703 | |||
| 704 | /* kernel/exit.c */ | 655 | /* kernel/exit.c */ |
| 705 | asmlinkage long compat_sys_waitid(int, compat_pid_t, | 656 | asmlinkage long compat_sys_waitid(int, compat_pid_t, |
| 706 | struct compat_siginfo __user *, int, | 657 | struct compat_siginfo __user *, int, |
| @@ -709,9 +660,6 @@ asmlinkage long compat_sys_waitid(int, compat_pid_t, | |||
| 709 | 660 | ||
| 710 | 661 | ||
| 711 | /* kernel/futex.c */ | 662 | /* kernel/futex.c */ |
| 712 | asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val, | ||
| 713 | struct old_timespec32 __user *utime, u32 __user *uaddr2, | ||
| 714 | u32 val3); | ||
| 715 | asmlinkage long | 663 | asmlinkage long |
| 716 | compat_sys_set_robust_list(struct compat_robust_list_head __user *head, | 664 | compat_sys_set_robust_list(struct compat_robust_list_head __user *head, |
| 717 | compat_size_t len); | 665 | compat_size_t len); |
| @@ -719,10 +667,6 @@ asmlinkage long | |||
| 719 | compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, | 667 | compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr, |
| 720 | compat_size_t __user *len_ptr); | 668 | compat_size_t __user *len_ptr); |
| 721 | 669 | ||
| 722 | /* kernel/hrtimer.c */ | ||
| 723 | asmlinkage long compat_sys_nanosleep(struct old_timespec32 __user *rqtp, | ||
| 724 | struct old_timespec32 __user *rmtp); | ||
| 725 | |||
| 726 | /* kernel/itimer.c */ | 670 | /* kernel/itimer.c */ |
| 727 | asmlinkage long compat_sys_getitimer(int which, | 671 | asmlinkage long compat_sys_getitimer(int which, |
| 728 | struct compat_itimerval __user *it); | 672 | struct compat_itimerval __user *it); |
| @@ -740,20 +684,6 @@ asmlinkage long compat_sys_kexec_load(compat_ulong_t entry, | |||
| 740 | asmlinkage long compat_sys_timer_create(clockid_t which_clock, | 684 | asmlinkage long compat_sys_timer_create(clockid_t which_clock, |
| 741 | struct compat_sigevent __user *timer_event_spec, | 685 | struct compat_sigevent __user *timer_event_spec, |
| 742 | timer_t __user *created_timer_id); | 686 | timer_t __user *created_timer_id); |
| 743 | asmlinkage long compat_sys_timer_gettime(timer_t timer_id, | ||
| 744 | struct old_itimerspec32 __user *setting); | ||
| 745 | asmlinkage long compat_sys_timer_settime(timer_t timer_id, int flags, | ||
| 746 | struct old_itimerspec32 __user *new, | ||
| 747 | struct old_itimerspec32 __user *old); | ||
| 748 | asmlinkage long compat_sys_clock_settime(clockid_t which_clock, | ||
| 749 | struct old_timespec32 __user *tp); | ||
| 750 | asmlinkage long compat_sys_clock_gettime(clockid_t which_clock, | ||
| 751 | struct old_timespec32 __user *tp); | ||
| 752 | asmlinkage long compat_sys_clock_getres(clockid_t which_clock, | ||
| 753 | struct old_timespec32 __user *tp); | ||
| 754 | asmlinkage long compat_sys_clock_nanosleep(clockid_t which_clock, int flags, | ||
| 755 | struct old_timespec32 __user *rqtp, | ||
| 756 | struct old_timespec32 __user *rmtp); | ||
| 757 | 687 | ||
| 758 | /* kernel/ptrace.c */ | 688 | /* kernel/ptrace.c */ |
| 759 | asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, | 689 | asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid, |
| @@ -766,8 +696,6 @@ asmlinkage long compat_sys_sched_setaffinity(compat_pid_t pid, | |||
| 766 | asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, | 696 | asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, |
| 767 | unsigned int len, | 697 | unsigned int len, |
| 768 | compat_ulong_t __user *user_mask_ptr); | 698 | compat_ulong_t __user *user_mask_ptr); |
| 769 | asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, | ||
| 770 | struct old_timespec32 __user *interval); | ||
| 771 | 699 | ||
| 772 | /* kernel/signal.c */ | 700 | /* kernel/signal.c */ |
| 773 | asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, | 701 | asmlinkage long compat_sys_sigaltstack(const compat_stack_t __user *uss_ptr, |
| @@ -785,7 +713,7 @@ asmlinkage long compat_sys_rt_sigprocmask(int how, compat_sigset_t __user *set, | |||
| 785 | compat_size_t sigsetsize); | 713 | compat_size_t sigsetsize); |
| 786 | asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, | 714 | asmlinkage long compat_sys_rt_sigpending(compat_sigset_t __user *uset, |
| 787 | compat_size_t sigsetsize); | 715 | compat_size_t sigsetsize); |
| 788 | asmlinkage long compat_sys_rt_sigtimedwait(compat_sigset_t __user *uthese, | 716 | asmlinkage long compat_sys_rt_sigtimedwait_time32(compat_sigset_t __user *uthese, |
| 789 | struct compat_siginfo __user *uinfo, | 717 | struct compat_siginfo __user *uinfo, |
| 790 | struct old_timespec32 __user *uts, compat_size_t sigsetsize); | 718 | struct old_timespec32 __user *uts, compat_size_t sigsetsize); |
| 791 | asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, | 719 | asmlinkage long compat_sys_rt_sigtimedwait_time64(compat_sigset_t __user *uthese, |
| @@ -808,7 +736,6 @@ asmlinkage long compat_sys_gettimeofday(struct old_timeval32 __user *tv, | |||
| 808 | struct timezone __user *tz); | 736 | struct timezone __user *tz); |
| 809 | asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, | 737 | asmlinkage long compat_sys_settimeofday(struct old_timeval32 __user *tv, |
| 810 | struct timezone __user *tz); | 738 | struct timezone __user *tz); |
| 811 | asmlinkage long compat_sys_adjtimex(struct compat_timex __user *utp); | ||
| 812 | 739 | ||
| 813 | /* kernel/timer.c */ | 740 | /* kernel/timer.c */ |
| 814 | asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); | 741 | asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); |
| @@ -817,14 +744,6 @@ asmlinkage long compat_sys_sysinfo(struct compat_sysinfo __user *info); | |||
| 817 | asmlinkage long compat_sys_mq_open(const char __user *u_name, | 744 | asmlinkage long compat_sys_mq_open(const char __user *u_name, |
| 818 | int oflag, compat_mode_t mode, | 745 | int oflag, compat_mode_t mode, |
| 819 | struct compat_mq_attr __user *u_attr); | 746 | struct compat_mq_attr __user *u_attr); |
| 820 | asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, | ||
| 821 | const char __user *u_msg_ptr, | ||
| 822 | compat_size_t msg_len, unsigned int msg_prio, | ||
| 823 | const struct old_timespec32 __user *u_abs_timeout); | ||
| 824 | asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, | ||
| 825 | char __user *u_msg_ptr, | ||
| 826 | compat_size_t msg_len, unsigned int __user *u_msg_prio, | ||
| 827 | const struct old_timespec32 __user *u_abs_timeout); | ||
| 828 | asmlinkage long compat_sys_mq_notify(mqd_t mqdes, | 747 | asmlinkage long compat_sys_mq_notify(mqd_t mqdes, |
| 829 | const struct compat_sigevent __user *u_notification); | 748 | const struct compat_sigevent __user *u_notification); |
| 830 | asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, | 749 | asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, |
| @@ -840,8 +759,6 @@ asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp, | |||
| 840 | 759 | ||
| 841 | /* ipc/sem.c */ | 760 | /* ipc/sem.c */ |
| 842 | asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); | 761 | asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg); |
| 843 | asmlinkage long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, | ||
| 844 | unsigned nsems, const struct old_timespec32 __user *timeout); | ||
| 845 | 762 | ||
| 846 | /* ipc/shm.c */ | 763 | /* ipc/shm.c */ |
| 847 | asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); | 764 | asmlinkage long compat_sys_shmctl(int first, int second, void __user *uptr); |
| @@ -899,7 +816,7 @@ asmlinkage long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, | |||
| 899 | asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, | 816 | asmlinkage long compat_sys_recvmmsg_time64(int fd, struct compat_mmsghdr __user *mmsg, |
| 900 | unsigned vlen, unsigned int flags, | 817 | unsigned vlen, unsigned int flags, |
| 901 | struct __kernel_timespec __user *timeout); | 818 | struct __kernel_timespec __user *timeout); |
| 902 | asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, | 819 | asmlinkage long compat_sys_recvmmsg_time32(int fd, struct compat_mmsghdr __user *mmsg, |
| 903 | unsigned vlen, unsigned int flags, | 820 | unsigned vlen, unsigned int flags, |
| 904 | struct old_timespec32 __user *timeout); | 821 | struct old_timespec32 __user *timeout); |
| 905 | asmlinkage long compat_sys_wait4(compat_pid_t pid, | 822 | asmlinkage long compat_sys_wait4(compat_pid_t pid, |
| @@ -910,8 +827,6 @@ asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32, | |||
| 910 | asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, | 827 | asmlinkage long compat_sys_open_by_handle_at(int mountdirfd, |
| 911 | struct file_handle __user *handle, | 828 | struct file_handle __user *handle, |
| 912 | int flags); | 829 | int flags); |
| 913 | asmlinkage long compat_sys_clock_adjtime(clockid_t which_clock, | ||
| 914 | struct compat_timex __user *tp); | ||
| 915 | asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, | 830 | asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, |
| 916 | unsigned vlen, unsigned int flags); | 831 | unsigned vlen, unsigned int flags); |
| 917 | asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, | 832 | asmlinkage ssize_t compat_sys_process_vm_readv(compat_pid_t pid, |
| @@ -952,8 +867,6 @@ asmlinkage long compat_sys_pwritev64v2(unsigned long fd, | |||
| 952 | /* __ARCH_WANT_SYSCALL_NO_AT */ | 867 | /* __ARCH_WANT_SYSCALL_NO_AT */ |
| 953 | asmlinkage long compat_sys_open(const char __user *filename, int flags, | 868 | asmlinkage long compat_sys_open(const char __user *filename, int flags, |
| 954 | umode_t mode); | 869 | umode_t mode); |
| 955 | asmlinkage long compat_sys_utimes(const char __user *filename, | ||
| 956 | struct old_timeval32 __user *t); | ||
| 957 | 870 | ||
| 958 | /* __ARCH_WANT_SYSCALL_NO_FLAGS */ | 871 | /* __ARCH_WANT_SYSCALL_NO_FLAGS */ |
| 959 | asmlinkage long compat_sys_signalfd(int ufd, | 872 | asmlinkage long compat_sys_signalfd(int ufd, |
| @@ -967,12 +880,6 @@ asmlinkage long compat_sys_newlstat(const char __user *filename, | |||
| 967 | struct compat_stat __user *statbuf); | 880 | struct compat_stat __user *statbuf); |
| 968 | 881 | ||
| 969 | /* __ARCH_WANT_SYSCALL_DEPRECATED */ | 882 | /* __ARCH_WANT_SYSCALL_DEPRECATED */ |
| 970 | asmlinkage long compat_sys_time(old_time32_t __user *tloc); | ||
| 971 | asmlinkage long compat_sys_utime(const char __user *filename, | ||
| 972 | struct old_utimbuf32 __user *t); | ||
| 973 | asmlinkage long compat_sys_futimesat(unsigned int dfd, | ||
| 974 | const char __user *filename, | ||
| 975 | struct old_timeval32 __user *t); | ||
| 976 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, | 883 | asmlinkage long compat_sys_select(int n, compat_ulong_t __user *inp, |
| 977 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, | 884 | compat_ulong_t __user *outp, compat_ulong_t __user *exp, |
| 978 | struct old_timeval32 __user *tvp); | 885 | struct old_timeval32 __user *tvp); |
| @@ -1007,9 +914,6 @@ asmlinkage long compat_sys_sigaction(int sig, | |||
| 1007 | struct compat_old_sigaction __user *oact); | 914 | struct compat_old_sigaction __user *oact); |
| 1008 | #endif | 915 | #endif |
| 1009 | 916 | ||
| 1010 | /* obsolete: kernel/time/time.c */ | ||
| 1011 | asmlinkage long compat_sys_stime(old_time32_t __user *tptr); | ||
| 1012 | |||
| 1013 | /* obsolete: net/socket.c */ | 917 | /* obsolete: net/socket.c */ |
| 1014 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args); | 918 | asmlinkage long compat_sys_socketcall(int call, u32 __user *args); |
| 1015 | 919 | ||
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h index 39f668d5066b..333a6695a918 100644 --- a/include/linux/compiler-clang.h +++ b/include/linux/compiler-clang.h | |||
| @@ -3,9 +3,8 @@ | |||
| 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." | 3 | #error "Please don't include <linux/compiler-clang.h> directly, include <linux/compiler.h> instead." |
| 4 | #endif | 4 | #endif |
| 5 | 5 | ||
| 6 | /* Some compiler specific definitions are overwritten here | 6 | /* Compiler specific definitions for Clang compiler */ |
| 7 | * for Clang compiler | 7 | |
| 8 | */ | ||
| 9 | #define uninitialized_var(x) x = *(&(x)) | 8 | #define uninitialized_var(x) x = *(&(x)) |
| 10 | 9 | ||
| 11 | /* same as gcc, this was present in clang-2.6 so we can assume it works | 10 | /* same as gcc, this was present in clang-2.6 so we can assume it works |
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h index 5776da43da97..e8579412ad21 100644 --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h | |||
| @@ -58,17 +58,13 @@ | |||
| 58 | (typeof(ptr)) (__ptr + (off)); \ | 58 | (typeof(ptr)) (__ptr + (off)); \ |
| 59 | }) | 59 | }) |
| 60 | 60 | ||
| 61 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ | ||
| 62 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
| 63 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
| 64 | |||
| 65 | /* | 61 | /* |
| 66 | * A trick to suppress uninitialized variable warning without generating any | 62 | * A trick to suppress uninitialized variable warning without generating any |
| 67 | * code | 63 | * code |
| 68 | */ | 64 | */ |
| 69 | #define uninitialized_var(x) x = x | 65 | #define uninitialized_var(x) x = x |
| 70 | 66 | ||
| 71 | #ifdef RETPOLINE | 67 | #ifdef CONFIG_RETPOLINE |
| 72 | #define __noretpoline __attribute__((__indirect_branch__("keep"))) | 68 | #define __noretpoline __attribute__((__indirect_branch__("keep"))) |
| 73 | #endif | 69 | #endif |
| 74 | 70 | ||
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h index 517bd14e1222..b17f3cd18334 100644 --- a/include/linux/compiler-intel.h +++ b/include/linux/compiler-intel.h | |||
| @@ -5,9 +5,7 @@ | |||
| 5 | 5 | ||
| 6 | #ifdef __ECC | 6 | #ifdef __ECC |
| 7 | 7 | ||
| 8 | /* Some compiler specific definitions are overwritten here | 8 | /* Compiler specific definitions for Intel ECC compiler */ |
| 9 | * for Intel ECC compiler | ||
| 10 | */ | ||
| 11 | 9 | ||
| 12 | #include <asm/intrinsics.h> | 10 | #include <asm/intrinsics.h> |
| 13 | 11 | ||
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index fc5004a4b07d..445348facea9 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -161,7 +161,9 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val, | |||
| 161 | #endif | 161 | #endif |
| 162 | 162 | ||
| 163 | #ifndef OPTIMIZER_HIDE_VAR | 163 | #ifndef OPTIMIZER_HIDE_VAR |
| 164 | #define OPTIMIZER_HIDE_VAR(var) barrier() | 164 | /* Make the optimizer believe the variable can be manipulated arbitrarily. */ |
| 165 | #define OPTIMIZER_HIDE_VAR(var) \ | ||
| 166 | __asm__ ("" : "=r" (var) : "0" (var)) | ||
| 165 | #endif | 167 | #endif |
| 166 | 168 | ||
| 167 | /* Not-quite-unique ID. */ | 169 | /* Not-quite-unique ID. */ |
diff --git a/include/linux/compiler_attributes.h b/include/linux/compiler_attributes.h index 19f32b0c29af..6b318efd8a74 100644 --- a/include/linux/compiler_attributes.h +++ b/include/linux/compiler_attributes.h | |||
| @@ -34,6 +34,7 @@ | |||
| 34 | #ifndef __has_attribute | 34 | #ifndef __has_attribute |
| 35 | # define __has_attribute(x) __GCC4_has_attribute_##x | 35 | # define __has_attribute(x) __GCC4_has_attribute_##x |
| 36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) | 36 | # define __GCC4_has_attribute___assume_aligned__ (__GNUC_MINOR__ >= 9) |
| 37 | # define __GCC4_has_attribute___copy__ 0 | ||
| 37 | # define __GCC4_has_attribute___designated_init__ 0 | 38 | # define __GCC4_has_attribute___designated_init__ 0 |
| 38 | # define __GCC4_has_attribute___externally_visible__ 1 | 39 | # define __GCC4_has_attribute___externally_visible__ 1 |
| 39 | # define __GCC4_has_attribute___noclone__ 1 | 40 | # define __GCC4_has_attribute___noclone__ 1 |
| @@ -101,6 +102,19 @@ | |||
| 101 | #define __attribute_const__ __attribute__((__const__)) | 102 | #define __attribute_const__ __attribute__((__const__)) |
| 102 | 103 | ||
| 103 | /* | 104 | /* |
| 105 | * Optional: only supported since gcc >= 9 | ||
| 106 | * Optional: not supported by clang | ||
| 107 | * Optional: not supported by icc | ||
| 108 | * | ||
| 109 | * gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-copy-function-attribute | ||
| 110 | */ | ||
| 111 | #if __has_attribute(__copy__) | ||
| 112 | # define __copy(symbol) __attribute__((__copy__(symbol))) | ||
| 113 | #else | ||
| 114 | # define __copy(symbol) | ||
| 115 | #endif | ||
| 116 | |||
| 117 | /* | ||
| 104 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' | 118 | * Don't. Just don't. See commit 771c035372a0 ("deprecate the '__deprecated' |
| 105 | * attribute warnings entirely and for good") for more information. | 119 | * attribute warnings entirely and for good") for more information. |
| 106 | * | 120 | * |
diff --git a/include/linux/component.h b/include/linux/component.h index e71fbbbc74e2..16de18f473d7 100644 --- a/include/linux/component.h +++ b/include/linux/component.h | |||
| @@ -4,16 +4,38 @@ | |||
| 4 | 4 | ||
| 5 | #include <linux/stddef.h> | 5 | #include <linux/stddef.h> |
| 6 | 6 | ||
| 7 | |||
| 7 | struct device; | 8 | struct device; |
| 8 | 9 | ||
| 10 | /** | ||
| 11 | * struct component_ops - callbacks for component drivers | ||
| 12 | * | ||
| 13 | * Components are registered with component_add() and unregistered with | ||
| 14 | * component_del(). | ||
| 15 | */ | ||
| 9 | struct component_ops { | 16 | struct component_ops { |
| 17 | /** | ||
| 18 | * @bind: | ||
| 19 | * | ||
| 20 | * Called through component_bind_all() when the aggregate driver is | ||
| 21 | * ready to bind the overall driver. | ||
| 22 | */ | ||
| 10 | int (*bind)(struct device *comp, struct device *master, | 23 | int (*bind)(struct device *comp, struct device *master, |
| 11 | void *master_data); | 24 | void *master_data); |
| 25 | /** | ||
| 26 | * @unbind: | ||
| 27 | * | ||
| 28 | * Called through component_unbind_all() when the aggregate driver is | ||
| 29 | * ready to bind the overall driver, or when component_bind_all() fails | ||
| 30 | * part-ways through and needs to unbind some already bound components. | ||
| 31 | */ | ||
| 12 | void (*unbind)(struct device *comp, struct device *master, | 32 | void (*unbind)(struct device *comp, struct device *master, |
| 13 | void *master_data); | 33 | void *master_data); |
| 14 | }; | 34 | }; |
| 15 | 35 | ||
| 16 | int component_add(struct device *, const struct component_ops *); | 36 | int component_add(struct device *, const struct component_ops *); |
| 37 | int component_add_typed(struct device *dev, const struct component_ops *ops, | ||
| 38 | int subcomponent); | ||
| 17 | void component_del(struct device *, const struct component_ops *); | 39 | void component_del(struct device *, const struct component_ops *); |
| 18 | 40 | ||
| 19 | int component_bind_all(struct device *master, void *master_data); | 41 | int component_bind_all(struct device *master, void *master_data); |
| @@ -21,8 +43,42 @@ void component_unbind_all(struct device *master, void *master_data); | |||
| 21 | 43 | ||
| 22 | struct master; | 44 | struct master; |
| 23 | 45 | ||
| 46 | /** | ||
| 47 | * struct component_master_ops - callback for the aggregate driver | ||
| 48 | * | ||
| 49 | * Aggregate drivers are registered with component_master_add_with_match() and | ||
| 50 | * unregistered with component_master_del(). | ||
| 51 | */ | ||
| 24 | struct component_master_ops { | 52 | struct component_master_ops { |
| 53 | /** | ||
| 54 | * @bind: | ||
| 55 | * | ||
| 56 | * Called when all components or the aggregate driver, as specified in | ||
| 57 | * the match list passed to component_master_add_with_match(), are | ||
| 58 | * ready. Usually there are 3 steps to bind an aggregate driver: | ||
| 59 | * | ||
| 60 | * 1. Allocate a structure for the aggregate driver. | ||
| 61 | * | ||
| 62 | * 2. Bind all components to the aggregate driver by calling | ||
| 63 | * component_bind_all() with the aggregate driver structure as opaque | ||
| 64 | * pointer data. | ||
| 65 | * | ||
| 66 | * 3. Register the aggregate driver with the subsystem to publish its | ||
| 67 | * interfaces. | ||
| 68 | * | ||
| 69 | * Note that the lifetime of the aggregate driver does not align with | ||
| 70 | * any of the underlying &struct device instances. Therefore devm cannot | ||
| 71 | * be used and all resources acquired or allocated in this callback must | ||
| 72 | * be explicitly released in the @unbind callback. | ||
| 73 | */ | ||
| 25 | int (*bind)(struct device *master); | 74 | int (*bind)(struct device *master); |
| 75 | /** | ||
| 76 | * @unbind: | ||
| 77 | * | ||
| 78 | * Called when either the aggregate driver, using | ||
| 79 | * component_master_del(), or one of its components, using | ||
| 80 | * component_del(), is unregistered. | ||
| 81 | */ | ||
| 26 | void (*unbind)(struct device *master); | 82 | void (*unbind)(struct device *master); |
| 27 | }; | 83 | }; |
| 28 | 84 | ||
| @@ -37,7 +93,27 @@ void component_match_add_release(struct device *master, | |||
| 37 | struct component_match **matchptr, | 93 | struct component_match **matchptr, |
| 38 | void (*release)(struct device *, void *), | 94 | void (*release)(struct device *, void *), |
| 39 | int (*compare)(struct device *, void *), void *compare_data); | 95 | int (*compare)(struct device *, void *), void *compare_data); |
| 96 | void component_match_add_typed(struct device *master, | ||
| 97 | struct component_match **matchptr, | ||
| 98 | int (*compare_typed)(struct device *, int, void *), void *compare_data); | ||
| 40 | 99 | ||
| 100 | /** | ||
| 101 | * component_match_add - add a component match entry | ||
| 102 | * @master: device with the aggregate driver | ||
| 103 | * @matchptr: pointer to the list of component matches | ||
| 104 | * @compare: compare function to match against all components | ||
| 105 | * @compare_data: opaque pointer passed to the @compare function | ||
| 106 | * | ||
| 107 | * Adds a new component match to the list stored in @matchptr, which the @master | ||
| 108 | * aggregate driver needs to function. The list of component matches pointed to | ||
| 109 | * by @matchptr must be initialized to NULL before adding the first match. This | ||
| 110 | * only matches against components added with component_add(). | ||
| 111 | * | ||
| 112 | * The allocated match list in @matchptr is automatically released using devm | ||
| 113 | * actions. | ||
| 114 | * | ||
| 115 | * See also component_match_add_release() and component_match_add_typed(). | ||
| 116 | */ | ||
| 41 | static inline void component_match_add(struct device *master, | 117 | static inline void component_match_add(struct device *master, |
| 42 | struct component_match **matchptr, | 118 | struct component_match **matchptr, |
| 43 | int (*compare)(struct device *, void *), void *compare_data) | 119 | int (*compare)(struct device *, void *), void *compare_data) |
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h index ab137f97ecbd..ed798e114663 100644 --- a/include/linux/console_struct.h +++ b/include/linux/console_struct.h | |||
| @@ -119,7 +119,7 @@ struct vc_data { | |||
| 119 | unsigned int vc_s_blink : 1; | 119 | unsigned int vc_s_blink : 1; |
| 120 | unsigned int vc_s_reverse : 1; | 120 | unsigned int vc_s_reverse : 1; |
| 121 | /* misc */ | 121 | /* misc */ |
| 122 | unsigned int vc_ques : 1; | 122 | unsigned int vc_priv : 3; |
| 123 | unsigned int vc_need_wrap : 1; | 123 | unsigned int vc_need_wrap : 1; |
| 124 | unsigned int vc_can_do_color : 1; | 124 | unsigned int vc_can_do_color : 1; |
| 125 | unsigned int vc_report_mouse : 2; | 125 | unsigned int vc_report_mouse : 2; |
diff --git a/include/linux/coresight.h b/include/linux/coresight.h index 46c67a764877..7b87965f7a65 100644 --- a/include/linux/coresight.h +++ b/include/linux/coresight.h | |||
| @@ -154,8 +154,9 @@ struct coresight_connection { | |||
| 154 | * @orphan: true if the component has connections that haven't been linked. | 154 | * @orphan: true if the component has connections that haven't been linked. |
| 155 | * @enable: 'true' if component is currently part of an active path. | 155 | * @enable: 'true' if component is currently part of an active path. |
| 156 | * @activated: 'true' only if a _sink_ has been activated. A sink can be | 156 | * @activated: 'true' only if a _sink_ has been activated. A sink can be |
| 157 | activated but not yet enabled. Enabling for a _sink_ | 157 | * activated but not yet enabled. Enabling for a _sink_ |
| 158 | happens when a source has been selected for that it. | 158 | * appens when a source has been selected for that it. |
| 159 | * @ea: Device attribute for sink representation under PMU directory. | ||
| 159 | */ | 160 | */ |
| 160 | struct coresight_device { | 161 | struct coresight_device { |
| 161 | struct coresight_connection *conns; | 162 | struct coresight_connection *conns; |
| @@ -168,7 +169,9 @@ struct coresight_device { | |||
| 168 | atomic_t *refcnt; | 169 | atomic_t *refcnt; |
| 169 | bool orphan; | 170 | bool orphan; |
| 170 | bool enable; /* true only if configured as part of a path */ | 171 | bool enable; /* true only if configured as part of a path */ |
| 172 | /* sink specific fields */ | ||
| 171 | bool activated; /* true only if a sink is part of a path */ | 173 | bool activated; /* true only if a sink is part of a path */ |
| 174 | struct dev_ext_attribute *ea; | ||
| 172 | }; | 175 | }; |
| 173 | 176 | ||
| 174 | #define to_coresight_device(d) container_of(d, struct coresight_device, dev) | 177 | #define to_coresight_device(d) container_of(d, struct coresight_device, dev) |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 218df7f4d3e1..5041357d0297 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h | |||
| @@ -180,12 +180,10 @@ enum cpuhp_smt_control { | |||
| 180 | #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) | 180 | #if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT) |
| 181 | extern enum cpuhp_smt_control cpu_smt_control; | 181 | extern enum cpuhp_smt_control cpu_smt_control; |
| 182 | extern void cpu_smt_disable(bool force); | 182 | extern void cpu_smt_disable(bool force); |
| 183 | extern void cpu_smt_check_topology_early(void); | ||
| 184 | extern void cpu_smt_check_topology(void); | 183 | extern void cpu_smt_check_topology(void); |
| 185 | #else | 184 | #else |
| 186 | # define cpu_smt_control (CPU_SMT_ENABLED) | 185 | # define cpu_smt_control (CPU_SMT_ENABLED) |
| 187 | static inline void cpu_smt_disable(bool force) { } | 186 | static inline void cpu_smt_disable(bool force) { } |
| 188 | static inline void cpu_smt_check_topology_early(void) { } | ||
| 189 | static inline void cpu_smt_check_topology(void) { } | 187 | static inline void cpu_smt_check_topology(void) { } |
| 190 | #endif | 188 | #endif |
| 191 | 189 | ||
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h index c86d6d8bdfed..b160e98076e3 100644 --- a/include/linux/cpufreq.h +++ b/include/linux/cpufreq.h | |||
| @@ -151,6 +151,9 @@ struct cpufreq_policy { | |||
| 151 | 151 | ||
| 152 | /* For cpufreq driver's internal use */ | 152 | /* For cpufreq driver's internal use */ |
| 153 | void *driver_data; | 153 | void *driver_data; |
| 154 | |||
| 155 | /* Pointer to the cooling device if used for thermal mitigation */ | ||
| 156 | struct thermal_cooling_device *cdev; | ||
| 154 | }; | 157 | }; |
| 155 | 158 | ||
| 156 | /* Only for ACPI */ | 159 | /* Only for ACPI */ |
| @@ -254,20 +257,12 @@ __ATTR(_name, 0644, show_##_name, store_##_name) | |||
| 254 | static struct freq_attr _name = \ | 257 | static struct freq_attr _name = \ |
| 255 | __ATTR(_name, 0200, NULL, store_##_name) | 258 | __ATTR(_name, 0200, NULL, store_##_name) |
| 256 | 259 | ||
| 257 | struct global_attr { | ||
| 258 | struct attribute attr; | ||
| 259 | ssize_t (*show)(struct kobject *kobj, | ||
| 260 | struct attribute *attr, char *buf); | ||
| 261 | ssize_t (*store)(struct kobject *a, struct attribute *b, | ||
| 262 | const char *c, size_t count); | ||
| 263 | }; | ||
| 264 | |||
| 265 | #define define_one_global_ro(_name) \ | 260 | #define define_one_global_ro(_name) \ |
| 266 | static struct global_attr _name = \ | 261 | static struct kobj_attribute _name = \ |
| 267 | __ATTR(_name, 0444, show_##_name, NULL) | 262 | __ATTR(_name, 0444, show_##_name, NULL) |
| 268 | 263 | ||
| 269 | #define define_one_global_rw(_name) \ | 264 | #define define_one_global_rw(_name) \ |
| 270 | static struct global_attr _name = \ | 265 | static struct kobj_attribute _name = \ |
| 271 | __ATTR(_name, 0644, show_##_name, store_##_name) | 266 | __ATTR(_name, 0644, show_##_name, store_##_name) |
| 272 | 267 | ||
| 273 | 268 | ||
| @@ -330,6 +325,8 @@ struct cpufreq_driver { | |||
| 330 | /* optional */ | 325 | /* optional */ |
| 331 | int (*bios_limit)(int cpu, unsigned int *limit); | 326 | int (*bios_limit)(int cpu, unsigned int *limit); |
| 332 | 327 | ||
| 328 | int (*online)(struct cpufreq_policy *policy); | ||
| 329 | int (*offline)(struct cpufreq_policy *policy); | ||
| 333 | int (*exit)(struct cpufreq_policy *policy); | 330 | int (*exit)(struct cpufreq_policy *policy); |
| 334 | void (*stop_cpu)(struct cpufreq_policy *policy); | 331 | void (*stop_cpu)(struct cpufreq_policy *policy); |
| 335 | int (*suspend)(struct cpufreq_policy *policy); | 332 | int (*suspend)(struct cpufreq_policy *policy); |
| @@ -346,14 +343,15 @@ struct cpufreq_driver { | |||
| 346 | }; | 343 | }; |
| 347 | 344 | ||
| 348 | /* flags */ | 345 | /* flags */ |
| 349 | #define CPUFREQ_STICKY (1 << 0) /* driver isn't removed even if | 346 | |
| 350 | all ->init() calls failed */ | 347 | /* driver isn't removed even if all ->init() calls failed */ |
| 351 | #define CPUFREQ_CONST_LOOPS (1 << 1) /* loops_per_jiffy or other | 348 | #define CPUFREQ_STICKY BIT(0) |
| 352 | kernel "constants" aren't | 349 | |
| 353 | affected by frequency | 350 | /* loops_per_jiffy or other kernel "constants" aren't affected by frequency transitions */ |
| 354 | transitions */ | 351 | #define CPUFREQ_CONST_LOOPS BIT(1) |
| 355 | #define CPUFREQ_PM_NO_WARN (1 << 2) /* don't warn on suspend/resume | 352 | |
| 356 | speed mismatches */ | 353 | /* don't warn on suspend/resume speed mismatches */ |
| 354 | #define CPUFREQ_PM_NO_WARN BIT(2) | ||
| 357 | 355 | ||
| 358 | /* | 356 | /* |
| 359 | * This should be set by platforms having multiple clock-domains, i.e. | 357 | * This should be set by platforms having multiple clock-domains, i.e. |
| @@ -361,14 +359,14 @@ struct cpufreq_driver { | |||
| 361 | * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same | 359 | * be created in cpu/cpu<num>/cpufreq/ directory and so they can use the same |
| 362 | * governor with different tunables for different clusters. | 360 | * governor with different tunables for different clusters. |
| 363 | */ | 361 | */ |
| 364 | #define CPUFREQ_HAVE_GOVERNOR_PER_POLICY (1 << 3) | 362 | #define CPUFREQ_HAVE_GOVERNOR_PER_POLICY BIT(3) |
| 365 | 363 | ||
| 366 | /* | 364 | /* |
| 367 | * Driver will do POSTCHANGE notifications from outside of their ->target() | 365 | * Driver will do POSTCHANGE notifications from outside of their ->target() |
| 368 | * routine and so must set cpufreq_driver->flags with this flag, so that core | 366 | * routine and so must set cpufreq_driver->flags with this flag, so that core |
| 369 | * can handle them specially. | 367 | * can handle them specially. |
| 370 | */ | 368 | */ |
| 371 | #define CPUFREQ_ASYNC_NOTIFICATION (1 << 4) | 369 | #define CPUFREQ_ASYNC_NOTIFICATION BIT(4) |
| 372 | 370 | ||
| 373 | /* | 371 | /* |
| 374 | * Set by drivers which want cpufreq core to check if CPU is running at a | 372 | * Set by drivers which want cpufreq core to check if CPU is running at a |
| @@ -377,13 +375,19 @@ struct cpufreq_driver { | |||
| 377 | * from the table. And if that fails, we will stop further boot process by | 375 | * from the table. And if that fails, we will stop further boot process by |
| 378 | * issuing a BUG_ON(). | 376 | * issuing a BUG_ON(). |
| 379 | */ | 377 | */ |
| 380 | #define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5) | 378 | #define CPUFREQ_NEED_INITIAL_FREQ_CHECK BIT(5) |
| 381 | 379 | ||
| 382 | /* | 380 | /* |
| 383 | * Set by drivers to disallow use of governors with "dynamic_switching" flag | 381 | * Set by drivers to disallow use of governors with "dynamic_switching" flag |
| 384 | * set. | 382 | * set. |
| 385 | */ | 383 | */ |
| 386 | #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING (1 << 6) | 384 | #define CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING BIT(6) |
| 385 | |||
| 386 | /* | ||
| 387 | * Set by drivers that want the core to automatically register the cpufreq | ||
| 388 | * driver as a thermal cooling device. | ||
| 389 | */ | ||
| 390 | #define CPUFREQ_IS_COOLING_DEV BIT(7) | ||
| 387 | 391 | ||
| 388 | int cpufreq_register_driver(struct cpufreq_driver *driver_data); | 392 | int cpufreq_register_driver(struct cpufreq_driver *driver_data); |
| 389 | int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); | 393 | int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); |
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h index fd586d0301e7..e78281d07b70 100644 --- a/include/linux/cpuhotplug.h +++ b/include/linux/cpuhotplug.h | |||
| @@ -121,6 +121,7 @@ enum cpuhp_state { | |||
| 121 | CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, | 121 | CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING, |
| 122 | CPUHP_AP_ARM_TWD_STARTING, | 122 | CPUHP_AP_ARM_TWD_STARTING, |
| 123 | CPUHP_AP_QCOM_TIMER_STARTING, | 123 | CPUHP_AP_QCOM_TIMER_STARTING, |
| 124 | CPUHP_AP_TEGRA_TIMER_STARTING, | ||
| 124 | CPUHP_AP_ARMADA_TIMER_STARTING, | 125 | CPUHP_AP_ARMADA_TIMER_STARTING, |
| 125 | CPUHP_AP_MARCO_TIMER_STARTING, | 126 | CPUHP_AP_MARCO_TIMER_STARTING, |
| 126 | CPUHP_AP_MIPS_GIC_TIMER_STARTING, | 127 | CPUHP_AP_MIPS_GIC_TIMER_STARTING, |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index 4dff74f48d4b..3b39472324a3 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -69,11 +69,9 @@ struct cpuidle_state { | |||
| 69 | 69 | ||
| 70 | /* Idle State Flags */ | 70 | /* Idle State Flags */ |
| 71 | #define CPUIDLE_FLAG_NONE (0x00) | 71 | #define CPUIDLE_FLAG_NONE (0x00) |
| 72 | #define CPUIDLE_FLAG_POLLING (0x01) /* polling state */ | 72 | #define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */ |
| 73 | #define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */ | 73 | #define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */ |
| 74 | #define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */ | 74 | #define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */ |
| 75 | |||
| 76 | #define CPUIDLE_DRIVER_FLAGS_MASK (0xFFFF0000) | ||
| 77 | 75 | ||
| 78 | struct cpuidle_device_kobj; | 76 | struct cpuidle_device_kobj; |
| 79 | struct cpuidle_state_kobj; | 77 | struct cpuidle_state_kobj; |
diff --git a/include/linux/cred.h b/include/linux/cred.h index 4907c9df86b3..ddd45bb74887 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
| @@ -15,7 +15,6 @@ | |||
| 15 | #include <linux/capability.h> | 15 | #include <linux/capability.h> |
| 16 | #include <linux/init.h> | 16 | #include <linux/init.h> |
| 17 | #include <linux/key.h> | 17 | #include <linux/key.h> |
| 18 | #include <linux/selinux.h> | ||
| 19 | #include <linux/atomic.h> | 18 | #include <linux/atomic.h> |
| 20 | #include <linux/uidgid.h> | 19 | #include <linux/uidgid.h> |
| 21 | #include <linux/sched.h> | 20 | #include <linux/sched.h> |
diff --git a/include/linux/crypto.h b/include/linux/crypto.h index 902ec171fc6d..f2565a103158 100644 --- a/include/linux/crypto.h +++ b/include/linux/crypto.h | |||
| @@ -118,7 +118,7 @@ | |||
| 118 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 | 118 | #define CRYPTO_TFM_REQ_MASK 0x000fff00 |
| 119 | #define CRYPTO_TFM_RES_MASK 0xfff00000 | 119 | #define CRYPTO_TFM_RES_MASK 0xfff00000 |
| 120 | 120 | ||
| 121 | #define CRYPTO_TFM_REQ_WEAK_KEY 0x00000100 | 121 | #define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100 |
| 122 | #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 | 122 | #define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200 |
| 123 | #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 | 123 | #define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400 |
| 124 | #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 | 124 | #define CRYPTO_TFM_RES_WEAK_KEY 0x00100000 |
| @@ -188,14 +188,6 @@ struct blkcipher_desc { | |||
| 188 | u32 flags; | 188 | u32 flags; |
| 189 | }; | 189 | }; |
| 190 | 190 | ||
| 191 | struct cipher_desc { | ||
| 192 | struct crypto_tfm *tfm; | ||
| 193 | void (*crfn)(struct crypto_tfm *tfm, u8 *dst, const u8 *src); | ||
| 194 | unsigned int (*prfn)(const struct cipher_desc *desc, u8 *dst, | ||
| 195 | const u8 *src, unsigned int nbytes); | ||
| 196 | void *info; | ||
| 197 | }; | ||
| 198 | |||
| 199 | /** | 191 | /** |
| 200 | * DOC: Block Cipher Algorithm Definitions | 192 | * DOC: Block Cipher Algorithm Definitions |
| 201 | * | 193 | * |
diff --git a/include/linux/davinci_emac.h b/include/linux/davinci_emac.h index 05b97144d342..28e6cf1356da 100644 --- a/include/linux/davinci_emac.h +++ b/include/linux/davinci_emac.h | |||
| @@ -46,5 +46,4 @@ enum { | |||
| 46 | EMAC_VERSION_2, /* DM646x */ | 46 | EMAC_VERSION_2, /* DM646x */ |
| 47 | }; | 47 | }; |
| 48 | 48 | ||
| 49 | void davinci_get_mac_addr(struct nvmem_device *nvmem, void *context); | ||
| 50 | #endif | 49 | #endif |
diff --git a/include/linux/dcache.h b/include/linux/dcache.h index ef4b70f64f33..60996e64c579 100644 --- a/include/linux/dcache.h +++ b/include/linux/dcache.h | |||
| @@ -62,9 +62,10 @@ extern const struct qstr slash_name; | |||
| 62 | struct dentry_stat_t { | 62 | struct dentry_stat_t { |
| 63 | long nr_dentry; | 63 | long nr_dentry; |
| 64 | long nr_unused; | 64 | long nr_unused; |
| 65 | long age_limit; /* age in seconds */ | 65 | long age_limit; /* age in seconds */ |
| 66 | long want_pages; /* pages requested by system */ | 66 | long want_pages; /* pages requested by system */ |
| 67 | long dummy[2]; | 67 | long nr_negative; /* # of unused negative dentries */ |
| 68 | long dummy; /* Reserved for future use */ | ||
| 68 | }; | 69 | }; |
| 69 | extern struct dentry_stat_t dentry_stat; | 70 | extern struct dentry_stat_t dentry_stat; |
| 70 | 71 | ||
diff --git a/include/linux/delay.h b/include/linux/delay.h index b78bab4395d8..8e6828094c1e 100644 --- a/include/linux/delay.h +++ b/include/linux/delay.h | |||
| @@ -55,6 +55,7 @@ static inline void ndelay(unsigned long x) | |||
| 55 | 55 | ||
| 56 | extern unsigned long lpj_fine; | 56 | extern unsigned long lpj_fine; |
| 57 | void calibrate_delay(void); | 57 | void calibrate_delay(void); |
| 58 | void __attribute__((weak)) calibration_delay_done(void); | ||
| 58 | void msleep(unsigned int msecs); | 59 | void msleep(unsigned int msecs); |
| 59 | unsigned long msleep_interruptible(unsigned int msecs); | 60 | unsigned long msleep_interruptible(unsigned int msecs); |
| 60 | void usleep_range(unsigned long min, unsigned long max); | 61 | void usleep_range(unsigned long min, unsigned long max); |
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h index e528baebad69..b0672756d056 100644 --- a/include/linux/device-mapper.h +++ b/include/linux/device-mapper.h | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | 10 | ||
| 11 | #include <linux/bio.h> | 11 | #include <linux/bio.h> |
| 12 | #include <linux/blkdev.h> | 12 | #include <linux/blkdev.h> |
| 13 | #include <linux/dm-ioctl.h> | ||
| 13 | #include <linux/math64.h> | 14 | #include <linux/math64.h> |
| 14 | #include <linux/ratelimit.h> | 15 | #include <linux/ratelimit.h> |
| 15 | 16 | ||
| @@ -315,12 +316,6 @@ struct dm_target { | |||
| 315 | * whether or not its underlying devices have support. | 316 | * whether or not its underlying devices have support. |
| 316 | */ | 317 | */ |
| 317 | bool discards_supported:1; | 318 | bool discards_supported:1; |
| 318 | |||
| 319 | /* | ||
| 320 | * Set if the target required discard bios to be split | ||
| 321 | * on max_io_len boundary. | ||
| 322 | */ | ||
| 323 | bool split_discard_bios:1; | ||
| 324 | }; | 319 | }; |
| 325 | 320 | ||
| 326 | /* Each target can link one of these into the table */ | 321 | /* Each target can link one of these into the table */ |
| @@ -431,6 +426,14 @@ void dm_remap_zone_report(struct dm_target *ti, sector_t start, | |||
| 431 | struct blk_zone *zones, unsigned int *nr_zones); | 426 | struct blk_zone *zones, unsigned int *nr_zones); |
| 432 | union map_info *dm_get_rq_mapinfo(struct request *rq); | 427 | union map_info *dm_get_rq_mapinfo(struct request *rq); |
| 433 | 428 | ||
| 429 | /* | ||
| 430 | * Device mapper functions to parse and create devices specified by the | ||
| 431 | * parameter "dm-mod.create=" | ||
| 432 | */ | ||
| 433 | int __init dm_early_create(struct dm_ioctl *dmi, | ||
| 434 | struct dm_target_spec **spec_array, | ||
| 435 | char **target_params_array); | ||
| 436 | |||
| 434 | struct queue_limits *dm_get_queue_limits(struct mapped_device *md); | 437 | struct queue_limits *dm_get_queue_limits(struct mapped_device *md); |
| 435 | 438 | ||
| 436 | /* | 439 | /* |
| @@ -609,7 +612,7 @@ do { \ | |||
| 609 | */ | 612 | */ |
| 610 | #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) | 613 | #define dm_target_offset(ti, sector) ((sector) - (ti)->begin) |
| 611 | 614 | ||
| 612 | static inline sector_t to_sector(unsigned long n) | 615 | static inline sector_t to_sector(unsigned long long n) |
| 613 | { | 616 | { |
| 614 | return (n >> SECTOR_SHIFT); | 617 | return (n >> SECTOR_SHIFT); |
| 615 | } | 618 | } |
diff --git a/include/linux/device.h b/include/linux/device.h index 6cb4640b6160..4e6987e11f68 100644 --- a/include/linux/device.h +++ b/include/linux/device.h | |||
| @@ -49,8 +49,6 @@ struct bus_attribute { | |||
| 49 | ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); | 49 | ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count); |
| 50 | }; | 50 | }; |
| 51 | 51 | ||
| 52 | #define BUS_ATTR(_name, _mode, _show, _store) \ | ||
| 53 | struct bus_attribute bus_attr_##_name = __ATTR(_name, _mode, _show, _store) | ||
| 54 | #define BUS_ATTR_RW(_name) \ | 52 | #define BUS_ATTR_RW(_name) \ |
| 55 | struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) | 53 | struct bus_attribute bus_attr_##_name = __ATTR_RW(_name) |
| 56 | #define BUS_ATTR_RO(_name) \ | 54 | #define BUS_ATTR_RO(_name) \ |
| @@ -341,6 +339,7 @@ struct device *driver_find_device(struct device_driver *drv, | |||
| 341 | struct device *start, void *data, | 339 | struct device *start, void *data, |
| 342 | int (*match)(struct device *dev, void *data)); | 340 | int (*match)(struct device *dev, void *data)); |
| 343 | 341 | ||
| 342 | void driver_deferred_probe_add(struct device *dev); | ||
| 344 | int driver_deferred_probe_check_state(struct device *dev); | 343 | int driver_deferred_probe_check_state(struct device *dev); |
| 345 | 344 | ||
| 346 | /** | 345 | /** |
| @@ -757,11 +756,17 @@ struct device_dma_parameters { | |||
| 757 | 756 | ||
| 758 | /** | 757 | /** |
| 759 | * struct device_connection - Device Connection Descriptor | 758 | * struct device_connection - Device Connection Descriptor |
| 759 | * @fwnode: The device node of the connected device | ||
| 760 | * @endpoint: The names of the two devices connected together | 760 | * @endpoint: The names of the two devices connected together |
| 761 | * @id: Unique identifier for the connection | 761 | * @id: Unique identifier for the connection |
| 762 | * @list: List head, private, for internal use only | 762 | * @list: List head, private, for internal use only |
| 763 | * | ||
| 764 | * NOTE: @fwnode is not used together with @endpoint. @fwnode is used when | ||
| 765 | * platform firmware defines the connection. When the connection is registered | ||
| 766 | * with device_connection_add() @endpoint is used instead. | ||
| 763 | */ | 767 | */ |
| 764 | struct device_connection { | 768 | struct device_connection { |
| 769 | struct fwnode_handle *fwnode; | ||
| 765 | const char *endpoint[2]; | 770 | const char *endpoint[2]; |
| 766 | const char *id; | 771 | const char *id; |
| 767 | struct list_head list; | 772 | struct list_head list; |
| @@ -827,12 +832,14 @@ enum device_link_state { | |||
| 827 | * PM_RUNTIME: If set, the runtime PM framework will use this link. | 832 | * PM_RUNTIME: If set, the runtime PM framework will use this link. |
| 828 | * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. | 833 | * RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation. |
| 829 | * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. | 834 | * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind. |
| 835 | * AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds. | ||
| 830 | */ | 836 | */ |
| 831 | #define DL_FLAG_STATELESS BIT(0) | 837 | #define DL_FLAG_STATELESS BIT(0) |
| 832 | #define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) | 838 | #define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1) |
| 833 | #define DL_FLAG_PM_RUNTIME BIT(2) | 839 | #define DL_FLAG_PM_RUNTIME BIT(2) |
| 834 | #define DL_FLAG_RPM_ACTIVE BIT(3) | 840 | #define DL_FLAG_RPM_ACTIVE BIT(3) |
| 835 | #define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) | 841 | #define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4) |
| 842 | #define DL_FLAG_AUTOPROBE_CONSUMER BIT(5) | ||
| 836 | 843 | ||
| 837 | /** | 844 | /** |
| 838 | * struct device_link - Device link representation. | 845 | * struct device_link - Device link representation. |
| @@ -845,6 +852,7 @@ enum device_link_state { | |||
| 845 | * @rpm_active: Whether or not the consumer device is runtime-PM-active. | 852 | * @rpm_active: Whether or not the consumer device is runtime-PM-active. |
| 846 | * @kref: Count repeated addition of the same link. | 853 | * @kref: Count repeated addition of the same link. |
| 847 | * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. | 854 | * @rcu_head: An RCU head to use for deferred execution of SRCU callbacks. |
| 855 | * @supplier_preactivated: Supplier has been made active before consumer probe. | ||
| 848 | */ | 856 | */ |
| 849 | struct device_link { | 857 | struct device_link { |
| 850 | struct device *supplier; | 858 | struct device *supplier; |
| @@ -853,11 +861,12 @@ struct device_link { | |||
| 853 | struct list_head c_node; | 861 | struct list_head c_node; |
| 854 | enum device_link_state status; | 862 | enum device_link_state status; |
| 855 | u32 flags; | 863 | u32 flags; |
| 856 | bool rpm_active; | 864 | refcount_t rpm_active; |
| 857 | struct kref kref; | 865 | struct kref kref; |
| 858 | #ifdef CONFIG_SRCU | 866 | #ifdef CONFIG_SRCU |
| 859 | struct rcu_head rcu_head; | 867 | struct rcu_head rcu_head; |
| 860 | #endif | 868 | #endif |
| 869 | bool supplier_preactivated; /* Owned by consumer probe. */ | ||
| 861 | }; | 870 | }; |
| 862 | 871 | ||
| 863 | /** | 872 | /** |
| @@ -985,7 +994,7 @@ struct device { | |||
| 985 | void *platform_data; /* Platform specific data, device | 994 | void *platform_data; /* Platform specific data, device |
| 986 | core doesn't touch it */ | 995 | core doesn't touch it */ |
| 987 | void *driver_data; /* Driver data, set and get with | 996 | void *driver_data; /* Driver data, set and get with |
| 988 | dev_set/get_drvdata */ | 997 | dev_set_drvdata/dev_get_drvdata */ |
| 989 | struct dev_links_info links; | 998 | struct dev_links_info links; |
| 990 | struct dev_pm_info power; | 999 | struct dev_pm_info power; |
| 991 | struct dev_pm_domain *pm_domain; | 1000 | struct dev_pm_domain *pm_domain; |
| @@ -1017,8 +1026,10 @@ struct device { | |||
| 1017 | 1026 | ||
| 1018 | struct list_head dma_pools; /* dma pools (if dma'ble) */ | 1027 | struct list_head dma_pools; /* dma pools (if dma'ble) */ |
| 1019 | 1028 | ||
| 1029 | #ifdef CONFIG_DMA_DECLARE_COHERENT | ||
| 1020 | struct dma_coherent_mem *dma_mem; /* internal for coherent mem | 1030 | struct dma_coherent_mem *dma_mem; /* internal for coherent mem |
| 1021 | override */ | 1031 | override */ |
| 1032 | #endif | ||
| 1022 | #ifdef CONFIG_DMA_CMA | 1033 | #ifdef CONFIG_DMA_CMA |
| 1023 | struct cma *cma_area; /* contiguous memory area for dma | 1034 | struct cma *cma_area; /* contiguous memory area for dma |
| 1024 | allocations */ | 1035 | allocations */ |
| @@ -1035,7 +1046,6 @@ struct device { | |||
| 1035 | spinlock_t devres_lock; | 1046 | spinlock_t devres_lock; |
| 1036 | struct list_head devres_head; | 1047 | struct list_head devres_head; |
| 1037 | 1048 | ||
| 1038 | struct klist_node knode_class; | ||
| 1039 | struct class *class; | 1049 | struct class *class; |
| 1040 | const struct attribute_group **groups; /* optional groups */ | 1050 | const struct attribute_group **groups; /* optional groups */ |
| 1041 | 1051 | ||
| @@ -1095,7 +1105,7 @@ static inline void set_dev_node(struct device *dev, int node) | |||
| 1095 | #else | 1105 | #else |
| 1096 | static inline int dev_to_node(struct device *dev) | 1106 | static inline int dev_to_node(struct device *dev) |
| 1097 | { | 1107 | { |
| 1098 | return -1; | 1108 | return NUMA_NO_NODE; |
| 1099 | } | 1109 | } |
| 1100 | static inline void set_dev_node(struct device *dev, int node) | 1110 | static inline void set_dev_node(struct device *dev, int node) |
| 1101 | { | 1111 | { |
| @@ -1165,6 +1175,16 @@ static inline bool device_async_suspend_enabled(struct device *dev) | |||
| 1165 | return !!dev->power.async_suspend; | 1175 | return !!dev->power.async_suspend; |
| 1166 | } | 1176 | } |
| 1167 | 1177 | ||
| 1178 | static inline bool device_pm_not_required(struct device *dev) | ||
| 1179 | { | ||
| 1180 | return dev->power.no_pm; | ||
| 1181 | } | ||
| 1182 | |||
| 1183 | static inline void device_set_pm_not_required(struct device *dev) | ||
| 1184 | { | ||
| 1185 | dev->power.no_pm = true; | ||
| 1186 | } | ||
| 1187 | |||
| 1168 | static inline void dev_pm_syscore_device(struct device *dev, bool val) | 1188 | static inline void dev_pm_syscore_device(struct device *dev, bool val) |
| 1169 | { | 1189 | { |
| 1170 | #ifdef CONFIG_PM_SLEEP | 1190 | #ifdef CONFIG_PM_SLEEP |
| @@ -1382,28 +1402,28 @@ void device_link_remove(void *consumer, struct device *supplier); | |||
| 1382 | 1402 | ||
| 1383 | #ifdef CONFIG_PRINTK | 1403 | #ifdef CONFIG_PRINTK |
| 1384 | 1404 | ||
| 1385 | __printf(3, 0) | 1405 | __printf(3, 0) __cold |
| 1386 | int dev_vprintk_emit(int level, const struct device *dev, | 1406 | int dev_vprintk_emit(int level, const struct device *dev, |
| 1387 | const char *fmt, va_list args); | 1407 | const char *fmt, va_list args); |
| 1388 | __printf(3, 4) | 1408 | __printf(3, 4) __cold |
| 1389 | int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); | 1409 | int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...); |
| 1390 | 1410 | ||
| 1391 | __printf(3, 4) | 1411 | __printf(3, 4) __cold |
| 1392 | void dev_printk(const char *level, const struct device *dev, | 1412 | void dev_printk(const char *level, const struct device *dev, |
| 1393 | const char *fmt, ...); | 1413 | const char *fmt, ...); |
| 1394 | __printf(2, 3) | 1414 | __printf(2, 3) __cold |
| 1395 | void _dev_emerg(const struct device *dev, const char *fmt, ...); | 1415 | void _dev_emerg(const struct device *dev, const char *fmt, ...); |
| 1396 | __printf(2, 3) | 1416 | __printf(2, 3) __cold |
| 1397 | void _dev_alert(const struct device *dev, const char *fmt, ...); | 1417 | void _dev_alert(const struct device *dev, const char *fmt, ...); |
| 1398 | __printf(2, 3) | 1418 | __printf(2, 3) __cold |
| 1399 | void _dev_crit(const struct device *dev, const char *fmt, ...); | 1419 | void _dev_crit(const struct device *dev, const char *fmt, ...); |
| 1400 | __printf(2, 3) | 1420 | __printf(2, 3) __cold |
| 1401 | void _dev_err(const struct device *dev, const char *fmt, ...); | 1421 | void _dev_err(const struct device *dev, const char *fmt, ...); |
| 1402 | __printf(2, 3) | 1422 | __printf(2, 3) __cold |
| 1403 | void _dev_warn(const struct device *dev, const char *fmt, ...); | 1423 | void _dev_warn(const struct device *dev, const char *fmt, ...); |
| 1404 | __printf(2, 3) | 1424 | __printf(2, 3) __cold |
| 1405 | void _dev_notice(const struct device *dev, const char *fmt, ...); | 1425 | void _dev_notice(const struct device *dev, const char *fmt, ...); |
| 1406 | __printf(2, 3) | 1426 | __printf(2, 3) __cold |
| 1407 | void _dev_info(const struct device *dev, const char *fmt, ...); | 1427 | void _dev_info(const struct device *dev, const char *fmt, ...); |
| 1408 | 1428 | ||
| 1409 | #else | 1429 | #else |
| @@ -1548,7 +1568,7 @@ do { \ | |||
| 1548 | DEFAULT_RATELIMIT_INTERVAL, \ | 1568 | DEFAULT_RATELIMIT_INTERVAL, \ |
| 1549 | DEFAULT_RATELIMIT_BURST); \ | 1569 | DEFAULT_RATELIMIT_BURST); \ |
| 1550 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 1570 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
| 1551 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ | 1571 | if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ |
| 1552 | __ratelimit(&_rs)) \ | 1572 | __ratelimit(&_rs)) \ |
| 1553 | __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \ | 1573 | __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \ |
| 1554 | ##__VA_ARGS__); \ | 1574 | ##__VA_ARGS__); \ |
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h index bc8940ca280d..c0ff417b4770 100644 --- a/include/linux/dma-fence-array.h +++ b/include/linux/dma-fence-array.h | |||
| @@ -40,6 +40,7 @@ struct dma_fence_array_cb { | |||
| 40 | * @num_fences: number of fences in the array | 40 | * @num_fences: number of fences in the array |
| 41 | * @num_pending: fences in the array still pending | 41 | * @num_pending: fences in the array still pending |
| 42 | * @fences: array of the fences | 42 | * @fences: array of the fences |
| 43 | * @work: internal irq_work function | ||
| 43 | */ | 44 | */ |
| 44 | struct dma_fence_array { | 45 | struct dma_fence_array { |
| 45 | struct dma_fence base; | 46 | struct dma_fence base; |
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h index 999e4b104410..6b788467b2e3 100644 --- a/include/linux/dma-fence.h +++ b/include/linux/dma-fence.h | |||
| @@ -77,7 +77,7 @@ struct dma_fence { | |||
| 77 | struct list_head cb_list; | 77 | struct list_head cb_list; |
| 78 | spinlock_t *lock; | 78 | spinlock_t *lock; |
| 79 | u64 context; | 79 | u64 context; |
| 80 | unsigned seqno; | 80 | u64 seqno; |
| 81 | unsigned long flags; | 81 | unsigned long flags; |
| 82 | ktime_t timestamp; | 82 | ktime_t timestamp; |
| 83 | int error; | 83 | int error; |
| @@ -244,7 +244,7 @@ struct dma_fence_ops { | |||
| 244 | }; | 244 | }; |
| 245 | 245 | ||
| 246 | void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, | 246 | void dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops, |
| 247 | spinlock_t *lock, u64 context, unsigned seqno); | 247 | spinlock_t *lock, u64 context, u64 seqno); |
| 248 | 248 | ||
| 249 | void dma_fence_release(struct kref *kref); | 249 | void dma_fence_release(struct kref *kref); |
| 250 | void dma_fence_free(struct dma_fence *fence); | 250 | void dma_fence_free(struct dma_fence *fence); |
| @@ -414,9 +414,17 @@ dma_fence_is_signaled(struct dma_fence *fence) | |||
| 414 | * Returns true if f1 is chronologically later than f2. Both fences must be | 414 | * Returns true if f1 is chronologically later than f2. Both fences must be |
| 415 | * from the same context, since a seqno is not common across contexts. | 415 | * from the same context, since a seqno is not common across contexts. |
| 416 | */ | 416 | */ |
| 417 | static inline bool __dma_fence_is_later(u32 f1, u32 f2) | 417 | static inline bool __dma_fence_is_later(u64 f1, u64 f2) |
| 418 | { | 418 | { |
| 419 | return (int)(f1 - f2) > 0; | 419 | /* This is for backward compatibility with drivers which can only handle |
| 420 | * 32bit sequence numbers. Use a 64bit compare when any of the higher | ||
| 421 | * bits are none zero, otherwise use a 32bit compare with wrap around | ||
| 422 | * handling. | ||
| 423 | */ | ||
| 424 | if (upper_32_bits(f1) || upper_32_bits(f2)) | ||
| 425 | return f1 > f2; | ||
| 426 | |||
| 427 | return (int)(lower_32_bits(f1) - lower_32_bits(f2)) > 0; | ||
| 420 | } | 428 | } |
| 421 | 429 | ||
| 422 | /** | 430 | /** |
| @@ -548,21 +556,21 @@ u64 dma_fence_context_alloc(unsigned num); | |||
| 548 | do { \ | 556 | do { \ |
| 549 | struct dma_fence *__ff = (f); \ | 557 | struct dma_fence *__ff = (f); \ |
| 550 | if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ | 558 | if (IS_ENABLED(CONFIG_DMA_FENCE_TRACE)) \ |
| 551 | pr_info("f %llu#%u: " fmt, \ | 559 | pr_info("f %llu#%llu: " fmt, \ |
| 552 | __ff->context, __ff->seqno, ##args); \ | 560 | __ff->context, __ff->seqno, ##args); \ |
| 553 | } while (0) | 561 | } while (0) |
| 554 | 562 | ||
| 555 | #define DMA_FENCE_WARN(f, fmt, args...) \ | 563 | #define DMA_FENCE_WARN(f, fmt, args...) \ |
| 556 | do { \ | 564 | do { \ |
| 557 | struct dma_fence *__ff = (f); \ | 565 | struct dma_fence *__ff = (f); \ |
| 558 | pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | 566 | pr_warn("f %llu#%llu: " fmt, __ff->context, __ff->seqno,\ |
| 559 | ##args); \ | 567 | ##args); \ |
| 560 | } while (0) | 568 | } while (0) |
| 561 | 569 | ||
| 562 | #define DMA_FENCE_ERR(f, fmt, args...) \ | 570 | #define DMA_FENCE_ERR(f, fmt, args...) \ |
| 563 | do { \ | 571 | do { \ |
| 564 | struct dma_fence *__ff = (f); \ | 572 | struct dma_fence *__ff = (f); \ |
| 565 | pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \ | 573 | pr_err("f %llu#%llu: " fmt, __ff->context, __ff->seqno, \ |
| 566 | ##args); \ | 574 | ##args); \ |
| 567 | } while (0) | 575 | } while (0) |
| 568 | 576 | ||
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h index cef2127e1d70..75e60be91e5f 100644 --- a/include/linux/dma-mapping.h +++ b/include/linux/dma-mapping.h | |||
| @@ -130,6 +130,7 @@ struct dma_map_ops { | |||
| 130 | enum dma_data_direction direction); | 130 | enum dma_data_direction direction); |
| 131 | int (*dma_supported)(struct device *dev, u64 mask); | 131 | int (*dma_supported)(struct device *dev, u64 mask); |
| 132 | u64 (*get_required_mask)(struct device *dev); | 132 | u64 (*get_required_mask)(struct device *dev); |
| 133 | size_t (*max_mapping_size)(struct device *dev); | ||
| 133 | }; | 134 | }; |
| 134 | 135 | ||
| 135 | #define DMA_MAPPING_ERROR (~(dma_addr_t)0) | 136 | #define DMA_MAPPING_ERROR (~(dma_addr_t)0) |
| @@ -153,7 +154,7 @@ static inline int is_device_dma_capable(struct device *dev) | |||
| 153 | return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; | 154 | return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE; |
| 154 | } | 155 | } |
| 155 | 156 | ||
| 156 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | 157 | #ifdef CONFIG_DMA_DECLARE_COHERENT |
| 157 | /* | 158 | /* |
| 158 | * These three functions are only for dma allocator. | 159 | * These three functions are only for dma allocator. |
| 159 | * Don't use them in device drivers. | 160 | * Don't use them in device drivers. |
| @@ -192,7 +193,7 @@ static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma, | |||
| 192 | { | 193 | { |
| 193 | return 0; | 194 | return 0; |
| 194 | } | 195 | } |
| 195 | #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ | 196 | #endif /* CONFIG_DMA_DECLARE_COHERENT */ |
| 196 | 197 | ||
| 197 | static inline bool dma_is_direct(const struct dma_map_ops *ops) | 198 | static inline bool dma_is_direct(const struct dma_map_ops *ops) |
| 198 | { | 199 | { |
| @@ -208,6 +209,8 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page, | |||
| 208 | unsigned long attrs); | 209 | unsigned long attrs); |
| 209 | int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, | 210 | int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents, |
| 210 | enum dma_data_direction dir, unsigned long attrs); | 211 | enum dma_data_direction dir, unsigned long attrs); |
| 212 | dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr, | ||
| 213 | size_t size, enum dma_data_direction dir, unsigned long attrs); | ||
| 211 | 214 | ||
| 212 | #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ | 215 | #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \ |
| 213 | defined(CONFIG_SWIOTLB) | 216 | defined(CONFIG_SWIOTLB) |
| @@ -257,6 +260,8 @@ static inline void dma_direct_sync_sg_for_cpu(struct device *dev, | |||
| 257 | } | 260 | } |
| 258 | #endif | 261 | #endif |
| 259 | 262 | ||
| 263 | size_t dma_direct_max_mapping_size(struct device *dev); | ||
| 264 | |||
| 260 | #ifdef CONFIG_HAS_DMA | 265 | #ifdef CONFIG_HAS_DMA |
| 261 | #include <asm/dma-mapping.h> | 266 | #include <asm/dma-mapping.h> |
| 262 | 267 | ||
| @@ -346,19 +351,20 @@ static inline dma_addr_t dma_map_resource(struct device *dev, | |||
| 346 | unsigned long attrs) | 351 | unsigned long attrs) |
| 347 | { | 352 | { |
| 348 | const struct dma_map_ops *ops = get_dma_ops(dev); | 353 | const struct dma_map_ops *ops = get_dma_ops(dev); |
| 349 | dma_addr_t addr; | 354 | dma_addr_t addr = DMA_MAPPING_ERROR; |
| 350 | 355 | ||
| 351 | BUG_ON(!valid_dma_direction(dir)); | 356 | BUG_ON(!valid_dma_direction(dir)); |
| 352 | 357 | ||
| 353 | /* Don't allow RAM to be mapped */ | 358 | /* Don't allow RAM to be mapped */ |
| 354 | BUG_ON(pfn_valid(PHYS_PFN(phys_addr))); | 359 | if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr)))) |
| 360 | return DMA_MAPPING_ERROR; | ||
| 355 | 361 | ||
| 356 | addr = phys_addr; | 362 | if (dma_is_direct(ops)) |
| 357 | if (ops && ops->map_resource) | 363 | addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs); |
| 364 | else if (ops->map_resource) | ||
| 358 | addr = ops->map_resource(dev, phys_addr, size, dir, attrs); | 365 | addr = ops->map_resource(dev, phys_addr, size, dir, attrs); |
| 359 | 366 | ||
| 360 | debug_dma_map_resource(dev, phys_addr, size, dir, addr); | 367 | debug_dma_map_resource(dev, phys_addr, size, dir, addr); |
| 361 | |||
| 362 | return addr; | 368 | return addr; |
| 363 | } | 369 | } |
| 364 | 370 | ||
| @@ -369,7 +375,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr, | |||
| 369 | const struct dma_map_ops *ops = get_dma_ops(dev); | 375 | const struct dma_map_ops *ops = get_dma_ops(dev); |
| 370 | 376 | ||
| 371 | BUG_ON(!valid_dma_direction(dir)); | 377 | BUG_ON(!valid_dma_direction(dir)); |
| 372 | if (ops && ops->unmap_resource) | 378 | if (!dma_is_direct(ops) && ops->unmap_resource) |
| 373 | ops->unmap_resource(dev, addr, size, dir, attrs); | 379 | ops->unmap_resource(dev, addr, size, dir, attrs); |
| 374 | debug_dma_unmap_resource(dev, addr, size, dir); | 380 | debug_dma_unmap_resource(dev, addr, size, dir); |
| 375 | } | 381 | } |
| @@ -460,6 +466,7 @@ int dma_supported(struct device *dev, u64 mask); | |||
| 460 | int dma_set_mask(struct device *dev, u64 mask); | 466 | int dma_set_mask(struct device *dev, u64 mask); |
| 461 | int dma_set_coherent_mask(struct device *dev, u64 mask); | 467 | int dma_set_coherent_mask(struct device *dev, u64 mask); |
| 462 | u64 dma_get_required_mask(struct device *dev); | 468 | u64 dma_get_required_mask(struct device *dev); |
| 469 | size_t dma_max_mapping_size(struct device *dev); | ||
| 463 | #else /* CONFIG_HAS_DMA */ | 470 | #else /* CONFIG_HAS_DMA */ |
| 464 | static inline dma_addr_t dma_map_page_attrs(struct device *dev, | 471 | static inline dma_addr_t dma_map_page_attrs(struct device *dev, |
| 465 | struct page *page, size_t offset, size_t size, | 472 | struct page *page, size_t offset, size_t size, |
| @@ -561,6 +568,10 @@ static inline u64 dma_get_required_mask(struct device *dev) | |||
| 561 | { | 568 | { |
| 562 | return 0; | 569 | return 0; |
| 563 | } | 570 | } |
| 571 | static inline size_t dma_max_mapping_size(struct device *dev) | ||
| 572 | { | ||
| 573 | return 0; | ||
| 574 | } | ||
| 564 | #endif /* CONFIG_HAS_DMA */ | 575 | #endif /* CONFIG_HAS_DMA */ |
| 565 | 576 | ||
| 566 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, | 577 | static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr, |
| @@ -668,15 +679,23 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) | |||
| 668 | return dma_set_mask_and_coherent(dev, mask); | 679 | return dma_set_mask_and_coherent(dev, mask); |
| 669 | } | 680 | } |
| 670 | 681 | ||
| 671 | #ifndef arch_setup_dma_ops | 682 | #ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS |
| 683 | void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, | ||
| 684 | const struct iommu_ops *iommu, bool coherent); | ||
| 685 | #else | ||
| 672 | static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, | 686 | static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, |
| 673 | u64 size, const struct iommu_ops *iommu, | 687 | u64 size, const struct iommu_ops *iommu, bool coherent) |
| 674 | bool coherent) { } | 688 | { |
| 675 | #endif | 689 | } |
| 690 | #endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */ | ||
| 676 | 691 | ||
| 677 | #ifndef arch_teardown_dma_ops | 692 | #ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS |
| 678 | static inline void arch_teardown_dma_ops(struct device *dev) { } | 693 | void arch_teardown_dma_ops(struct device *dev); |
| 679 | #endif | 694 | #else |
| 695 | static inline void arch_teardown_dma_ops(struct device *dev) | ||
| 696 | { | ||
| 697 | } | ||
| 698 | #endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */ | ||
| 680 | 699 | ||
| 681 | static inline unsigned int dma_get_max_seg_size(struct device *dev) | 700 | static inline unsigned int dma_get_max_seg_size(struct device *dev) |
| 682 | { | 701 | { |
| @@ -717,15 +736,6 @@ static inline unsigned long dma_max_pfn(struct device *dev) | |||
| 717 | } | 736 | } |
| 718 | #endif | 737 | #endif |
| 719 | 738 | ||
| 720 | /* | ||
| 721 | * Please always use dma_alloc_coherent instead as it already zeroes the memory! | ||
| 722 | */ | ||
| 723 | static inline void *dma_zalloc_coherent(struct device *dev, size_t size, | ||
| 724 | dma_addr_t *dma_handle, gfp_t flag) | ||
| 725 | { | ||
| 726 | return dma_alloc_coherent(dev, size, dma_handle, flag); | ||
| 727 | } | ||
| 728 | |||
| 729 | static inline int dma_get_cache_alignment(void) | 739 | static inline int dma_get_cache_alignment(void) |
| 730 | { | 740 | { |
| 731 | #ifdef ARCH_DMA_MINALIGN | 741 | #ifdef ARCH_DMA_MINALIGN |
| @@ -734,19 +744,14 @@ static inline int dma_get_cache_alignment(void) | |||
| 734 | return 1; | 744 | return 1; |
| 735 | } | 745 | } |
| 736 | 746 | ||
| 737 | /* flags for the coherent memory api */ | 747 | #ifdef CONFIG_DMA_DECLARE_COHERENT |
| 738 | #define DMA_MEMORY_EXCLUSIVE 0x01 | ||
| 739 | |||
| 740 | #ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT | ||
| 741 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | 748 | int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, |
| 742 | dma_addr_t device_addr, size_t size, int flags); | 749 | dma_addr_t device_addr, size_t size); |
| 743 | void dma_release_declared_memory(struct device *dev); | 750 | void dma_release_declared_memory(struct device *dev); |
| 744 | void *dma_mark_declared_memory_occupied(struct device *dev, | ||
| 745 | dma_addr_t device_addr, size_t size); | ||
| 746 | #else | 751 | #else |
| 747 | static inline int | 752 | static inline int |
| 748 | dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, | 753 | dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, |
| 749 | dma_addr_t device_addr, size_t size, int flags) | 754 | dma_addr_t device_addr, size_t size) |
| 750 | { | 755 | { |
| 751 | return -ENOSYS; | 756 | return -ENOSYS; |
| 752 | } | 757 | } |
| @@ -755,14 +760,7 @@ static inline void | |||
| 755 | dma_release_declared_memory(struct device *dev) | 760 | dma_release_declared_memory(struct device *dev) |
| 756 | { | 761 | { |
| 757 | } | 762 | } |
| 758 | 763 | #endif /* CONFIG_DMA_DECLARE_COHERENT */ | |
| 759 | static inline void * | ||
| 760 | dma_mark_declared_memory_occupied(struct device *dev, | ||
| 761 | dma_addr_t device_addr, size_t size) | ||
| 762 | { | ||
| 763 | return ERR_PTR(-EBUSY); | ||
| 764 | } | ||
| 765 | #endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */ | ||
| 766 | 764 | ||
| 767 | static inline void *dmam_alloc_coherent(struct device *dev, size_t size, | 765 | static inline void *dmam_alloc_coherent(struct device *dev, size_t size, |
| 768 | dma_addr_t *dma_handle, gfp_t gfp) | 766 | dma_addr_t *dma_handle, gfp_t gfp) |
diff --git a/include/linux/dma/dw.h b/include/linux/dma/dw.h index e166cac8e870..9752f3745f76 100644 --- a/include/linux/dma/dw.h +++ b/include/linux/dma/dw.h | |||
| @@ -1,13 +1,10 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Driver for the Synopsys DesignWare DMA Controller | 3 | * Driver for the Synopsys DesignWare DMA Controller |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2007 Atmel Corporation | 5 | * Copyright (C) 2007 Atmel Corporation |
| 5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
| 6 | * Copyright (C) 2014 Intel Corporation | 7 | * Copyright (C) 2014 Intel Corporation |
| 7 | * | ||
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License version 2 as | ||
| 10 | * published by the Free Software Foundation. | ||
| 11 | */ | 8 | */ |
| 12 | #ifndef _DMA_DW_H | 9 | #ifndef _DMA_DW_H |
| 13 | #define _DMA_DW_H | 10 | #define _DMA_DW_H |
| @@ -45,9 +42,13 @@ struct dw_dma_chip { | |||
| 45 | #if IS_ENABLED(CONFIG_DW_DMAC_CORE) | 42 | #if IS_ENABLED(CONFIG_DW_DMAC_CORE) |
| 46 | int dw_dma_probe(struct dw_dma_chip *chip); | 43 | int dw_dma_probe(struct dw_dma_chip *chip); |
| 47 | int dw_dma_remove(struct dw_dma_chip *chip); | 44 | int dw_dma_remove(struct dw_dma_chip *chip); |
| 45 | int idma32_dma_probe(struct dw_dma_chip *chip); | ||
| 46 | int idma32_dma_remove(struct dw_dma_chip *chip); | ||
| 48 | #else | 47 | #else |
| 49 | static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } | 48 | static inline int dw_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } |
| 50 | static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } | 49 | static inline int dw_dma_remove(struct dw_dma_chip *chip) { return 0; } |
| 50 | static inline int idma32_dma_probe(struct dw_dma_chip *chip) { return -ENODEV; } | ||
| 51 | static inline int idma32_dma_remove(struct dw_dma_chip *chip) { return 0; } | ||
| 51 | #endif /* CONFIG_DW_DMAC_CORE */ | 52 | #endif /* CONFIG_DW_DMAC_CORE */ |
| 52 | 53 | ||
| 53 | #endif /* _DMA_DW_H */ | 54 | #endif /* _DMA_DW_H */ |
diff --git a/include/linux/dynamic_debug.h b/include/linux/dynamic_debug.h index b3419da1a776..c2be029b9b53 100644 --- a/include/linux/dynamic_debug.h +++ b/include/linux/dynamic_debug.h | |||
| @@ -47,10 +47,10 @@ struct _ddebug { | |||
| 47 | } __attribute__((aligned(8))); | 47 | } __attribute__((aligned(8))); |
| 48 | 48 | ||
| 49 | 49 | ||
| 50 | int ddebug_add_module(struct _ddebug *tab, unsigned int n, | ||
| 51 | const char *modname); | ||
| 52 | 50 | ||
| 53 | #if defined(CONFIG_DYNAMIC_DEBUG) | 51 | #if defined(CONFIG_DYNAMIC_DEBUG) |
| 52 | int ddebug_add_module(struct _ddebug *tab, unsigned int n, | ||
| 53 | const char *modname); | ||
| 54 | extern int ddebug_remove_module(const char *mod_name); | 54 | extern int ddebug_remove_module(const char *mod_name); |
| 55 | extern __printf(2, 3) | 55 | extern __printf(2, 3) |
| 56 | void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); | 56 | void __dynamic_pr_debug(struct _ddebug *descriptor, const char *fmt, ...); |
| @@ -71,7 +71,7 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, | |||
| 71 | const struct net_device *dev, | 71 | const struct net_device *dev, |
| 72 | const char *fmt, ...); | 72 | const char *fmt, ...); |
| 73 | 73 | ||
| 74 | #define DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, key, init) \ | 74 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ |
| 75 | static struct _ddebug __aligned(8) \ | 75 | static struct _ddebug __aligned(8) \ |
| 76 | __attribute__((section("__verbose"))) name = { \ | 76 | __attribute__((section("__verbose"))) name = { \ |
| 77 | .modname = KBUILD_MODNAME, \ | 77 | .modname = KBUILD_MODNAME, \ |
| @@ -80,35 +80,27 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, | |||
| 80 | .format = (fmt), \ | 80 | .format = (fmt), \ |
| 81 | .lineno = __LINE__, \ | 81 | .lineno = __LINE__, \ |
| 82 | .flags = _DPRINTK_FLAGS_DEFAULT, \ | 82 | .flags = _DPRINTK_FLAGS_DEFAULT, \ |
| 83 | dd_key_init(key, init) \ | 83 | _DPRINTK_KEY_INIT \ |
| 84 | } | 84 | } |
| 85 | 85 | ||
| 86 | #ifdef CONFIG_JUMP_LABEL | 86 | #ifdef CONFIG_JUMP_LABEL |
| 87 | 87 | ||
| 88 | #define dd_key_init(key, init) key = (init) | ||
| 89 | |||
| 90 | #ifdef DEBUG | 88 | #ifdef DEBUG |
| 91 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ | 89 | |
| 92 | DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_true, \ | 90 | #define _DPRINTK_KEY_INIT .key.dd_key_true = (STATIC_KEY_TRUE_INIT) |
| 93 | (STATIC_KEY_TRUE_INIT)) | ||
| 94 | 91 | ||
| 95 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ | 92 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ |
| 96 | static_branch_likely(&descriptor.key.dd_key_true) | 93 | static_branch_likely(&descriptor.key.dd_key_true) |
| 97 | #else | 94 | #else |
| 98 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ | 95 | #define _DPRINTK_KEY_INIT .key.dd_key_false = (STATIC_KEY_FALSE_INIT) |
| 99 | DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, .key.dd_key_false, \ | ||
| 100 | (STATIC_KEY_FALSE_INIT)) | ||
| 101 | 96 | ||
| 102 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ | 97 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ |
| 103 | static_branch_unlikely(&descriptor.key.dd_key_false) | 98 | static_branch_unlikely(&descriptor.key.dd_key_false) |
| 104 | #endif | 99 | #endif |
| 105 | 100 | ||
| 106 | #else | 101 | #else /* !HAVE_JUMP_LABEL */ |
| 107 | 102 | ||
| 108 | #define dd_key_init(key, init) | 103 | #define _DPRINTK_KEY_INIT |
| 109 | |||
| 110 | #define DEFINE_DYNAMIC_DEBUG_METADATA(name, fmt) \ | ||
| 111 | DEFINE_DYNAMIC_DEBUG_METADATA_KEY(name, fmt, 0, 0) | ||
| 112 | 104 | ||
| 113 | #ifdef DEBUG | 105 | #ifdef DEBUG |
| 114 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ | 106 | #define DYNAMIC_DEBUG_BRANCH(descriptor) \ |
| @@ -120,46 +112,66 @@ void __dynamic_netdev_dbg(struct _ddebug *descriptor, | |||
| 120 | 112 | ||
| 121 | #endif | 113 | #endif |
| 122 | 114 | ||
| 123 | #define dynamic_pr_debug(fmt, ...) \ | 115 | #define __dynamic_func_call(id, fmt, func, ...) do { \ |
| 124 | do { \ | 116 | DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ |
| 125 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 117 | if (DYNAMIC_DEBUG_BRANCH(id)) \ |
| 126 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ | 118 | func(&id, ##__VA_ARGS__); \ |
| 127 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ | ||
| 128 | ##__VA_ARGS__); \ | ||
| 129 | } while (0) | 119 | } while (0) |
| 130 | 120 | ||
| 131 | #define dynamic_dev_dbg(dev, fmt, ...) \ | 121 | #define __dynamic_func_call_no_desc(id, fmt, func, ...) do { \ |
| 132 | do { \ | 122 | DEFINE_DYNAMIC_DEBUG_METADATA(id, fmt); \ |
| 133 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 123 | if (DYNAMIC_DEBUG_BRANCH(id)) \ |
| 134 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ | 124 | func(__VA_ARGS__); \ |
| 135 | __dynamic_dev_dbg(&descriptor, dev, fmt, \ | ||
| 136 | ##__VA_ARGS__); \ | ||
| 137 | } while (0) | 125 | } while (0) |
| 138 | 126 | ||
| 127 | /* | ||
| 128 | * "Factory macro" for generating a call to func, guarded by a | ||
| 129 | * DYNAMIC_DEBUG_BRANCH. The dynamic debug decriptor will be | ||
| 130 | * initialized using the fmt argument. The function will be called with | ||
| 131 | * the address of the descriptor as first argument, followed by all | ||
| 132 | * the varargs. Note that fmt is repeated in invocations of this | ||
| 133 | * macro. | ||
| 134 | */ | ||
| 135 | #define _dynamic_func_call(fmt, func, ...) \ | ||
| 136 | __dynamic_func_call(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__) | ||
| 137 | /* | ||
| 138 | * A variant that does the same, except that the descriptor is not | ||
| 139 | * passed as the first argument to the function; it is only called | ||
| 140 | * with precisely the macro's varargs. | ||
| 141 | */ | ||
| 142 | #define _dynamic_func_call_no_desc(fmt, func, ...) \ | ||
| 143 | __dynamic_func_call_no_desc(__UNIQUE_ID(ddebug), fmt, func, ##__VA_ARGS__) | ||
| 144 | |||
| 145 | #define dynamic_pr_debug(fmt, ...) \ | ||
| 146 | _dynamic_func_call(fmt, __dynamic_pr_debug, \ | ||
| 147 | pr_fmt(fmt), ##__VA_ARGS__) | ||
| 148 | |||
| 149 | #define dynamic_dev_dbg(dev, fmt, ...) \ | ||
| 150 | _dynamic_func_call(fmt,__dynamic_dev_dbg, \ | ||
| 151 | dev, fmt, ##__VA_ARGS__) | ||
| 152 | |||
| 139 | #define dynamic_netdev_dbg(dev, fmt, ...) \ | 153 | #define dynamic_netdev_dbg(dev, fmt, ...) \ |
| 140 | do { \ | 154 | _dynamic_func_call(fmt, __dynamic_netdev_dbg, \ |
| 141 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 155 | dev, fmt, ##__VA_ARGS__) |
| 142 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ | ||
| 143 | __dynamic_netdev_dbg(&descriptor, dev, fmt, \ | ||
| 144 | ##__VA_ARGS__); \ | ||
| 145 | } while (0) | ||
| 146 | 156 | ||
| 147 | #define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ | 157 | #define dynamic_hex_dump(prefix_str, prefix_type, rowsize, \ |
| 148 | groupsize, buf, len, ascii) \ | 158 | groupsize, buf, len, ascii) \ |
| 149 | do { \ | 159 | _dynamic_func_call_no_desc(__builtin_constant_p(prefix_str) ? prefix_str : "hexdump", \ |
| 150 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, \ | 160 | print_hex_dump, \ |
| 151 | __builtin_constant_p(prefix_str) ? prefix_str : "hexdump");\ | 161 | KERN_DEBUG, prefix_str, prefix_type, \ |
| 152 | if (DYNAMIC_DEBUG_BRANCH(descriptor)) \ | 162 | rowsize, groupsize, buf, len, ascii) |
| 153 | print_hex_dump(KERN_DEBUG, prefix_str, \ | ||
| 154 | prefix_type, rowsize, groupsize, \ | ||
| 155 | buf, len, ascii); \ | ||
| 156 | } while (0) | ||
| 157 | 163 | ||
| 158 | #else | 164 | #else |
| 159 | 165 | ||
| 160 | #include <linux/string.h> | 166 | #include <linux/string.h> |
| 161 | #include <linux/errno.h> | 167 | #include <linux/errno.h> |
| 162 | 168 | ||
| 169 | static inline int ddebug_add_module(struct _ddebug *tab, unsigned int n, | ||
| 170 | const char *modname) | ||
| 171 | { | ||
| 172 | return 0; | ||
| 173 | } | ||
| 174 | |||
| 163 | static inline int ddebug_remove_module(const char *mod) | 175 | static inline int ddebug_remove_module(const char *mod) |
| 164 | { | 176 | { |
| 165 | return 0; | 177 | return 0; |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 45ff763fba76..6ebc2098cfe1 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -48,7 +48,20 @@ typedef u16 efi_char16_t; /* UNICODE character */ | |||
| 48 | typedef u64 efi_physical_addr_t; | 48 | typedef u64 efi_physical_addr_t; |
| 49 | typedef void *efi_handle_t; | 49 | typedef void *efi_handle_t; |
| 50 | 50 | ||
| 51 | typedef guid_t efi_guid_t; | 51 | /* |
| 52 | * The UEFI spec and EDK2 reference implementation both define EFI_GUID as | ||
| 53 | * struct { u32 a; u16; b; u16 c; u8 d[8]; }; and so the implied alignment | ||
| 54 | * is 32 bits not 8 bits like our guid_t. In some cases (i.e., on 32-bit ARM), | ||
| 55 | * this means that firmware services invoked by the kernel may assume that | ||
| 56 | * efi_guid_t* arguments are 32-bit aligned, and use memory accessors that | ||
| 57 | * do not tolerate misalignment. So let's set the minimum alignment to 32 bits. | ||
| 58 | * | ||
| 59 | * Note that the UEFI spec as well as some comments in the EDK2 code base | ||
| 60 | * suggest that EFI_GUID should be 64-bit aligned, but this appears to be | ||
| 61 | * a mistake, given that no code seems to exist that actually enforces that | ||
| 62 | * or relies on it. | ||
| 63 | */ | ||
| 64 | typedef guid_t efi_guid_t __aligned(__alignof__(u32)); | ||
| 52 | 65 | ||
| 53 | #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ | 66 | #define EFI_GUID(a,b,c,d0,d1,d2,d3,d4,d5,d6,d7) \ |
| 54 | GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) | 67 | GUID_INIT(a, b, c, d0, d1, d2, d3, d4, d5, d6, d7) |
| @@ -1198,8 +1211,6 @@ static inline bool efi_enabled(int feature) | |||
| 1198 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); | 1211 | extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused); |
| 1199 | 1212 | ||
| 1200 | extern bool efi_is_table_address(unsigned long phys_addr); | 1213 | extern bool efi_is_table_address(unsigned long phys_addr); |
| 1201 | |||
| 1202 | extern int efi_apply_persistent_mem_reservations(void); | ||
| 1203 | #else | 1214 | #else |
| 1204 | static inline bool efi_enabled(int feature) | 1215 | static inline bool efi_enabled(int feature) |
| 1205 | { | 1216 | { |
| @@ -1218,11 +1229,6 @@ static inline bool efi_is_table_address(unsigned long phys_addr) | |||
| 1218 | { | 1229 | { |
| 1219 | return false; | 1230 | return false; |
| 1220 | } | 1231 | } |
| 1221 | |||
| 1222 | static inline int efi_apply_persistent_mem_reservations(void) | ||
| 1223 | { | ||
| 1224 | return 0; | ||
| 1225 | } | ||
| 1226 | #endif | 1232 | #endif |
| 1227 | 1233 | ||
| 1228 | extern int efi_status_to_err(efi_status_t status); | 1234 | extern int efi_status_to_err(efi_status_t status); |
| @@ -1605,8 +1611,14 @@ efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg, | |||
| 1605 | struct screen_info *si, efi_guid_t *proto, | 1611 | struct screen_info *si, efi_guid_t *proto, |
| 1606 | unsigned long size); | 1612 | unsigned long size); |
| 1607 | 1613 | ||
| 1608 | bool efi_runtime_disabled(void); | 1614 | #ifdef CONFIG_EFI |
| 1615 | extern bool efi_runtime_disabled(void); | ||
| 1616 | #else | ||
| 1617 | static inline bool efi_runtime_disabled(void) { return true; } | ||
| 1618 | #endif | ||
| 1619 | |||
| 1609 | extern void efi_call_virt_check_flags(unsigned long flags, const char *call); | 1620 | extern void efi_call_virt_check_flags(unsigned long flags, const char *call); |
| 1621 | extern unsigned long efi_call_virt_save_flags(void); | ||
| 1610 | 1622 | ||
| 1611 | enum efi_secureboot_mode { | 1623 | enum efi_secureboot_mode { |
| 1612 | efi_secureboot_mode_unset, | 1624 | efi_secureboot_mode_unset, |
| @@ -1652,7 +1664,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); | |||
| 1652 | \ | 1664 | \ |
| 1653 | arch_efi_call_virt_setup(); \ | 1665 | arch_efi_call_virt_setup(); \ |
| 1654 | \ | 1666 | \ |
| 1655 | local_save_flags(__flags); \ | 1667 | __flags = efi_call_virt_save_flags(); \ |
| 1656 | __s = arch_efi_call_virt(p, f, args); \ | 1668 | __s = arch_efi_call_virt(p, f, args); \ |
| 1657 | efi_call_virt_check_flags(__flags, __stringify(f)); \ | 1669 | efi_call_virt_check_flags(__flags, __stringify(f)); \ |
| 1658 | \ | 1670 | \ |
| @@ -1667,7 +1679,7 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table); | |||
| 1667 | \ | 1679 | \ |
| 1668 | arch_efi_call_virt_setup(); \ | 1680 | arch_efi_call_virt_setup(); \ |
| 1669 | \ | 1681 | \ |
| 1670 | local_save_flags(__flags); \ | 1682 | __flags = efi_call_virt_save_flags(); \ |
| 1671 | arch_efi_call_virt(p, f, args); \ | 1683 | arch_efi_call_virt(p, f, args); \ |
| 1672 | efi_call_virt_check_flags(__flags, __stringify(f)); \ | 1684 | efi_call_virt_check_flags(__flags, __stringify(f)); \ |
| 1673 | \ | 1685 | \ |
| @@ -1706,19 +1718,19 @@ extern int efi_tpm_eventlog_init(void); | |||
| 1706 | * fault happened while executing an efi runtime service. | 1718 | * fault happened while executing an efi runtime service. |
| 1707 | */ | 1719 | */ |
| 1708 | enum efi_rts_ids { | 1720 | enum efi_rts_ids { |
| 1709 | NONE, | 1721 | EFI_NONE, |
| 1710 | GET_TIME, | 1722 | EFI_GET_TIME, |
| 1711 | SET_TIME, | 1723 | EFI_SET_TIME, |
| 1712 | GET_WAKEUP_TIME, | 1724 | EFI_GET_WAKEUP_TIME, |
| 1713 | SET_WAKEUP_TIME, | 1725 | EFI_SET_WAKEUP_TIME, |
| 1714 | GET_VARIABLE, | 1726 | EFI_GET_VARIABLE, |
| 1715 | GET_NEXT_VARIABLE, | 1727 | EFI_GET_NEXT_VARIABLE, |
| 1716 | SET_VARIABLE, | 1728 | EFI_SET_VARIABLE, |
| 1717 | QUERY_VARIABLE_INFO, | 1729 | EFI_QUERY_VARIABLE_INFO, |
| 1718 | GET_NEXT_HIGH_MONO_COUNT, | 1730 | EFI_GET_NEXT_HIGH_MONO_COUNT, |
| 1719 | RESET_SYSTEM, | 1731 | EFI_RESET_SYSTEM, |
| 1720 | UPDATE_CAPSULE, | 1732 | EFI_UPDATE_CAPSULE, |
| 1721 | QUERY_CAPSULE_CAPS, | 1733 | EFI_QUERY_CAPSULE_CAPS, |
| 1722 | }; | 1734 | }; |
| 1723 | 1735 | ||
| 1724 | /* | 1736 | /* |
diff --git a/include/linux/elevator.h b/include/linux/elevator.h index 2e9e2763bf47..6e8bc53740f0 100644 --- a/include/linux/elevator.h +++ b/include/linux/elevator.h | |||
| @@ -31,6 +31,7 @@ struct elevator_mq_ops { | |||
| 31 | void (*exit_sched)(struct elevator_queue *); | 31 | void (*exit_sched)(struct elevator_queue *); |
| 32 | int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); | 32 | int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int); |
| 33 | void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); | 33 | void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int); |
| 34 | void (*depth_updated)(struct blk_mq_hw_ctx *); | ||
| 34 | 35 | ||
| 35 | bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); | 36 | bool (*allow_merge)(struct request_queue *, struct request *, struct bio *); |
| 36 | bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); | 37 | bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *); |
diff --git a/include/linux/errno.h b/include/linux/errno.h index 3cba627577d6..d73f597a2484 100644 --- a/include/linux/errno.h +++ b/include/linux/errno.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ | 18 | #define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ |
| 19 | #define EPROBE_DEFER 517 /* Driver requests probe retry */ | 19 | #define EPROBE_DEFER 517 /* Driver requests probe retry */ |
| 20 | #define EOPENSTALE 518 /* open found a stale dentry */ | 20 | #define EOPENSTALE 518 /* open found a stale dentry */ |
| 21 | #define ENOPARAM 519 /* Parameter not supported */ | ||
| 21 | 22 | ||
| 22 | /* Defined for the NFSv3 protocol */ | 23 | /* Defined for the NFSv3 protocol */ |
| 23 | #define EBADHANDLE 521 /* Illegal NFS file handle */ | 24 | #define EBADHANDLE 521 /* Illegal NFS file handle */ |
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h index 2c0af7b00715..aa8bfd6f738c 100644 --- a/include/linux/etherdevice.h +++ b/include/linux/etherdevice.h | |||
| @@ -44,6 +44,7 @@ int eth_header_cache(const struct neighbour *neigh, struct hh_cache *hh, | |||
| 44 | __be16 type); | 44 | __be16 type); |
| 45 | void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, | 45 | void eth_header_cache_update(struct hh_cache *hh, const struct net_device *dev, |
| 46 | const unsigned char *haddr); | 46 | const unsigned char *haddr); |
| 47 | __be16 eth_header_parse_protocol(const struct sk_buff *skb); | ||
| 47 | int eth_prepare_mac_addr_change(struct net_device *dev, void *p); | 48 | int eth_prepare_mac_addr_change(struct net_device *dev, void *p); |
| 48 | void eth_commit_mac_addr_change(struct net_device *dev, void *p); | 49 | void eth_commit_mac_addr_change(struct net_device *dev, void *p); |
| 49 | int eth_mac_addr(struct net_device *dev, void *p); | 50 | int eth_mac_addr(struct net_device *dev, void *p); |
| @@ -448,6 +449,18 @@ static inline void eth_addr_dec(u8 *addr) | |||
| 448 | } | 449 | } |
| 449 | 450 | ||
| 450 | /** | 451 | /** |
| 452 | * eth_addr_inc() - Increment the given MAC address. | ||
| 453 | * @addr: Pointer to a six-byte array containing Ethernet address to increment. | ||
| 454 | */ | ||
| 455 | static inline void eth_addr_inc(u8 *addr) | ||
| 456 | { | ||
| 457 | u64 u = ether_addr_to_u64(addr); | ||
| 458 | |||
| 459 | u++; | ||
| 460 | u64_to_ether_addr(u, addr); | ||
| 461 | } | ||
| 462 | |||
| 463 | /** | ||
| 451 | * is_etherdev_addr - Tell if given Ethernet address belongs to the device. | 464 | * is_etherdev_addr - Tell if given Ethernet address belongs to the device. |
| 452 | * @dev: Pointer to a device structure | 465 | * @dev: Pointer to a device structure |
| 453 | * @addr: Pointer to a six-byte array containing the Ethernet address | 466 | * @addr: Pointer to a six-byte array containing the Ethernet address |
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h index afd9596ce636..e6ebc9761822 100644 --- a/include/linux/ethtool.h +++ b/include/linux/ethtool.h | |||
| @@ -98,10 +98,6 @@ static inline u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) | |||
| 98 | return index % n_rx_rings; | 98 | return index % n_rx_rings; |
| 99 | } | 99 | } |
| 100 | 100 | ||
| 101 | /* number of link mode bits/ulongs handled internally by kernel */ | ||
| 102 | #define __ETHTOOL_LINK_MODE_MASK_NBITS \ | ||
| 103 | (__ETHTOOL_LINK_MODE_LAST + 1) | ||
| 104 | |||
| 105 | /* declare a link mode bitmap */ | 101 | /* declare a link mode bitmap */ |
| 106 | #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ | 102 | #define __ETHTOOL_DECLARE_LINK_MODE_MASK(name) \ |
| 107 | DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) | 103 | DECLARE_BITMAP(name, __ETHTOOL_LINK_MODE_MASK_NBITS) |
| @@ -400,4 +396,19 @@ struct ethtool_ops { | |||
| 400 | void (*get_ethtool_phy_stats)(struct net_device *, | 396 | void (*get_ethtool_phy_stats)(struct net_device *, |
| 401 | struct ethtool_stats *, u64 *); | 397 | struct ethtool_stats *, u64 *); |
| 402 | }; | 398 | }; |
| 399 | |||
| 400 | struct ethtool_rx_flow_rule { | ||
| 401 | struct flow_rule *rule; | ||
| 402 | unsigned long priv[0]; | ||
| 403 | }; | ||
| 404 | |||
| 405 | struct ethtool_rx_flow_spec_input { | ||
| 406 | const struct ethtool_rx_flow_spec *fs; | ||
| 407 | u32 rss_ctx; | ||
| 408 | }; | ||
| 409 | |||
| 410 | struct ethtool_rx_flow_rule * | ||
| 411 | ethtool_rx_flow_rule_create(const struct ethtool_rx_flow_spec_input *input); | ||
| 412 | void ethtool_rx_flow_rule_destroy(struct ethtool_rx_flow_rule *rule); | ||
| 413 | |||
| 403 | #endif /* _LINUX_ETHTOOL_H */ | 414 | #endif /* _LINUX_ETHTOOL_H */ |
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h index d7711048ef93..f5740423b002 100644 --- a/include/linux/f2fs_fs.h +++ b/include/linux/f2fs_fs.h | |||
| @@ -116,6 +116,7 @@ struct f2fs_super_block { | |||
| 116 | /* | 116 | /* |
| 117 | * For checkpoint | 117 | * For checkpoint |
| 118 | */ | 118 | */ |
| 119 | #define CP_DISABLED_QUICK_FLAG 0x00002000 | ||
| 119 | #define CP_DISABLED_FLAG 0x00001000 | 120 | #define CP_DISABLED_FLAG 0x00001000 |
| 120 | #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 | 121 | #define CP_QUOTA_NEED_FSCK_FLAG 0x00000800 |
| 121 | #define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 | 122 | #define CP_LARGE_NAT_BITMAP_FLAG 0x00000400 |
| @@ -186,7 +187,7 @@ struct f2fs_orphan_block { | |||
| 186 | struct f2fs_extent { | 187 | struct f2fs_extent { |
| 187 | __le32 fofs; /* start file offset of the extent */ | 188 | __le32 fofs; /* start file offset of the extent */ |
| 188 | __le32 blk; /* start block address of the extent */ | 189 | __le32 blk; /* start block address of the extent */ |
| 189 | __le32 len; /* lengh of the extent */ | 190 | __le32 len; /* length of the extent */ |
| 190 | } __packed; | 191 | } __packed; |
| 191 | 192 | ||
| 192 | #define F2FS_NAME_LEN 255 | 193 | #define F2FS_NAME_LEN 255 |
| @@ -284,7 +285,7 @@ enum { | |||
| 284 | 285 | ||
| 285 | struct node_footer { | 286 | struct node_footer { |
| 286 | __le32 nid; /* node id */ | 287 | __le32 nid; /* node id */ |
| 287 | __le32 ino; /* inode nunmber */ | 288 | __le32 ino; /* inode number */ |
| 288 | __le32 flag; /* include cold/fsync/dentry marks and offset */ | 289 | __le32 flag; /* include cold/fsync/dentry marks and offset */ |
| 289 | __le64 cp_ver; /* checkpoint version */ | 290 | __le64 cp_ver; /* checkpoint version */ |
| 290 | __le32 next_blkaddr; /* next node page block address */ | 291 | __le32 next_blkaddr; /* next node page block address */ |
| @@ -489,12 +490,12 @@ typedef __le32 f2fs_hash_t; | |||
| 489 | 490 | ||
| 490 | /* | 491 | /* |
| 491 | * space utilization of regular dentry and inline dentry (w/o extra reservation) | 492 | * space utilization of regular dentry and inline dentry (w/o extra reservation) |
| 492 | * regular dentry inline dentry | 493 | * regular dentry inline dentry (def) inline dentry (min) |
| 493 | * bitmap 1 * 27 = 27 1 * 23 = 23 | 494 | * bitmap 1 * 27 = 27 1 * 23 = 23 1 * 1 = 1 |
| 494 | * reserved 1 * 3 = 3 1 * 7 = 7 | 495 | * reserved 1 * 3 = 3 1 * 7 = 7 1 * 1 = 1 |
| 495 | * dentry 11 * 214 = 2354 11 * 182 = 2002 | 496 | * dentry 11 * 214 = 2354 11 * 182 = 2002 11 * 2 = 22 |
| 496 | * filename 8 * 214 = 1712 8 * 182 = 1456 | 497 | * filename 8 * 214 = 1712 8 * 182 = 1456 8 * 2 = 16 |
| 497 | * total 4096 3488 | 498 | * total 4096 3488 40 |
| 498 | * | 499 | * |
| 499 | * Note: there are more reserved space in inline dentry than in regular | 500 | * Note: there are more reserved space in inline dentry than in regular |
| 500 | * dentry, when converting inline dentry we should handle this carefully. | 501 | * dentry, when converting inline dentry we should handle this carefully. |
| @@ -506,12 +507,13 @@ typedef __le32 f2fs_hash_t; | |||
| 506 | #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ | 507 | #define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \ |
| 507 | F2FS_SLOT_LEN) * \ | 508 | F2FS_SLOT_LEN) * \ |
| 508 | NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) | 509 | NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP)) |
| 510 | #define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */ | ||
| 509 | 511 | ||
| 510 | /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */ | 512 | /* One directory entry slot representing F2FS_SLOT_LEN-sized file name */ |
| 511 | struct f2fs_dir_entry { | 513 | struct f2fs_dir_entry { |
| 512 | __le32 hash_code; /* hash code of file name */ | 514 | __le32 hash_code; /* hash code of file name */ |
| 513 | __le32 ino; /* inode number */ | 515 | __le32 ino; /* inode number */ |
| 514 | __le16 name_len; /* lengh of file name */ | 516 | __le16 name_len; /* length of file name */ |
| 515 | __u8 file_type; /* file type */ | 517 | __u8 file_type; /* file type */ |
| 516 | } __packed; | 518 | } __packed; |
| 517 | 519 | ||
diff --git a/include/linux/fanotify.h b/include/linux/fanotify.h index 9e2142795335..b79fa9bb7359 100644 --- a/include/linux/fanotify.h +++ b/include/linux/fanotify.h | |||
| @@ -19,7 +19,7 @@ | |||
| 19 | FAN_CLASS_PRE_CONTENT) | 19 | FAN_CLASS_PRE_CONTENT) |
| 20 | 20 | ||
| 21 | #define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \ | 21 | #define FANOTIFY_INIT_FLAGS (FANOTIFY_CLASS_BITS | \ |
| 22 | FAN_REPORT_TID | \ | 22 | FAN_REPORT_TID | FAN_REPORT_FID | \ |
| 23 | FAN_CLOEXEC | FAN_NONBLOCK | \ | 23 | FAN_CLOEXEC | FAN_NONBLOCK | \ |
| 24 | FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS) | 24 | FAN_UNLIMITED_QUEUE | FAN_UNLIMITED_MARKS) |
| 25 | 25 | ||
| @@ -35,10 +35,28 @@ | |||
| 35 | FAN_MARK_IGNORED_SURV_MODIFY | \ | 35 | FAN_MARK_IGNORED_SURV_MODIFY | \ |
| 36 | FAN_MARK_FLUSH) | 36 | FAN_MARK_FLUSH) |
| 37 | 37 | ||
| 38 | /* Events that user can request to be notified on */ | 38 | /* |
| 39 | #define FANOTIFY_EVENTS (FAN_ACCESS | FAN_MODIFY | \ | 39 | * Events that can be reported with data type FSNOTIFY_EVENT_PATH. |
| 40 | * Note that FAN_MODIFY can also be reported with data type | ||
| 41 | * FSNOTIFY_EVENT_INODE. | ||
| 42 | */ | ||
| 43 | #define FANOTIFY_PATH_EVENTS (FAN_ACCESS | FAN_MODIFY | \ | ||
| 40 | FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC) | 44 | FAN_CLOSE | FAN_OPEN | FAN_OPEN_EXEC) |
| 41 | 45 | ||
| 46 | /* | ||
| 47 | * Directory entry modification events - reported only to directory | ||
| 48 | * where entry is modified and not to a watching parent. | ||
| 49 | */ | ||
| 50 | #define FANOTIFY_DIRENT_EVENTS (FAN_MOVE | FAN_CREATE | FAN_DELETE) | ||
| 51 | |||
| 52 | /* Events that can only be reported with data type FSNOTIFY_EVENT_INODE */ | ||
| 53 | #define FANOTIFY_INODE_EVENTS (FANOTIFY_DIRENT_EVENTS | \ | ||
| 54 | FAN_ATTRIB | FAN_MOVE_SELF | FAN_DELETE_SELF) | ||
| 55 | |||
| 56 | /* Events that user can request to be notified on */ | ||
| 57 | #define FANOTIFY_EVENTS (FANOTIFY_PATH_EVENTS | \ | ||
| 58 | FANOTIFY_INODE_EVENTS) | ||
| 59 | |||
| 42 | /* Events that require a permission response from user */ | 60 | /* Events that require a permission response from user */ |
| 43 | #define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ | 61 | #define FANOTIFY_PERM_EVENTS (FAN_OPEN_PERM | FAN_ACCESS_PERM | \ |
| 44 | FAN_OPEN_EXEC_PERM) | 62 | FAN_OPEN_EXEC_PERM) |
| @@ -49,7 +67,7 @@ | |||
| 49 | /* Events that may be reported to user */ | 67 | /* Events that may be reported to user */ |
| 50 | #define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \ | 68 | #define FANOTIFY_OUTGOING_EVENTS (FANOTIFY_EVENTS | \ |
| 51 | FANOTIFY_PERM_EVENTS | \ | 69 | FANOTIFY_PERM_EVENTS | \ |
| 52 | FAN_Q_OVERFLOW) | 70 | FAN_Q_OVERFLOW | FAN_ONDIR) |
| 53 | 71 | ||
| 54 | #define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \ | 72 | #define ALL_FANOTIFY_EVENT_BITS (FANOTIFY_OUTGOING_EVENTS | \ |
| 55 | FANOTIFY_EVENT_FLAGS) | 73 | FANOTIFY_EVENT_FLAGS) |
diff --git a/include/linux/fb.h b/include/linux/fb.h index 7cdd31a69719..f52ef0ad6781 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h | |||
| @@ -653,6 +653,7 @@ extern int fb_new_modelist(struct fb_info *info); | |||
| 653 | 653 | ||
| 654 | extern struct fb_info *registered_fb[FB_MAX]; | 654 | extern struct fb_info *registered_fb[FB_MAX]; |
| 655 | extern int num_registered_fb; | 655 | extern int num_registered_fb; |
| 656 | extern bool fb_center_logo; | ||
| 656 | extern struct class *fb_class; | 657 | extern struct class *fb_class; |
| 657 | 658 | ||
| 658 | #define for_each_registered_fb(i) \ | 659 | #define for_each_registered_fb(i) \ |
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h index 27dc7a60693e..d019df946cb2 100644 --- a/include/linux/fcntl.h +++ b/include/linux/fcntl.h | |||
| @@ -12,7 +12,7 @@ | |||
| 12 | O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) | 12 | O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE) |
| 13 | 13 | ||
| 14 | #ifndef force_o_largefile | 14 | #ifndef force_o_largefile |
| 15 | #define force_o_largefile() (BITS_PER_LONG != 32) | 15 | #define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T)) |
| 16 | #endif | 16 | #endif |
| 17 | 17 | ||
| 18 | #if BITS_PER_LONG == 32 | 18 | #if BITS_PER_LONG == 32 |
diff --git a/include/linux/file.h b/include/linux/file.h index 6b2fb032416c..3fcddff56bc4 100644 --- a/include/linux/file.h +++ b/include/linux/file.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | struct file; | 13 | struct file; |
| 14 | 14 | ||
| 15 | extern void fput(struct file *); | 15 | extern void fput(struct file *); |
| 16 | extern void fput_many(struct file *, unsigned int); | ||
| 16 | 17 | ||
| 17 | struct file_operations; | 18 | struct file_operations; |
| 18 | struct vfsmount; | 19 | struct vfsmount; |
| @@ -44,6 +45,7 @@ static inline void fdput(struct fd fd) | |||
| 44 | } | 45 | } |
| 45 | 46 | ||
| 46 | extern struct file *fget(unsigned int fd); | 47 | extern struct file *fget(unsigned int fd); |
| 48 | extern struct file *fget_many(unsigned int fd, unsigned int refs); | ||
| 47 | extern struct file *fget_raw(unsigned int fd); | 49 | extern struct file *fget_raw(unsigned int fd); |
| 48 | extern unsigned long __fdget(unsigned int fd); | 50 | extern unsigned long __fdget(unsigned int fd); |
| 49 | extern unsigned long __fdget_raw(unsigned int fd); | 51 | extern unsigned long __fdget_raw(unsigned int fd); |
diff --git a/include/linux/filter.h b/include/linux/filter.h index ad106d845b22..6074aa064b54 100644 --- a/include/linux/filter.h +++ b/include/linux/filter.h | |||
| @@ -277,6 +277,26 @@ struct sock_reuseport; | |||
| 277 | .off = OFF, \ | 277 | .off = OFF, \ |
| 278 | .imm = IMM }) | 278 | .imm = IMM }) |
| 279 | 279 | ||
| 280 | /* Like BPF_JMP_REG, but with 32-bit wide operands for comparison. */ | ||
| 281 | |||
| 282 | #define BPF_JMP32_REG(OP, DST, SRC, OFF) \ | ||
| 283 | ((struct bpf_insn) { \ | ||
| 284 | .code = BPF_JMP32 | BPF_OP(OP) | BPF_X, \ | ||
| 285 | .dst_reg = DST, \ | ||
| 286 | .src_reg = SRC, \ | ||
| 287 | .off = OFF, \ | ||
| 288 | .imm = 0 }) | ||
| 289 | |||
| 290 | /* Like BPF_JMP_IMM, but with 32-bit wide operands for comparison. */ | ||
| 291 | |||
| 292 | #define BPF_JMP32_IMM(OP, DST, IMM, OFF) \ | ||
| 293 | ((struct bpf_insn) { \ | ||
| 294 | .code = BPF_JMP32 | BPF_OP(OP) | BPF_K, \ | ||
| 295 | .dst_reg = DST, \ | ||
| 296 | .src_reg = 0, \ | ||
| 297 | .off = OFF, \ | ||
| 298 | .imm = IMM }) | ||
| 299 | |||
| 280 | /* Unconditional jumps, goto pc + off16 */ | 300 | /* Unconditional jumps, goto pc + off16 */ |
| 281 | 301 | ||
| 282 | #define BPF_JMP_A(OFF) \ | 302 | #define BPF_JMP_A(OFF) \ |
| @@ -513,7 +533,24 @@ struct sk_filter { | |||
| 513 | struct bpf_prog *prog; | 533 | struct bpf_prog *prog; |
| 514 | }; | 534 | }; |
| 515 | 535 | ||
| 516 | #define BPF_PROG_RUN(filter, ctx) (*(filter)->bpf_func)(ctx, (filter)->insnsi) | 536 | DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key); |
| 537 | |||
| 538 | #define BPF_PROG_RUN(prog, ctx) ({ \ | ||
| 539 | u32 ret; \ | ||
| 540 | cant_sleep(); \ | ||
| 541 | if (static_branch_unlikely(&bpf_stats_enabled_key)) { \ | ||
| 542 | struct bpf_prog_stats *stats; \ | ||
| 543 | u64 start = sched_clock(); \ | ||
| 544 | ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ | ||
| 545 | stats = this_cpu_ptr(prog->aux->stats); \ | ||
| 546 | u64_stats_update_begin(&stats->syncp); \ | ||
| 547 | stats->cnt++; \ | ||
| 548 | stats->nsecs += sched_clock() - start; \ | ||
| 549 | u64_stats_update_end(&stats->syncp); \ | ||
| 550 | } else { \ | ||
| 551 | ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \ | ||
| 552 | } \ | ||
| 553 | ret; }) | ||
| 517 | 554 | ||
| 518 | #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN | 555 | #define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN |
| 519 | 556 | ||
| @@ -591,8 +628,8 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb) | |||
| 591 | return qdisc_skb_cb(skb)->data; | 628 | return qdisc_skb_cb(skb)->data; |
| 592 | } | 629 | } |
| 593 | 630 | ||
| 594 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | 631 | static inline u32 __bpf_prog_run_save_cb(const struct bpf_prog *prog, |
| 595 | struct sk_buff *skb) | 632 | struct sk_buff *skb) |
| 596 | { | 633 | { |
| 597 | u8 *cb_data = bpf_skb_cb(skb); | 634 | u8 *cb_data = bpf_skb_cb(skb); |
| 598 | u8 cb_saved[BPF_SKB_CB_LEN]; | 635 | u8 cb_saved[BPF_SKB_CB_LEN]; |
| @@ -611,15 +648,30 @@ static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | |||
| 611 | return res; | 648 | return res; |
| 612 | } | 649 | } |
| 613 | 650 | ||
| 651 | static inline u32 bpf_prog_run_save_cb(const struct bpf_prog *prog, | ||
| 652 | struct sk_buff *skb) | ||
| 653 | { | ||
| 654 | u32 res; | ||
| 655 | |||
| 656 | preempt_disable(); | ||
| 657 | res = __bpf_prog_run_save_cb(prog, skb); | ||
| 658 | preempt_enable(); | ||
| 659 | return res; | ||
| 660 | } | ||
| 661 | |||
| 614 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, | 662 | static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog, |
| 615 | struct sk_buff *skb) | 663 | struct sk_buff *skb) |
| 616 | { | 664 | { |
| 617 | u8 *cb_data = bpf_skb_cb(skb); | 665 | u8 *cb_data = bpf_skb_cb(skb); |
| 666 | u32 res; | ||
| 618 | 667 | ||
| 619 | if (unlikely(prog->cb_access)) | 668 | if (unlikely(prog->cb_access)) |
| 620 | memset(cb_data, 0, BPF_SKB_CB_LEN); | 669 | memset(cb_data, 0, BPF_SKB_CB_LEN); |
| 621 | 670 | ||
| 622 | return BPF_PROG_RUN(prog, skb); | 671 | preempt_disable(); |
| 672 | res = BPF_PROG_RUN(prog, skb); | ||
| 673 | preempt_enable(); | ||
| 674 | return res; | ||
| 623 | } | 675 | } |
| 624 | 676 | ||
| 625 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, | 677 | static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog, |
| @@ -729,6 +781,7 @@ void bpf_prog_free_jited_linfo(struct bpf_prog *prog); | |||
| 729 | void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); | 781 | void bpf_prog_free_unused_jited_linfo(struct bpf_prog *prog); |
| 730 | 782 | ||
| 731 | struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); | 783 | struct bpf_prog *bpf_prog_alloc(unsigned int size, gfp_t gfp_extra_flags); |
| 784 | struct bpf_prog *bpf_prog_alloc_no_stats(unsigned int size, gfp_t gfp_extra_flags); | ||
| 732 | struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, | 785 | struct bpf_prog *bpf_prog_realloc(struct bpf_prog *fp_old, unsigned int size, |
| 733 | gfp_t gfp_extra_flags); | 786 | gfp_t gfp_extra_flags); |
| 734 | void __bpf_prog_free(struct bpf_prog *fp); | 787 | void __bpf_prog_free(struct bpf_prog *fp); |
| @@ -778,6 +831,7 @@ static inline bool bpf_dump_raw_ok(void) | |||
| 778 | 831 | ||
| 779 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, | 832 | struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off, |
| 780 | const struct bpf_insn *patch, u32 len); | 833 | const struct bpf_insn *patch, u32 len); |
| 834 | int bpf_remove_insns(struct bpf_prog *prog, u32 off, u32 cnt); | ||
| 781 | 835 | ||
| 782 | void bpf_clear_redirect_map(struct bpf_map *map); | 836 | void bpf_clear_redirect_map(struct bpf_map *map); |
| 783 | 837 | ||
| @@ -859,7 +913,9 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr, | |||
| 859 | unsigned int alignment, | 913 | unsigned int alignment, |
| 860 | bpf_jit_fill_hole_t bpf_fill_ill_insns); | 914 | bpf_jit_fill_hole_t bpf_fill_ill_insns); |
| 861 | void bpf_jit_binary_free(struct bpf_binary_header *hdr); | 915 | void bpf_jit_binary_free(struct bpf_binary_header *hdr); |
| 862 | 916 | u64 bpf_jit_alloc_exec_limit(void); | |
| 917 | void *bpf_jit_alloc_exec(unsigned long size); | ||
| 918 | void bpf_jit_free_exec(void *addr); | ||
| 863 | void bpf_jit_free(struct bpf_prog *fp); | 919 | void bpf_jit_free(struct bpf_prog *fp); |
| 864 | 920 | ||
| 865 | int bpf_jit_get_func_addr(const struct bpf_prog *prog, | 921 | int bpf_jit_get_func_addr(const struct bpf_prog *prog, |
| @@ -951,6 +1007,7 @@ bpf_address_lookup(unsigned long addr, unsigned long *size, | |||
| 951 | 1007 | ||
| 952 | void bpf_prog_kallsyms_add(struct bpf_prog *fp); | 1008 | void bpf_prog_kallsyms_add(struct bpf_prog *fp); |
| 953 | void bpf_prog_kallsyms_del(struct bpf_prog *fp); | 1009 | void bpf_prog_kallsyms_del(struct bpf_prog *fp); |
| 1010 | void bpf_get_prog_name(const struct bpf_prog *prog, char *sym); | ||
| 954 | 1011 | ||
| 955 | #else /* CONFIG_BPF_JIT */ | 1012 | #else /* CONFIG_BPF_JIT */ |
| 956 | 1013 | ||
| @@ -1006,6 +1063,12 @@ static inline void bpf_prog_kallsyms_add(struct bpf_prog *fp) | |||
| 1006 | static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) | 1063 | static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp) |
| 1007 | { | 1064 | { |
| 1008 | } | 1065 | } |
| 1066 | |||
| 1067 | static inline void bpf_get_prog_name(const struct bpf_prog *prog, char *sym) | ||
| 1068 | { | ||
| 1069 | sym[0] = '\0'; | ||
| 1070 | } | ||
| 1071 | |||
| 1009 | #endif /* CONFIG_BPF_JIT */ | 1072 | #endif /* CONFIG_BPF_JIT */ |
| 1010 | 1073 | ||
| 1011 | void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp); | 1074 | void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp); |
diff --git a/include/linux/firmware/imx/svc/misc.h b/include/linux/firmware/imx/svc/misc.h index e21c49aba92f..031dd4d3c766 100644 --- a/include/linux/firmware/imx/svc/misc.h +++ b/include/linux/firmware/imx/svc/misc.h | |||
| @@ -52,4 +52,7 @@ int imx_sc_misc_set_control(struct imx_sc_ipc *ipc, u32 resource, | |||
| 52 | int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource, | 52 | int imx_sc_misc_get_control(struct imx_sc_ipc *ipc, u32 resource, |
| 53 | u8 ctrl, u32 *val); | 53 | u8 ctrl, u32 *val); |
| 54 | 54 | ||
| 55 | int imx_sc_pm_cpu_start(struct imx_sc_ipc *ipc, u32 resource, | ||
| 56 | bool enable, u64 phys_addr); | ||
| 57 | |||
| 55 | #endif /* _SC_MISC_API_H */ | 58 | #endif /* _SC_MISC_API_H */ |
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h index 3c3c28eff56a..642dab10f65d 100644 --- a/include/linux/firmware/xlnx-zynqmp.h +++ b/include/linux/firmware/xlnx-zynqmp.h | |||
| @@ -28,12 +28,35 @@ | |||
| 28 | /* SMC SIP service Call Function Identifier Prefix */ | 28 | /* SMC SIP service Call Function Identifier Prefix */ |
| 29 | #define PM_SIP_SVC 0xC2000000 | 29 | #define PM_SIP_SVC 0xC2000000 |
| 30 | #define PM_GET_TRUSTZONE_VERSION 0xa03 | 30 | #define PM_GET_TRUSTZONE_VERSION 0xa03 |
| 31 | #define PM_SET_SUSPEND_MODE 0xa02 | ||
| 32 | #define GET_CALLBACK_DATA 0xa01 | ||
| 31 | 33 | ||
| 32 | /* Number of 32bits values in payload */ | 34 | /* Number of 32bits values in payload */ |
| 33 | #define PAYLOAD_ARG_CNT 4U | 35 | #define PAYLOAD_ARG_CNT 4U |
| 34 | 36 | ||
| 37 | /* Number of arguments for a callback */ | ||
| 38 | #define CB_ARG_CNT 4 | ||
| 39 | |||
| 40 | /* Payload size (consists of callback API ID + arguments) */ | ||
| 41 | #define CB_PAYLOAD_SIZE (CB_ARG_CNT + 1) | ||
| 42 | |||
| 43 | #define ZYNQMP_PM_MAX_QOS 100U | ||
| 44 | |||
| 45 | /* Node capabilities */ | ||
| 46 | #define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U | ||
| 47 | #define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U | ||
| 48 | #define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U | ||
| 49 | #define ZYNQMP_PM_CAPABILITY_POWER 0x8U | ||
| 50 | |||
| 35 | enum pm_api_id { | 51 | enum pm_api_id { |
| 36 | PM_GET_API_VERSION = 1, | 52 | PM_GET_API_VERSION = 1, |
| 53 | PM_REQUEST_NODE = 13, | ||
| 54 | PM_RELEASE_NODE, | ||
| 55 | PM_SET_REQUIREMENT, | ||
| 56 | PM_RESET_ASSERT = 17, | ||
| 57 | PM_RESET_GET_STATUS, | ||
| 58 | PM_PM_INIT_FINALIZE = 21, | ||
| 59 | PM_GET_CHIPID = 24, | ||
| 37 | PM_IOCTL = 34, | 60 | PM_IOCTL = 34, |
| 38 | PM_QUERY_DATA, | 61 | PM_QUERY_DATA, |
| 39 | PM_CLOCK_ENABLE, | 62 | PM_CLOCK_ENABLE, |
| @@ -75,6 +98,149 @@ enum pm_query_id { | |||
| 75 | PM_QID_CLOCK_GET_NUM_CLOCKS = 12, | 98 | PM_QID_CLOCK_GET_NUM_CLOCKS = 12, |
| 76 | }; | 99 | }; |
| 77 | 100 | ||
| 101 | enum zynqmp_pm_reset_action { | ||
| 102 | PM_RESET_ACTION_RELEASE, | ||
| 103 | PM_RESET_ACTION_ASSERT, | ||
| 104 | PM_RESET_ACTION_PULSE, | ||
| 105 | }; | ||
| 106 | |||
| 107 | enum zynqmp_pm_reset { | ||
| 108 | ZYNQMP_PM_RESET_START = 1000, | ||
| 109 | ZYNQMP_PM_RESET_PCIE_CFG = ZYNQMP_PM_RESET_START, | ||
| 110 | ZYNQMP_PM_RESET_PCIE_BRIDGE, | ||
| 111 | ZYNQMP_PM_RESET_PCIE_CTRL, | ||
| 112 | ZYNQMP_PM_RESET_DP, | ||
| 113 | ZYNQMP_PM_RESET_SWDT_CRF, | ||
| 114 | ZYNQMP_PM_RESET_AFI_FM5, | ||
| 115 | ZYNQMP_PM_RESET_AFI_FM4, | ||
| 116 | ZYNQMP_PM_RESET_AFI_FM3, | ||
| 117 | ZYNQMP_PM_RESET_AFI_FM2, | ||
| 118 | ZYNQMP_PM_RESET_AFI_FM1, | ||
| 119 | ZYNQMP_PM_RESET_AFI_FM0, | ||
| 120 | ZYNQMP_PM_RESET_GDMA, | ||
| 121 | ZYNQMP_PM_RESET_GPU_PP1, | ||
| 122 | ZYNQMP_PM_RESET_GPU_PP0, | ||
| 123 | ZYNQMP_PM_RESET_GPU, | ||
| 124 | ZYNQMP_PM_RESET_GT, | ||
| 125 | ZYNQMP_PM_RESET_SATA, | ||
| 126 | ZYNQMP_PM_RESET_ACPU3_PWRON, | ||
| 127 | ZYNQMP_PM_RESET_ACPU2_PWRON, | ||
| 128 | ZYNQMP_PM_RESET_ACPU1_PWRON, | ||
| 129 | ZYNQMP_PM_RESET_ACPU0_PWRON, | ||
| 130 | ZYNQMP_PM_RESET_APU_L2, | ||
| 131 | ZYNQMP_PM_RESET_ACPU3, | ||
| 132 | ZYNQMP_PM_RESET_ACPU2, | ||
| 133 | ZYNQMP_PM_RESET_ACPU1, | ||
| 134 | ZYNQMP_PM_RESET_ACPU0, | ||
| 135 | ZYNQMP_PM_RESET_DDR, | ||
| 136 | ZYNQMP_PM_RESET_APM_FPD, | ||
| 137 | ZYNQMP_PM_RESET_SOFT, | ||
| 138 | ZYNQMP_PM_RESET_GEM0, | ||
| 139 | ZYNQMP_PM_RESET_GEM1, | ||
| 140 | ZYNQMP_PM_RESET_GEM2, | ||
| 141 | ZYNQMP_PM_RESET_GEM3, | ||
| 142 | ZYNQMP_PM_RESET_QSPI, | ||
| 143 | ZYNQMP_PM_RESET_UART0, | ||
| 144 | ZYNQMP_PM_RESET_UART1, | ||
| 145 | ZYNQMP_PM_RESET_SPI0, | ||
| 146 | ZYNQMP_PM_RESET_SPI1, | ||
| 147 | ZYNQMP_PM_RESET_SDIO0, | ||
| 148 | ZYNQMP_PM_RESET_SDIO1, | ||
| 149 | ZYNQMP_PM_RESET_CAN0, | ||
| 150 | ZYNQMP_PM_RESET_CAN1, | ||
| 151 | ZYNQMP_PM_RESET_I2C0, | ||
| 152 | ZYNQMP_PM_RESET_I2C1, | ||
| 153 | ZYNQMP_PM_RESET_TTC0, | ||
| 154 | ZYNQMP_PM_RESET_TTC1, | ||
| 155 | ZYNQMP_PM_RESET_TTC2, | ||
| 156 | ZYNQMP_PM_RESET_TTC3, | ||
| 157 | ZYNQMP_PM_RESET_SWDT_CRL, | ||
| 158 | ZYNQMP_PM_RESET_NAND, | ||
| 159 | ZYNQMP_PM_RESET_ADMA, | ||
| 160 | ZYNQMP_PM_RESET_GPIO, | ||
| 161 | ZYNQMP_PM_RESET_IOU_CC, | ||
| 162 | ZYNQMP_PM_RESET_TIMESTAMP, | ||
| 163 | ZYNQMP_PM_RESET_RPU_R50, | ||
| 164 | ZYNQMP_PM_RESET_RPU_R51, | ||
| 165 | ZYNQMP_PM_RESET_RPU_AMBA, | ||
| 166 | ZYNQMP_PM_RESET_OCM, | ||
| 167 | ZYNQMP_PM_RESET_RPU_PGE, | ||
| 168 | ZYNQMP_PM_RESET_USB0_CORERESET, | ||
| 169 | ZYNQMP_PM_RESET_USB1_CORERESET, | ||
| 170 | ZYNQMP_PM_RESET_USB0_HIBERRESET, | ||
| 171 | ZYNQMP_PM_RESET_USB1_HIBERRESET, | ||
| 172 | ZYNQMP_PM_RESET_USB0_APB, | ||
| 173 | ZYNQMP_PM_RESET_USB1_APB, | ||
| 174 | ZYNQMP_PM_RESET_IPI, | ||
| 175 | ZYNQMP_PM_RESET_APM_LPD, | ||
| 176 | ZYNQMP_PM_RESET_RTC, | ||
| 177 | ZYNQMP_PM_RESET_SYSMON, | ||
| 178 | ZYNQMP_PM_RESET_AFI_FM6, | ||
| 179 | ZYNQMP_PM_RESET_LPD_SWDT, | ||
| 180 | ZYNQMP_PM_RESET_FPD, | ||
| 181 | ZYNQMP_PM_RESET_RPU_DBG1, | ||
| 182 | ZYNQMP_PM_RESET_RPU_DBG0, | ||
| 183 | ZYNQMP_PM_RESET_DBG_LPD, | ||
| 184 | ZYNQMP_PM_RESET_DBG_FPD, | ||
| 185 | ZYNQMP_PM_RESET_APLL, | ||
| 186 | ZYNQMP_PM_RESET_DPLL, | ||
| 187 | ZYNQMP_PM_RESET_VPLL, | ||
| 188 | ZYNQMP_PM_RESET_IOPLL, | ||
| 189 | ZYNQMP_PM_RESET_RPLL, | ||
| 190 | ZYNQMP_PM_RESET_GPO3_PL_0, | ||
| 191 | ZYNQMP_PM_RESET_GPO3_PL_1, | ||
| 192 | ZYNQMP_PM_RESET_GPO3_PL_2, | ||
| 193 | ZYNQMP_PM_RESET_GPO3_PL_3, | ||
| 194 | ZYNQMP_PM_RESET_GPO3_PL_4, | ||
| 195 | ZYNQMP_PM_RESET_GPO3_PL_5, | ||
| 196 | ZYNQMP_PM_RESET_GPO3_PL_6, | ||
| 197 | ZYNQMP_PM_RESET_GPO3_PL_7, | ||
| 198 | ZYNQMP_PM_RESET_GPO3_PL_8, | ||
| 199 | ZYNQMP_PM_RESET_GPO3_PL_9, | ||
| 200 | ZYNQMP_PM_RESET_GPO3_PL_10, | ||
| 201 | ZYNQMP_PM_RESET_GPO3_PL_11, | ||
| 202 | ZYNQMP_PM_RESET_GPO3_PL_12, | ||
| 203 | ZYNQMP_PM_RESET_GPO3_PL_13, | ||
| 204 | ZYNQMP_PM_RESET_GPO3_PL_14, | ||
| 205 | ZYNQMP_PM_RESET_GPO3_PL_15, | ||
| 206 | ZYNQMP_PM_RESET_GPO3_PL_16, | ||
| 207 | ZYNQMP_PM_RESET_GPO3_PL_17, | ||
| 208 | ZYNQMP_PM_RESET_GPO3_PL_18, | ||
| 209 | ZYNQMP_PM_RESET_GPO3_PL_19, | ||
| 210 | ZYNQMP_PM_RESET_GPO3_PL_20, | ||
| 211 | ZYNQMP_PM_RESET_GPO3_PL_21, | ||
| 212 | ZYNQMP_PM_RESET_GPO3_PL_22, | ||
| 213 | ZYNQMP_PM_RESET_GPO3_PL_23, | ||
| 214 | ZYNQMP_PM_RESET_GPO3_PL_24, | ||
| 215 | ZYNQMP_PM_RESET_GPO3_PL_25, | ||
| 216 | ZYNQMP_PM_RESET_GPO3_PL_26, | ||
| 217 | ZYNQMP_PM_RESET_GPO3_PL_27, | ||
| 218 | ZYNQMP_PM_RESET_GPO3_PL_28, | ||
| 219 | ZYNQMP_PM_RESET_GPO3_PL_29, | ||
| 220 | ZYNQMP_PM_RESET_GPO3_PL_30, | ||
| 221 | ZYNQMP_PM_RESET_GPO3_PL_31, | ||
| 222 | ZYNQMP_PM_RESET_RPU_LS, | ||
| 223 | ZYNQMP_PM_RESET_PS_ONLY, | ||
| 224 | ZYNQMP_PM_RESET_PL, | ||
| 225 | ZYNQMP_PM_RESET_PS_PL0, | ||
| 226 | ZYNQMP_PM_RESET_PS_PL1, | ||
| 227 | ZYNQMP_PM_RESET_PS_PL2, | ||
| 228 | ZYNQMP_PM_RESET_PS_PL3, | ||
| 229 | ZYNQMP_PM_RESET_END = ZYNQMP_PM_RESET_PS_PL3 | ||
| 230 | }; | ||
| 231 | |||
| 232 | enum zynqmp_pm_suspend_reason { | ||
| 233 | SUSPEND_POWER_REQUEST = 201, | ||
| 234 | SUSPEND_ALERT, | ||
| 235 | SUSPEND_SYSTEM_SHUTDOWN, | ||
| 236 | }; | ||
| 237 | |||
| 238 | enum zynqmp_pm_request_ack { | ||
| 239 | ZYNQMP_PM_REQUEST_ACK_NO = 1, | ||
| 240 | ZYNQMP_PM_REQUEST_ACK_BLOCKING, | ||
| 241 | ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING, | ||
| 242 | }; | ||
| 243 | |||
| 78 | /** | 244 | /** |
| 79 | * struct zynqmp_pm_query_data - PM query data | 245 | * struct zynqmp_pm_query_data - PM query data |
| 80 | * @qid: query ID | 246 | * @qid: query ID |
| @@ -91,6 +257,7 @@ struct zynqmp_pm_query_data { | |||
| 91 | 257 | ||
| 92 | struct zynqmp_eemi_ops { | 258 | struct zynqmp_eemi_ops { |
| 93 | int (*get_api_version)(u32 *version); | 259 | int (*get_api_version)(u32 *version); |
| 260 | int (*get_chipid)(u32 *idcode, u32 *version); | ||
| 94 | int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); | 261 | int (*query_data)(struct zynqmp_pm_query_data qdata, u32 *out); |
| 95 | int (*clock_enable)(u32 clock_id); | 262 | int (*clock_enable)(u32 clock_id); |
| 96 | int (*clock_disable)(u32 clock_id); | 263 | int (*clock_disable)(u32 clock_id); |
| @@ -102,8 +269,25 @@ struct zynqmp_eemi_ops { | |||
| 102 | int (*clock_setparent)(u32 clock_id, u32 parent_id); | 269 | int (*clock_setparent)(u32 clock_id, u32 parent_id); |
| 103 | int (*clock_getparent)(u32 clock_id, u32 *parent_id); | 270 | int (*clock_getparent)(u32 clock_id, u32 *parent_id); |
| 104 | int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out); | 271 | int (*ioctl)(u32 node_id, u32 ioctl_id, u32 arg1, u32 arg2, u32 *out); |
| 272 | int (*reset_assert)(const enum zynqmp_pm_reset reset, | ||
| 273 | const enum zynqmp_pm_reset_action assert_flag); | ||
| 274 | int (*reset_get_status)(const enum zynqmp_pm_reset reset, u32 *status); | ||
| 275 | int (*init_finalize)(void); | ||
| 276 | int (*set_suspend_mode)(u32 mode); | ||
| 277 | int (*request_node)(const u32 node, | ||
| 278 | const u32 capabilities, | ||
| 279 | const u32 qos, | ||
| 280 | const enum zynqmp_pm_request_ack ack); | ||
| 281 | int (*release_node)(const u32 node); | ||
| 282 | int (*set_requirement)(const u32 node, | ||
| 283 | const u32 capabilities, | ||
| 284 | const u32 qos, | ||
| 285 | const enum zynqmp_pm_request_ack ack); | ||
| 105 | }; | 286 | }; |
| 106 | 287 | ||
| 288 | int zynqmp_pm_invoke_fn(u32 pm_api_id, u32 arg0, u32 arg1, | ||
| 289 | u32 arg2, u32 arg3, u32 *ret_payload); | ||
| 290 | |||
| 107 | #if IS_REACHABLE(CONFIG_ARCH_ZYNQMP) | 291 | #if IS_REACHABLE(CONFIG_ARCH_ZYNQMP) |
| 108 | const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); | 292 | const struct zynqmp_eemi_ops *zynqmp_pm_get_eemi_ops(void); |
| 109 | #else | 293 | #else |
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h deleted file mode 100644 index b94fa61b51fb..000000000000 --- a/include/linux/flex_array.h +++ /dev/null | |||
| @@ -1,149 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _FLEX_ARRAY_H | ||
| 3 | #define _FLEX_ARRAY_H | ||
| 4 | |||
| 5 | #include <linux/types.h> | ||
| 6 | #include <linux/reciprocal_div.h> | ||
| 7 | #include <asm/page.h> | ||
| 8 | |||
| 9 | #define FLEX_ARRAY_PART_SIZE PAGE_SIZE | ||
| 10 | #define FLEX_ARRAY_BASE_SIZE PAGE_SIZE | ||
| 11 | |||
| 12 | struct flex_array_part; | ||
| 13 | |||
| 14 | /* | ||
| 15 | * This is meant to replace cases where an array-like | ||
| 16 | * structure has gotten too big to fit into kmalloc() | ||
| 17 | * and the developer is getting tempted to use | ||
| 18 | * vmalloc(). | ||
| 19 | */ | ||
| 20 | |||
| 21 | struct flex_array { | ||
| 22 | union { | ||
| 23 | struct { | ||
| 24 | int element_size; | ||
| 25 | int total_nr_elements; | ||
| 26 | int elems_per_part; | ||
| 27 | struct reciprocal_value reciprocal_elems; | ||
| 28 | struct flex_array_part *parts[]; | ||
| 29 | }; | ||
| 30 | /* | ||
| 31 | * This little trick makes sure that | ||
| 32 | * sizeof(flex_array) == PAGE_SIZE | ||
| 33 | */ | ||
| 34 | char padding[FLEX_ARRAY_BASE_SIZE]; | ||
| 35 | }; | ||
| 36 | }; | ||
| 37 | |||
| 38 | /* Number of bytes left in base struct flex_array, excluding metadata */ | ||
| 39 | #define FLEX_ARRAY_BASE_BYTES_LEFT \ | ||
| 40 | (FLEX_ARRAY_BASE_SIZE - offsetof(struct flex_array, parts)) | ||
| 41 | |||
| 42 | /* Number of pointers in base to struct flex_array_part pages */ | ||
| 43 | #define FLEX_ARRAY_NR_BASE_PTRS \ | ||
| 44 | (FLEX_ARRAY_BASE_BYTES_LEFT / sizeof(struct flex_array_part *)) | ||
| 45 | |||
| 46 | /* Number of elements of size that fit in struct flex_array_part */ | ||
| 47 | #define FLEX_ARRAY_ELEMENTS_PER_PART(size) \ | ||
| 48 | (FLEX_ARRAY_PART_SIZE / size) | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Defines a statically allocated flex array and ensures its parameters are | ||
| 52 | * valid. | ||
| 53 | */ | ||
| 54 | #define DEFINE_FLEX_ARRAY(__arrayname, __element_size, __total) \ | ||
| 55 | struct flex_array __arrayname = { { { \ | ||
| 56 | .element_size = (__element_size), \ | ||
| 57 | .total_nr_elements = (__total), \ | ||
| 58 | } } }; \ | ||
| 59 | static inline void __arrayname##_invalid_parameter(void) \ | ||
| 60 | { \ | ||
| 61 | BUILD_BUG_ON((__total) > FLEX_ARRAY_NR_BASE_PTRS * \ | ||
| 62 | FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \ | ||
| 63 | } | ||
| 64 | |||
| 65 | /** | ||
| 66 | * flex_array_alloc() - Creates a flexible array. | ||
| 67 | * @element_size: individual object size. | ||
| 68 | * @total: maximum number of objects which can be stored. | ||
| 69 | * @flags: GFP flags | ||
| 70 | * | ||
| 71 | * Return: Returns an object of structure flex_array. | ||
| 72 | */ | ||
| 73 | struct flex_array *flex_array_alloc(int element_size, unsigned int total, | ||
| 74 | gfp_t flags); | ||
| 75 | |||
| 76 | /** | ||
| 77 | * flex_array_prealloc() - Ensures that memory for the elements indexed in the | ||
| 78 | * range defined by start and nr_elements has been allocated. | ||
| 79 | * @fa: array to allocate memory to. | ||
| 80 | * @start: start address | ||
| 81 | * @nr_elements: number of elements to be allocated. | ||
| 82 | * @flags: GFP flags | ||
| 83 | * | ||
| 84 | */ | ||
| 85 | int flex_array_prealloc(struct flex_array *fa, unsigned int start, | ||
| 86 | unsigned int nr_elements, gfp_t flags); | ||
| 87 | |||
| 88 | /** | ||
| 89 | * flex_array_free() - Removes all elements of a flexible array. | ||
| 90 | * @fa: array to be freed. | ||
| 91 | */ | ||
| 92 | void flex_array_free(struct flex_array *fa); | ||
| 93 | |||
| 94 | /** | ||
| 95 | * flex_array_free_parts() - Removes all elements of a flexible array, but | ||
| 96 | * leaves the array itself in place. | ||
| 97 | * @fa: array to be emptied. | ||
| 98 | */ | ||
| 99 | void flex_array_free_parts(struct flex_array *fa); | ||
| 100 | |||
| 101 | /** | ||
| 102 | * flex_array_put() - Stores data into a flexible array. | ||
| 103 | * @fa: array where element is to be stored. | ||
| 104 | * @element_nr: position to copy, must be less than the maximum specified when | ||
| 105 | * the array was created. | ||
| 106 | * @src: data source to be copied into the array. | ||
| 107 | * @flags: GFP flags | ||
| 108 | * | ||
| 109 | * Return: Returns zero on success, a negative error code otherwise. | ||
| 110 | */ | ||
| 111 | int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, | ||
| 112 | gfp_t flags); | ||
| 113 | |||
| 114 | /** | ||
| 115 | * flex_array_clear() - Clears an individual element in the array, sets the | ||
| 116 | * given element to FLEX_ARRAY_FREE. | ||
| 117 | * @element_nr: element position to clear. | ||
| 118 | * @fa: array to which element to be cleared belongs. | ||
| 119 | * | ||
| 120 | * Return: Returns zero on success, -EINVAL otherwise. | ||
| 121 | */ | ||
| 122 | int flex_array_clear(struct flex_array *fa, unsigned int element_nr); | ||
| 123 | |||
| 124 | /** | ||
| 125 | * flex_array_get() - Retrieves data into a flexible array. | ||
| 126 | * | ||
| 127 | * @element_nr: Element position to retrieve data from. | ||
| 128 | * @fa: array from which data is to be retrieved. | ||
| 129 | * | ||
| 130 | * Return: Returns a pointer to the data element, or NULL if that | ||
| 131 | * particular element has never been allocated. | ||
| 132 | */ | ||
| 133 | void *flex_array_get(struct flex_array *fa, unsigned int element_nr); | ||
| 134 | |||
| 135 | /** | ||
| 136 | * flex_array_shrink() - Reduces the allocated size of an array. | ||
| 137 | * @fa: array to shrink. | ||
| 138 | * | ||
| 139 | * Return: Returns number of pages of memory actually freed. | ||
| 140 | * | ||
| 141 | */ | ||
| 142 | int flex_array_shrink(struct flex_array *fa); | ||
| 143 | |||
| 144 | #define flex_array_put_ptr(fa, nr, src, gfp) \ | ||
| 145 | flex_array_put(fa, nr, (void *)&(src), gfp) | ||
| 146 | |||
| 147 | void *flex_array_get_ptr(struct flex_array *fa, unsigned int element_nr); | ||
| 148 | |||
| 149 | #endif /* _FLEX_ARRAY_H */ | ||
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h index 011965c08b93..6d775984905b 100644 --- a/include/linux/frontswap.h +++ b/include/linux/frontswap.h | |||
| @@ -7,6 +7,13 @@ | |||
| 7 | #include <linux/bitops.h> | 7 | #include <linux/bitops.h> |
| 8 | #include <linux/jump_label.h> | 8 | #include <linux/jump_label.h> |
| 9 | 9 | ||
| 10 | /* | ||
| 11 | * Return code to denote that requested number of | ||
| 12 | * frontswap pages are unused(moved to page cache). | ||
| 13 | * Used in in shmem_unuse and try_to_unuse. | ||
| 14 | */ | ||
| 15 | #define FRONTSWAP_PAGES_UNUSED 2 | ||
| 16 | |||
| 10 | struct frontswap_ops { | 17 | struct frontswap_ops { |
| 11 | void (*init)(unsigned); /* this swap type was just swapon'ed */ | 18 | void (*init)(unsigned); /* this swap type was just swapon'ed */ |
| 12 | int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ | 19 | int (*store)(unsigned, pgoff_t, struct page *); /* store a page */ |
diff --git a/include/linux/fs.h b/include/linux/fs.h index 811c77743dad..dd28e7679089 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
| @@ -37,6 +37,9 @@ | |||
| 37 | #include <linux/uuid.h> | 37 | #include <linux/uuid.h> |
| 38 | #include <linux/errseq.h> | 38 | #include <linux/errseq.h> |
| 39 | #include <linux/ioprio.h> | 39 | #include <linux/ioprio.h> |
| 40 | #include <linux/fs_types.h> | ||
| 41 | #include <linux/build_bug.h> | ||
| 42 | #include <linux/stddef.h> | ||
| 40 | 43 | ||
| 41 | #include <asm/byteorder.h> | 44 | #include <asm/byteorder.h> |
| 42 | #include <uapi/linux/fs.h> | 45 | #include <uapi/linux/fs.h> |
| @@ -61,6 +64,8 @@ struct workqueue_struct; | |||
| 61 | struct iov_iter; | 64 | struct iov_iter; |
| 62 | struct fscrypt_info; | 65 | struct fscrypt_info; |
| 63 | struct fscrypt_operations; | 66 | struct fscrypt_operations; |
| 67 | struct fs_context; | ||
| 68 | struct fs_parameter_description; | ||
| 64 | 69 | ||
| 65 | extern void __init inode_init(void); | 70 | extern void __init inode_init(void); |
| 66 | extern void __init inode_init_early(void); | 71 | extern void __init inode_init_early(void); |
| @@ -153,6 +158,9 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset, | |||
| 153 | #define FMODE_OPENED ((__force fmode_t)0x80000) | 158 | #define FMODE_OPENED ((__force fmode_t)0x80000) |
| 154 | #define FMODE_CREATED ((__force fmode_t)0x100000) | 159 | #define FMODE_CREATED ((__force fmode_t)0x100000) |
| 155 | 160 | ||
| 161 | /* File is stream-like */ | ||
| 162 | #define FMODE_STREAM ((__force fmode_t)0x200000) | ||
| 163 | |||
| 156 | /* File was opened by fanotify and shouldn't generate fanotify events */ | 164 | /* File was opened by fanotify and shouldn't generate fanotify events */ |
| 157 | #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) | 165 | #define FMODE_NONOTIFY ((__force fmode_t)0x4000000) |
| 158 | 166 | ||
| @@ -304,13 +312,20 @@ enum rw_hint { | |||
| 304 | 312 | ||
| 305 | struct kiocb { | 313 | struct kiocb { |
| 306 | struct file *ki_filp; | 314 | struct file *ki_filp; |
| 315 | |||
| 316 | /* The 'ki_filp' pointer is shared in a union for aio */ | ||
| 317 | randomized_struct_fields_start | ||
| 318 | |||
| 307 | loff_t ki_pos; | 319 | loff_t ki_pos; |
| 308 | void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); | 320 | void (*ki_complete)(struct kiocb *iocb, long ret, long ret2); |
| 309 | void *private; | 321 | void *private; |
| 310 | int ki_flags; | 322 | int ki_flags; |
| 311 | u16 ki_hint; | 323 | u16 ki_hint; |
| 312 | u16 ki_ioprio; /* See linux/ioprio.h */ | 324 | u16 ki_ioprio; /* See linux/ioprio.h */ |
| 313 | } __randomize_layout; | 325 | unsigned int ki_cookie; /* for ->iopoll */ |
| 326 | |||
| 327 | randomized_struct_fields_end | ||
| 328 | }; | ||
| 314 | 329 | ||
| 315 | static inline bool is_sync_kiocb(struct kiocb *kiocb) | 330 | static inline bool is_sync_kiocb(struct kiocb *kiocb) |
| 316 | { | 331 | { |
| @@ -698,7 +713,7 @@ struct inode { | |||
| 698 | struct fsnotify_mark_connector __rcu *i_fsnotify_marks; | 713 | struct fsnotify_mark_connector __rcu *i_fsnotify_marks; |
| 699 | #endif | 714 | #endif |
| 700 | 715 | ||
| 701 | #if IS_ENABLED(CONFIG_FS_ENCRYPTION) | 716 | #ifdef CONFIG_FS_ENCRYPTION |
| 702 | struct fscrypt_info *i_crypt_info; | 717 | struct fscrypt_info *i_crypt_info; |
| 703 | #endif | 718 | #endif |
| 704 | 719 | ||
| @@ -951,7 +966,9 @@ static inline struct file *get_file(struct file *f) | |||
| 951 | atomic_long_inc(&f->f_count); | 966 | atomic_long_inc(&f->f_count); |
| 952 | return f; | 967 | return f; |
| 953 | } | 968 | } |
| 954 | #define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count) | 969 | #define get_file_rcu_many(x, cnt) \ |
| 970 | atomic_long_add_unless(&(x)->f_count, (cnt), 0) | ||
| 971 | #define get_file_rcu(x) get_file_rcu_many((x), 1) | ||
| 955 | #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) | 972 | #define fput_atomic(x) atomic_long_add_unless(&(x)->f_count, -1, 1) |
| 956 | #define file_count(x) atomic_long_read(&(x)->f_count) | 973 | #define file_count(x) atomic_long_read(&(x)->f_count) |
| 957 | 974 | ||
| @@ -1337,6 +1354,7 @@ extern int send_sigurg(struct fown_struct *fown); | |||
| 1337 | 1354 | ||
| 1338 | /* These sb flags are internal to the kernel */ | 1355 | /* These sb flags are internal to the kernel */ |
| 1339 | #define SB_SUBMOUNT (1<<26) | 1356 | #define SB_SUBMOUNT (1<<26) |
| 1357 | #define SB_FORCE (1<<27) | ||
| 1340 | #define SB_NOSEC (1<<28) | 1358 | #define SB_NOSEC (1<<28) |
| 1341 | #define SB_BORN (1<<29) | 1359 | #define SB_BORN (1<<29) |
| 1342 | #define SB_ACTIVE (1<<30) | 1360 | #define SB_ACTIVE (1<<30) |
| @@ -1403,7 +1421,7 @@ struct super_block { | |||
| 1403 | void *s_security; | 1421 | void *s_security; |
| 1404 | #endif | 1422 | #endif |
| 1405 | const struct xattr_handler **s_xattr; | 1423 | const struct xattr_handler **s_xattr; |
| 1406 | #if IS_ENABLED(CONFIG_FS_ENCRYPTION) | 1424 | #ifdef CONFIG_FS_ENCRYPTION |
| 1407 | const struct fscrypt_operations *s_cop; | 1425 | const struct fscrypt_operations *s_cop; |
| 1408 | #endif | 1426 | #endif |
| 1409 | struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ | 1427 | struct hlist_bl_head s_roots; /* alternate root dentries for NFS */ |
| @@ -1447,7 +1465,7 @@ struct super_block { | |||
| 1447 | * Filesystem subtype. If non-empty the filesystem type field | 1465 | * Filesystem subtype. If non-empty the filesystem type field |
| 1448 | * in /proc/mounts will be "type.subtype" | 1466 | * in /proc/mounts will be "type.subtype" |
| 1449 | */ | 1467 | */ |
| 1450 | char *s_subtype; | 1468 | const char *s_subtype; |
| 1451 | 1469 | ||
| 1452 | const struct dentry_operations *s_d_op; /* default d_op for dentries */ | 1470 | const struct dentry_operations *s_d_op; /* default d_op for dentries */ |
| 1453 | 1471 | ||
| @@ -1479,11 +1497,12 @@ struct super_block { | |||
| 1479 | struct user_namespace *s_user_ns; | 1497 | struct user_namespace *s_user_ns; |
| 1480 | 1498 | ||
| 1481 | /* | 1499 | /* |
| 1482 | * Keep the lru lists last in the structure so they always sit on their | 1500 | * The list_lru structure is essentially just a pointer to a table |
| 1483 | * own individual cachelines. | 1501 | * of per-node lru lists, each of which has its own spinlock. |
| 1502 | * There is no need to put them into separate cachelines. | ||
| 1484 | */ | 1503 | */ |
| 1485 | struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; | 1504 | struct list_lru s_dentry_lru; |
| 1486 | struct list_lru s_inode_lru ____cacheline_aligned_in_smp; | 1505 | struct list_lru s_inode_lru; |
| 1487 | struct rcu_head rcu; | 1506 | struct rcu_head rcu; |
| 1488 | struct work_struct destroy_work; | 1507 | struct work_struct destroy_work; |
| 1489 | 1508 | ||
| @@ -1700,22 +1719,6 @@ int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, | |||
| 1700 | int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); | 1719 | int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags); |
| 1701 | 1720 | ||
| 1702 | /* | 1721 | /* |
| 1703 | * File types | ||
| 1704 | * | ||
| 1705 | * NOTE! These match bits 12..15 of stat.st_mode | ||
| 1706 | * (ie "(i_mode >> 12) & 15"). | ||
| 1707 | */ | ||
| 1708 | #define DT_UNKNOWN 0 | ||
| 1709 | #define DT_FIFO 1 | ||
| 1710 | #define DT_CHR 2 | ||
| 1711 | #define DT_DIR 4 | ||
| 1712 | #define DT_BLK 6 | ||
| 1713 | #define DT_REG 8 | ||
| 1714 | #define DT_LNK 10 | ||
| 1715 | #define DT_SOCK 12 | ||
| 1716 | #define DT_WHT 14 | ||
| 1717 | |||
| 1718 | /* | ||
| 1719 | * This is the "filldir" function type, used by readdir() to let | 1722 | * This is the "filldir" function type, used by readdir() to let |
| 1720 | * the kernel specify what kind of dirent layout it wants to have. | 1723 | * the kernel specify what kind of dirent layout it wants to have. |
| 1721 | * This allows the kernel to read directories into kernel space or | 1724 | * This allows the kernel to read directories into kernel space or |
| @@ -1786,6 +1789,7 @@ struct file_operations { | |||
| 1786 | ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); | 1789 | ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *); |
| 1787 | ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); | 1790 | ssize_t (*read_iter) (struct kiocb *, struct iov_iter *); |
| 1788 | ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); | 1791 | ssize_t (*write_iter) (struct kiocb *, struct iov_iter *); |
| 1792 | int (*iopoll)(struct kiocb *kiocb, bool spin); | ||
| 1789 | int (*iterate) (struct file *, struct dir_context *); | 1793 | int (*iterate) (struct file *, struct dir_context *); |
| 1790 | int (*iterate_shared) (struct file *, struct dir_context *); | 1794 | int (*iterate_shared) (struct file *, struct dir_context *); |
| 1791 | __poll_t (*poll) (struct file *, struct poll_table_struct *); | 1795 | __poll_t (*poll) (struct file *, struct poll_table_struct *); |
| @@ -2084,7 +2088,7 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp) | |||
| 2084 | * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to | 2088 | * I_WB_SWITCH Cgroup bdi_writeback switching in progress. Used to |
| 2085 | * synchronize competing switching instances and to tell | 2089 | * synchronize competing switching instances and to tell |
| 2086 | * wb stat updates to grab the i_pages lock. See | 2090 | * wb stat updates to grab the i_pages lock. See |
| 2087 | * inode_switch_wb_work_fn() for details. | 2091 | * inode_switch_wbs_work_fn() for details. |
| 2088 | * | 2092 | * |
| 2089 | * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper | 2093 | * I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper |
| 2090 | * and work dirs among overlayfs mounts. | 2094 | * and work dirs among overlayfs mounts. |
| @@ -2172,6 +2176,8 @@ struct file_system_type { | |||
| 2172 | #define FS_HAS_SUBTYPE 4 | 2176 | #define FS_HAS_SUBTYPE 4 |
| 2173 | #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ | 2177 | #define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */ |
| 2174 | #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ | 2178 | #define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */ |
| 2179 | int (*init_fs_context)(struct fs_context *); | ||
| 2180 | const struct fs_parameter_description *parameters; | ||
| 2175 | struct dentry *(*mount) (struct file_system_type *, int, | 2181 | struct dentry *(*mount) (struct file_system_type *, int, |
| 2176 | const char *, void *); | 2182 | const char *, void *); |
| 2177 | void (*kill_sb) (struct super_block *); | 2183 | void (*kill_sb) (struct super_block *); |
| @@ -2227,8 +2233,12 @@ void kill_litter_super(struct super_block *sb); | |||
| 2227 | void deactivate_super(struct super_block *sb); | 2233 | void deactivate_super(struct super_block *sb); |
| 2228 | void deactivate_locked_super(struct super_block *sb); | 2234 | void deactivate_locked_super(struct super_block *sb); |
| 2229 | int set_anon_super(struct super_block *s, void *data); | 2235 | int set_anon_super(struct super_block *s, void *data); |
| 2236 | int set_anon_super_fc(struct super_block *s, struct fs_context *fc); | ||
| 2230 | int get_anon_bdev(dev_t *); | 2237 | int get_anon_bdev(dev_t *); |
| 2231 | void free_anon_bdev(dev_t); | 2238 | void free_anon_bdev(dev_t); |
| 2239 | struct super_block *sget_fc(struct fs_context *fc, | ||
| 2240 | int (*test)(struct super_block *, struct fs_context *), | ||
| 2241 | int (*set)(struct super_block *, struct fs_context *)); | ||
| 2232 | struct super_block *sget_userns(struct file_system_type *type, | 2242 | struct super_block *sget_userns(struct file_system_type *type, |
| 2233 | int (*test)(struct super_block *,void *), | 2243 | int (*test)(struct super_block *,void *), |
| 2234 | int (*set)(struct super_block *,void *), | 2244 | int (*set)(struct super_block *,void *), |
| @@ -2271,8 +2281,7 @@ mount_pseudo(struct file_system_type *fs_type, char *name, | |||
| 2271 | 2281 | ||
| 2272 | extern int register_filesystem(struct file_system_type *); | 2282 | extern int register_filesystem(struct file_system_type *); |
| 2273 | extern int unregister_filesystem(struct file_system_type *); | 2283 | extern int unregister_filesystem(struct file_system_type *); |
| 2274 | extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data); | 2284 | extern struct vfsmount *kern_mount(struct file_system_type *); |
| 2275 | #define kern_mount(type) kern_mount_data(type, NULL) | ||
| 2276 | extern void kern_unmount(struct vfsmount *mnt); | 2285 | extern void kern_unmount(struct vfsmount *mnt); |
| 2277 | extern int may_umount_tree(struct vfsmount *); | 2286 | extern int may_umount_tree(struct vfsmount *); |
| 2278 | extern int may_umount(struct vfsmount *); | 2287 | extern int may_umount(struct vfsmount *); |
| @@ -2486,6 +2495,7 @@ struct filename { | |||
| 2486 | struct audit_names *aname; | 2495 | struct audit_names *aname; |
| 2487 | const char iname[]; | 2496 | const char iname[]; |
| 2488 | }; | 2497 | }; |
| 2498 | static_assert(offsetof(struct filename, iname) % sizeof(long) == 0); | ||
| 2489 | 2499 | ||
| 2490 | extern long vfs_truncate(const struct path *, loff_t); | 2500 | extern long vfs_truncate(const struct path *, loff_t); |
| 2491 | extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, | 2501 | extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs, |
| @@ -3067,6 +3077,7 @@ extern loff_t no_seek_end_llseek_size(struct file *, loff_t, int, loff_t); | |||
| 3067 | extern loff_t no_seek_end_llseek(struct file *, loff_t, int); | 3077 | extern loff_t no_seek_end_llseek(struct file *, loff_t, int); |
| 3068 | extern int generic_file_open(struct inode * inode, struct file * filp); | 3078 | extern int generic_file_open(struct inode * inode, struct file * filp); |
| 3069 | extern int nonseekable_open(struct inode * inode, struct file * filp); | 3079 | extern int nonseekable_open(struct inode * inode, struct file * filp); |
| 3080 | extern int stream_open(struct inode * inode, struct file * filp); | ||
| 3070 | 3081 | ||
| 3071 | #ifdef CONFIG_BLOCK | 3082 | #ifdef CONFIG_BLOCK |
| 3072 | typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, | 3083 | typedef void (dio_submit_t)(struct bio *bio, struct inode *inode, |
| @@ -3514,4 +3525,13 @@ extern void inode_nohighmem(struct inode *inode); | |||
| 3514 | extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, | 3525 | extern int vfs_fadvise(struct file *file, loff_t offset, loff_t len, |
| 3515 | int advice); | 3526 | int advice); |
| 3516 | 3527 | ||
| 3528 | #if defined(CONFIG_IO_URING) | ||
| 3529 | extern struct sock *io_uring_get_socket(struct file *file); | ||
| 3530 | #else | ||
| 3531 | static inline struct sock *io_uring_get_socket(struct file *file) | ||
| 3532 | { | ||
| 3533 | return NULL; | ||
| 3534 | } | ||
| 3535 | #endif | ||
| 3536 | |||
| 3517 | #endif /* _LINUX_FS_H */ | 3537 | #endif /* _LINUX_FS_H */ |
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h new file mode 100644 index 000000000000..eaca452088fa --- /dev/null +++ b/include/linux/fs_context.h | |||
| @@ -0,0 +1,188 @@ | |||
| 1 | /* Filesystem superblock creation and reconfiguration context. | ||
| 2 | * | ||
| 3 | * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. | ||
| 4 | * Written by David Howells (dhowells@redhat.com) | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public Licence | ||
| 8 | * as published by the Free Software Foundation; either version | ||
| 9 | * 2 of the Licence, or (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef _LINUX_FS_CONTEXT_H | ||
| 13 | #define _LINUX_FS_CONTEXT_H | ||
| 14 | |||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/errno.h> | ||
| 17 | #include <linux/security.h> | ||
| 18 | |||
| 19 | struct cred; | ||
| 20 | struct dentry; | ||
| 21 | struct file_operations; | ||
| 22 | struct file_system_type; | ||
| 23 | struct mnt_namespace; | ||
| 24 | struct net; | ||
| 25 | struct pid_namespace; | ||
| 26 | struct super_block; | ||
| 27 | struct user_namespace; | ||
| 28 | struct vfsmount; | ||
| 29 | struct path; | ||
| 30 | |||
| 31 | enum fs_context_purpose { | ||
| 32 | FS_CONTEXT_FOR_MOUNT, /* New superblock for explicit mount */ | ||
| 33 | FS_CONTEXT_FOR_SUBMOUNT, /* New superblock for automatic submount */ | ||
| 34 | FS_CONTEXT_FOR_RECONFIGURE, /* Superblock reconfiguration (remount) */ | ||
| 35 | }; | ||
| 36 | |||
| 37 | /* | ||
| 38 | * Type of parameter value. | ||
| 39 | */ | ||
| 40 | enum fs_value_type { | ||
| 41 | fs_value_is_undefined, | ||
| 42 | fs_value_is_flag, /* Value not given a value */ | ||
| 43 | fs_value_is_string, /* Value is a string */ | ||
| 44 | fs_value_is_blob, /* Value is a binary blob */ | ||
| 45 | fs_value_is_filename, /* Value is a filename* + dirfd */ | ||
| 46 | fs_value_is_filename_empty, /* Value is a filename* + dirfd + AT_EMPTY_PATH */ | ||
| 47 | fs_value_is_file, /* Value is a file* */ | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* | ||
| 51 | * Configuration parameter. | ||
| 52 | */ | ||
| 53 | struct fs_parameter { | ||
| 54 | const char *key; /* Parameter name */ | ||
| 55 | enum fs_value_type type:8; /* The type of value here */ | ||
| 56 | union { | ||
| 57 | char *string; | ||
| 58 | void *blob; | ||
| 59 | struct filename *name; | ||
| 60 | struct file *file; | ||
| 61 | }; | ||
| 62 | size_t size; | ||
| 63 | int dirfd; | ||
| 64 | }; | ||
| 65 | |||
| 66 | /* | ||
| 67 | * Filesystem context for holding the parameters used in the creation or | ||
| 68 | * reconfiguration of a superblock. | ||
| 69 | * | ||
| 70 | * Superblock creation fills in ->root whereas reconfiguration begins with this | ||
| 71 | * already set. | ||
| 72 | * | ||
| 73 | * See Documentation/filesystems/mounting.txt | ||
| 74 | */ | ||
| 75 | struct fs_context { | ||
| 76 | const struct fs_context_operations *ops; | ||
| 77 | struct file_system_type *fs_type; | ||
| 78 | void *fs_private; /* The filesystem's context */ | ||
| 79 | struct dentry *root; /* The root and superblock */ | ||
| 80 | struct user_namespace *user_ns; /* The user namespace for this mount */ | ||
| 81 | struct net *net_ns; /* The network namespace for this mount */ | ||
| 82 | const struct cred *cred; /* The mounter's credentials */ | ||
| 83 | const char *source; /* The source name (eg. dev path) */ | ||
| 84 | const char *subtype; /* The subtype to set on the superblock */ | ||
| 85 | void *security; /* Linux S&M options */ | ||
| 86 | void *s_fs_info; /* Proposed s_fs_info */ | ||
| 87 | unsigned int sb_flags; /* Proposed superblock flags (SB_*) */ | ||
| 88 | unsigned int sb_flags_mask; /* Superblock flags that were changed */ | ||
| 89 | unsigned int lsm_flags; /* Information flags from the fs to the LSM */ | ||
| 90 | enum fs_context_purpose purpose:8; | ||
| 91 | bool need_free:1; /* Need to call ops->free() */ | ||
| 92 | bool global:1; /* Goes into &init_user_ns */ | ||
| 93 | }; | ||
| 94 | |||
| 95 | struct fs_context_operations { | ||
| 96 | void (*free)(struct fs_context *fc); | ||
| 97 | int (*dup)(struct fs_context *fc, struct fs_context *src_fc); | ||
| 98 | int (*parse_param)(struct fs_context *fc, struct fs_parameter *param); | ||
| 99 | int (*parse_monolithic)(struct fs_context *fc, void *data); | ||
| 100 | int (*get_tree)(struct fs_context *fc); | ||
| 101 | int (*reconfigure)(struct fs_context *fc); | ||
| 102 | }; | ||
| 103 | |||
| 104 | /* | ||
| 105 | * fs_context manipulation functions. | ||
| 106 | */ | ||
| 107 | extern struct fs_context *fs_context_for_mount(struct file_system_type *fs_type, | ||
| 108 | unsigned int sb_flags); | ||
| 109 | extern struct fs_context *fs_context_for_reconfigure(struct dentry *dentry, | ||
| 110 | unsigned int sb_flags, | ||
| 111 | unsigned int sb_flags_mask); | ||
| 112 | extern struct fs_context *fs_context_for_submount(struct file_system_type *fs_type, | ||
| 113 | struct dentry *reference); | ||
| 114 | |||
| 115 | extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc); | ||
| 116 | extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param); | ||
| 117 | extern int vfs_parse_fs_string(struct fs_context *fc, const char *key, | ||
| 118 | const char *value, size_t v_size); | ||
| 119 | extern int generic_parse_monolithic(struct fs_context *fc, void *data); | ||
| 120 | extern int vfs_get_tree(struct fs_context *fc); | ||
| 121 | extern void put_fs_context(struct fs_context *fc); | ||
| 122 | |||
| 123 | /* | ||
| 124 | * sget() wrapper to be called from the ->get_tree() op. | ||
| 125 | */ | ||
| 126 | enum vfs_get_super_keying { | ||
| 127 | vfs_get_single_super, /* Only one such superblock may exist */ | ||
| 128 | vfs_get_keyed_super, /* Superblocks with different s_fs_info keys may exist */ | ||
| 129 | vfs_get_independent_super, /* Multiple independent superblocks may exist */ | ||
| 130 | }; | ||
| 131 | extern int vfs_get_super(struct fs_context *fc, | ||
| 132 | enum vfs_get_super_keying keying, | ||
| 133 | int (*fill_super)(struct super_block *sb, | ||
| 134 | struct fs_context *fc)); | ||
| 135 | |||
| 136 | extern const struct file_operations fscontext_fops; | ||
| 137 | |||
| 138 | #ifdef CONFIG_PRINTK | ||
| 139 | extern __attribute__((format(printf, 2, 3))) | ||
| 140 | void logfc(struct fs_context *fc, const char *fmt, ...); | ||
| 141 | #else | ||
| 142 | static inline __attribute__((format(printf, 2, 3))) | ||
| 143 | void logfc(struct fs_context *fc, const char *fmt, ...) | ||
| 144 | { | ||
| 145 | } | ||
| 146 | #endif | ||
| 147 | |||
| 148 | /** | ||
| 149 | * infof - Store supplementary informational message | ||
| 150 | * @fc: The context in which to log the informational message | ||
| 151 | * @fmt: The format string | ||
| 152 | * | ||
| 153 | * Store the supplementary informational message for the process if the process | ||
| 154 | * has enabled the facility. | ||
| 155 | */ | ||
| 156 | #define infof(fc, fmt, ...) ({ logfc(fc, "i "fmt, ## __VA_ARGS__); }) | ||
| 157 | |||
| 158 | /** | ||
| 159 | * warnf - Store supplementary warning message | ||
| 160 | * @fc: The context in which to log the error message | ||
| 161 | * @fmt: The format string | ||
| 162 | * | ||
| 163 | * Store the supplementary warning message for the process if the process has | ||
| 164 | * enabled the facility. | ||
| 165 | */ | ||
| 166 | #define warnf(fc, fmt, ...) ({ logfc(fc, "w "fmt, ## __VA_ARGS__); }) | ||
| 167 | |||
| 168 | /** | ||
| 169 | * errorf - Store supplementary error message | ||
| 170 | * @fc: The context in which to log the error message | ||
| 171 | * @fmt: The format string | ||
| 172 | * | ||
| 173 | * Store the supplementary error message for the process if the process has | ||
| 174 | * enabled the facility. | ||
| 175 | */ | ||
| 176 | #define errorf(fc, fmt, ...) ({ logfc(fc, "e "fmt, ## __VA_ARGS__); }) | ||
| 177 | |||
| 178 | /** | ||
| 179 | * invalf - Store supplementary invalid argument error message | ||
| 180 | * @fc: The context in which to log the error message | ||
| 181 | * @fmt: The format string | ||
| 182 | * | ||
| 183 | * Store the supplementary error message for the process if the process has | ||
| 184 | * enabled the facility and return -EINVAL. | ||
| 185 | */ | ||
| 186 | #define invalf(fc, fmt, ...) ({ errorf(fc, fmt, ## __VA_ARGS__); -EINVAL; }) | ||
| 187 | |||
| 188 | #endif /* _LINUX_FS_CONTEXT_H */ | ||
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h new file mode 100644 index 000000000000..d966f96ffe62 --- /dev/null +++ b/include/linux/fs_parser.h | |||
| @@ -0,0 +1,151 @@ | |||
| 1 | /* Filesystem parameter description and parser | ||
| 2 | * | ||
| 3 | * Copyright (C) 2018 Red Hat, Inc. All Rights Reserved. | ||
| 4 | * Written by David Howells (dhowells@redhat.com) | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or | ||
| 7 | * modify it under the terms of the GNU General Public Licence | ||
| 8 | * as published by the Free Software Foundation; either version | ||
| 9 | * 2 of the Licence, or (at your option) any later version. | ||
| 10 | */ | ||
| 11 | |||
| 12 | #ifndef _LINUX_FS_PARSER_H | ||
| 13 | #define _LINUX_FS_PARSER_H | ||
| 14 | |||
| 15 | #include <linux/fs_context.h> | ||
| 16 | |||
| 17 | struct path; | ||
| 18 | |||
| 19 | struct constant_table { | ||
| 20 | const char *name; | ||
| 21 | int value; | ||
| 22 | }; | ||
| 23 | |||
| 24 | /* | ||
| 25 | * The type of parameter expected. | ||
| 26 | */ | ||
| 27 | enum fs_parameter_type { | ||
| 28 | __fs_param_wasnt_defined, | ||
| 29 | fs_param_is_flag, | ||
| 30 | fs_param_is_bool, | ||
| 31 | fs_param_is_u32, | ||
| 32 | fs_param_is_u32_octal, | ||
| 33 | fs_param_is_u32_hex, | ||
| 34 | fs_param_is_s32, | ||
| 35 | fs_param_is_u64, | ||
| 36 | fs_param_is_enum, | ||
| 37 | fs_param_is_string, | ||
| 38 | fs_param_is_blob, | ||
| 39 | fs_param_is_blockdev, | ||
| 40 | fs_param_is_path, | ||
| 41 | fs_param_is_fd, | ||
| 42 | nr__fs_parameter_type, | ||
| 43 | }; | ||
| 44 | |||
| 45 | /* | ||
| 46 | * Specification of the type of value a parameter wants. | ||
| 47 | * | ||
| 48 | * Note that the fsparam_flag(), fsparam_string(), fsparam_u32(), ... macros | ||
| 49 | * should be used to generate elements of this type. | ||
| 50 | */ | ||
| 51 | struct fs_parameter_spec { | ||
| 52 | const char *name; | ||
| 53 | u8 opt; /* Option number (returned by fs_parse()) */ | ||
| 54 | enum fs_parameter_type type:8; /* The desired parameter type */ | ||
| 55 | unsigned short flags; | ||
| 56 | #define fs_param_v_optional 0x0001 /* The value is optional */ | ||
| 57 | #define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */ | ||
| 58 | #define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */ | ||
| 59 | #define fs_param_deprecated 0x0008 /* The param is deprecated */ | ||
| 60 | }; | ||
| 61 | |||
| 62 | struct fs_parameter_enum { | ||
| 63 | u8 opt; /* Option number (as fs_parameter_spec::opt) */ | ||
| 64 | char name[14]; | ||
| 65 | u8 value; | ||
| 66 | }; | ||
| 67 | |||
| 68 | struct fs_parameter_description { | ||
| 69 | const char name[16]; /* Name for logging purposes */ | ||
| 70 | const struct fs_parameter_spec *specs; /* List of param specifications */ | ||
| 71 | const struct fs_parameter_enum *enums; /* Enum values */ | ||
| 72 | }; | ||
| 73 | |||
| 74 | /* | ||
| 75 | * Result of parse. | ||
| 76 | */ | ||
| 77 | struct fs_parse_result { | ||
| 78 | bool negated; /* T if param was "noxxx" */ | ||
| 79 | bool has_value; /* T if value supplied to param */ | ||
| 80 | union { | ||
| 81 | bool boolean; /* For spec_bool */ | ||
| 82 | int int_32; /* For spec_s32/spec_enum */ | ||
| 83 | unsigned int uint_32; /* For spec_u32{,_octal,_hex}/spec_enum */ | ||
| 84 | u64 uint_64; /* For spec_u64 */ | ||
| 85 | }; | ||
| 86 | }; | ||
| 87 | |||
| 88 | extern int fs_parse(struct fs_context *fc, | ||
| 89 | const struct fs_parameter_description *desc, | ||
| 90 | struct fs_parameter *value, | ||
| 91 | struct fs_parse_result *result); | ||
| 92 | extern int fs_lookup_param(struct fs_context *fc, | ||
| 93 | struct fs_parameter *param, | ||
| 94 | bool want_bdev, | ||
| 95 | struct path *_path); | ||
| 96 | |||
| 97 | extern int __lookup_constant(const struct constant_table tbl[], size_t tbl_size, | ||
| 98 | const char *name, int not_found); | ||
| 99 | #define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf)) | ||
| 100 | |||
| 101 | #ifdef CONFIG_VALIDATE_FS_PARSER | ||
| 102 | extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, | ||
| 103 | int low, int high, int special); | ||
| 104 | extern bool fs_validate_description(const struct fs_parameter_description *desc); | ||
| 105 | #else | ||
| 106 | static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size, | ||
| 107 | int low, int high, int special) | ||
| 108 | { return true; } | ||
| 109 | static inline bool fs_validate_description(const struct fs_parameter_description *desc) | ||
| 110 | { return true; } | ||
| 111 | #endif | ||
| 112 | |||
| 113 | /* | ||
| 114 | * Parameter type, name, index and flags element constructors. Use as: | ||
| 115 | * | ||
| 116 | * fsparam_xxxx("foo", Opt_foo) | ||
| 117 | * | ||
| 118 | * If existing helpers are not enough, direct use of __fsparam() would | ||
| 119 | * work, but any such case is probably a sign that new helper is needed. | ||
| 120 | * Helpers will remain stable; low-level implementation may change. | ||
| 121 | */ | ||
| 122 | #define __fsparam(TYPE, NAME, OPT, FLAGS) \ | ||
| 123 | { \ | ||
| 124 | .name = NAME, \ | ||
| 125 | .opt = OPT, \ | ||
| 126 | .type = TYPE, \ | ||
| 127 | .flags = FLAGS \ | ||
| 128 | } | ||
| 129 | |||
| 130 | #define fsparam_flag(NAME, OPT) __fsparam(fs_param_is_flag, NAME, OPT, 0) | ||
| 131 | #define fsparam_flag_no(NAME, OPT) \ | ||
| 132 | __fsparam(fs_param_is_flag, NAME, OPT, \ | ||
| 133 | fs_param_neg_with_no) | ||
| 134 | #define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0) | ||
| 135 | #define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0) | ||
| 136 | #define fsparam_u32oct(NAME, OPT) \ | ||
| 137 | __fsparam(fs_param_is_u32_octal, NAME, OPT, 0) | ||
| 138 | #define fsparam_u32hex(NAME, OPT) \ | ||
| 139 | __fsparam(fs_param_is_u32_hex, NAME, OPT, 0) | ||
| 140 | #define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0) | ||
| 141 | #define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0) | ||
| 142 | #define fsparam_enum(NAME, OPT) __fsparam(fs_param_is_enum, NAME, OPT, 0) | ||
| 143 | #define fsparam_string(NAME, OPT) \ | ||
| 144 | __fsparam(fs_param_is_string, NAME, OPT, 0) | ||
| 145 | #define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0) | ||
| 146 | #define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0) | ||
| 147 | #define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0) | ||
| 148 | #define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0) | ||
| 149 | |||
| 150 | |||
| 151 | #endif /* _LINUX_FS_PARSER_H */ | ||
diff --git a/include/linux/fs_types.h b/include/linux/fs_types.h new file mode 100644 index 000000000000..54816791196f --- /dev/null +++ b/include/linux/fs_types.h | |||
| @@ -0,0 +1,75 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _LINUX_FS_TYPES_H | ||
| 3 | #define _LINUX_FS_TYPES_H | ||
| 4 | |||
| 5 | /* | ||
| 6 | * This is a header for the common implementation of dirent | ||
| 7 | * to fs on-disk file type conversion. Although the fs on-disk | ||
| 8 | * bits are specific to every file system, in practice, many | ||
| 9 | * file systems use the exact same on-disk format to describe | ||
| 10 | * the lower 3 file type bits that represent the 7 POSIX file | ||
| 11 | * types. | ||
| 12 | * | ||
| 13 | * It is important to note that the definitions in this | ||
| 14 | * header MUST NOT change. This would break both the | ||
| 15 | * userspace ABI and the on-disk format of filesystems | ||
| 16 | * using this code. | ||
| 17 | * | ||
| 18 | * All those file systems can use this generic code for the | ||
| 19 | * conversions. | ||
| 20 | */ | ||
| 21 | |||
| 22 | /* | ||
| 23 | * struct dirent file types | ||
| 24 | * exposed to user via getdents(2), readdir(3) | ||
| 25 | * | ||
| 26 | * These match bits 12..15 of stat.st_mode | ||
| 27 | * (ie "(i_mode >> 12) & 15"). | ||
| 28 | */ | ||
| 29 | #define S_DT_SHIFT 12 | ||
| 30 | #define S_DT(mode) (((mode) & S_IFMT) >> S_DT_SHIFT) | ||
| 31 | #define S_DT_MASK (S_IFMT >> S_DT_SHIFT) | ||
| 32 | |||
| 33 | /* these are defined by POSIX and also present in glibc's dirent.h */ | ||
| 34 | #define DT_UNKNOWN 0 | ||
| 35 | #define DT_FIFO 1 | ||
| 36 | #define DT_CHR 2 | ||
| 37 | #define DT_DIR 4 | ||
| 38 | #define DT_BLK 6 | ||
| 39 | #define DT_REG 8 | ||
| 40 | #define DT_LNK 10 | ||
| 41 | #define DT_SOCK 12 | ||
| 42 | #define DT_WHT 14 | ||
| 43 | |||
| 44 | #define DT_MAX (S_DT_MASK + 1) /* 16 */ | ||
| 45 | |||
| 46 | /* | ||
| 47 | * fs on-disk file types. | ||
| 48 | * Only the low 3 bits are used for the POSIX file types. | ||
| 49 | * Other bits are reserved for fs private use. | ||
| 50 | * These definitions are shared and used by multiple filesystems, | ||
| 51 | * and MUST NOT change under any circumstances. | ||
| 52 | * | ||
| 53 | * Note that no fs currently stores the whiteout type on-disk, | ||
| 54 | * so whiteout dirents are exposed to user as DT_CHR. | ||
| 55 | */ | ||
| 56 | #define FT_UNKNOWN 0 | ||
| 57 | #define FT_REG_FILE 1 | ||
| 58 | #define FT_DIR 2 | ||
| 59 | #define FT_CHRDEV 3 | ||
| 60 | #define FT_BLKDEV 4 | ||
| 61 | #define FT_FIFO 5 | ||
| 62 | #define FT_SOCK 6 | ||
| 63 | #define FT_SYMLINK 7 | ||
| 64 | |||
| 65 | #define FT_MAX 8 | ||
| 66 | |||
| 67 | /* | ||
| 68 | * declarations for helper functions, accompanying implementation | ||
| 69 | * is in fs/fs_types.c | ||
| 70 | */ | ||
| 71 | extern unsigned char fs_ftype_to_dtype(unsigned int filetype); | ||
| 72 | extern unsigned char fs_umode_to_ftype(umode_t mode); | ||
| 73 | extern unsigned char fs_umode_to_dtype(umode_t mode); | ||
| 74 | |||
| 75 | #endif | ||
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h index 952ab97af325..e5194fc3983e 100644 --- a/include/linux/fscrypt.h +++ b/include/linux/fscrypt.h | |||
| @@ -2,9 +2,8 @@ | |||
| 2 | /* | 2 | /* |
| 3 | * fscrypt.h: declarations for per-file encryption | 3 | * fscrypt.h: declarations for per-file encryption |
| 4 | * | 4 | * |
| 5 | * Filesystems that implement per-file encryption include this header | 5 | * Filesystems that implement per-file encryption must include this header |
| 6 | * file with the __FS_HAS_ENCRYPTION set according to whether that filesystem | 6 | * file. |
| 7 | * is being built with encryption support or not. | ||
| 8 | * | 7 | * |
| 9 | * Copyright (C) 2015, Google, Inc. | 8 | * Copyright (C) 2015, Google, Inc. |
| 10 | * | 9 | * |
| @@ -15,6 +14,8 @@ | |||
| 15 | #define _LINUX_FSCRYPT_H | 14 | #define _LINUX_FSCRYPT_H |
| 16 | 15 | ||
| 17 | #include <linux/fs.h> | 16 | #include <linux/fs.h> |
| 17 | #include <linux/mm.h> | ||
| 18 | #include <linux/slab.h> | ||
| 18 | 19 | ||
| 19 | #define FS_CRYPTO_BLOCK_SIZE 16 | 20 | #define FS_CRYPTO_BLOCK_SIZE 16 |
| 20 | 21 | ||
| @@ -42,11 +43,410 @@ struct fscrypt_name { | |||
| 42 | /* Maximum value for the third parameter of fscrypt_operations.set_context(). */ | 43 | /* Maximum value for the third parameter of fscrypt_operations.set_context(). */ |
| 43 | #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 | 44 | #define FSCRYPT_SET_CONTEXT_MAX_SIZE 28 |
| 44 | 45 | ||
| 45 | #if __FS_HAS_ENCRYPTION | 46 | #ifdef CONFIG_FS_ENCRYPTION |
| 46 | #include <linux/fscrypt_supp.h> | 47 | /* |
| 47 | #else | 48 | * fscrypt superblock flags |
| 48 | #include <linux/fscrypt_notsupp.h> | 49 | */ |
| 49 | #endif | 50 | #define FS_CFLG_OWN_PAGES (1U << 1) |
| 51 | |||
| 52 | /* | ||
| 53 | * crypto operations for filesystems | ||
| 54 | */ | ||
| 55 | struct fscrypt_operations { | ||
| 56 | unsigned int flags; | ||
| 57 | const char *key_prefix; | ||
| 58 | int (*get_context)(struct inode *, void *, size_t); | ||
| 59 | int (*set_context)(struct inode *, const void *, size_t, void *); | ||
| 60 | bool (*dummy_context)(struct inode *); | ||
| 61 | bool (*empty_dir)(struct inode *); | ||
| 62 | unsigned int max_namelen; | ||
| 63 | }; | ||
| 64 | |||
| 65 | struct fscrypt_ctx { | ||
| 66 | union { | ||
| 67 | struct { | ||
| 68 | struct page *bounce_page; /* Ciphertext page */ | ||
| 69 | struct page *control_page; /* Original page */ | ||
| 70 | } w; | ||
| 71 | struct { | ||
| 72 | struct bio *bio; | ||
| 73 | struct work_struct work; | ||
| 74 | } r; | ||
| 75 | struct list_head free_list; /* Free list */ | ||
| 76 | }; | ||
| 77 | u8 flags; /* Flags */ | ||
| 78 | }; | ||
| 79 | |||
| 80 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
| 81 | { | ||
| 82 | return (inode->i_crypt_info != NULL); | ||
| 83 | } | ||
| 84 | |||
| 85 | static inline bool fscrypt_dummy_context_enabled(struct inode *inode) | ||
| 86 | { | ||
| 87 | return inode->i_sb->s_cop->dummy_context && | ||
| 88 | inode->i_sb->s_cop->dummy_context(inode); | ||
| 89 | } | ||
| 90 | |||
| 91 | /* crypto.c */ | ||
| 92 | extern void fscrypt_enqueue_decrypt_work(struct work_struct *); | ||
| 93 | extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); | ||
| 94 | extern void fscrypt_release_ctx(struct fscrypt_ctx *); | ||
| 95 | extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, | ||
| 96 | unsigned int, unsigned int, | ||
| 97 | u64, gfp_t); | ||
| 98 | extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, | ||
| 99 | unsigned int, u64); | ||
| 100 | |||
| 101 | static inline struct page *fscrypt_control_page(struct page *page) | ||
| 102 | { | ||
| 103 | return ((struct fscrypt_ctx *)page_private(page))->w.control_page; | ||
| 104 | } | ||
| 105 | |||
| 106 | extern void fscrypt_restore_control_page(struct page *); | ||
| 107 | |||
| 108 | /* policy.c */ | ||
| 109 | extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); | ||
| 110 | extern int fscrypt_ioctl_get_policy(struct file *, void __user *); | ||
| 111 | extern int fscrypt_has_permitted_context(struct inode *, struct inode *); | ||
| 112 | extern int fscrypt_inherit_context(struct inode *, struct inode *, | ||
| 113 | void *, bool); | ||
| 114 | /* keyinfo.c */ | ||
| 115 | extern int fscrypt_get_encryption_info(struct inode *); | ||
| 116 | extern void fscrypt_put_encryption_info(struct inode *); | ||
| 117 | |||
| 118 | /* fname.c */ | ||
| 119 | extern int fscrypt_setup_filename(struct inode *, const struct qstr *, | ||
| 120 | int lookup, struct fscrypt_name *); | ||
| 121 | |||
| 122 | static inline void fscrypt_free_filename(struct fscrypt_name *fname) | ||
| 123 | { | ||
| 124 | kfree(fname->crypto_buf.name); | ||
| 125 | } | ||
| 126 | |||
| 127 | extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, | ||
| 128 | struct fscrypt_str *); | ||
| 129 | extern void fscrypt_fname_free_buffer(struct fscrypt_str *); | ||
| 130 | extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, | ||
| 131 | const struct fscrypt_str *, struct fscrypt_str *); | ||
| 132 | |||
| 133 | #define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 | ||
| 134 | |||
| 135 | /* Extracts the second-to-last ciphertext block; see explanation below */ | ||
| 136 | #define FSCRYPT_FNAME_DIGEST(name, len) \ | ||
| 137 | ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ | ||
| 138 | FS_CRYPTO_BLOCK_SIZE)) | ||
| 139 | |||
| 140 | #define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE | ||
| 141 | |||
| 142 | /** | ||
| 143 | * fscrypt_digested_name - alternate identifier for an on-disk filename | ||
| 144 | * | ||
| 145 | * When userspace lists an encrypted directory without access to the key, | ||
| 146 | * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE | ||
| 147 | * bytes are shown in this abbreviated form (base64-encoded) rather than as the | ||
| 148 | * full ciphertext (base64-encoded). This is necessary to allow supporting | ||
| 149 | * filenames up to NAME_MAX bytes, since base64 encoding expands the length. | ||
| 150 | * | ||
| 151 | * To make it possible for filesystems to still find the correct directory entry | ||
| 152 | * despite not knowing the full on-disk name, we encode any filesystem-specific | ||
| 153 | * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, | ||
| 154 | * followed by the second-to-last ciphertext block of the filename. Due to the | ||
| 155 | * use of the CBC-CTS encryption mode, the second-to-last ciphertext block | ||
| 156 | * depends on the full plaintext. (Note that ciphertext stealing causes the | ||
| 157 | * last two blocks to appear "flipped".) This makes accidental collisions very | ||
| 158 | * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they | ||
| 159 | * share the same filesystem-specific hashes. | ||
| 160 | * | ||
| 161 | * However, this scheme isn't immune to intentional collisions, which can be | ||
| 162 | * created by anyone able to create arbitrary plaintext filenames and view them | ||
| 163 | * without the key. Making the "digest" be a real cryptographic hash like | ||
| 164 | * SHA-256 over the full ciphertext would prevent this, although it would be | ||
| 165 | * less efficient and harder to implement, especially since the filesystem would | ||
| 166 | * need to calculate it for each directory entry examined during a search. | ||
| 167 | */ | ||
| 168 | struct fscrypt_digested_name { | ||
| 169 | u32 hash; | ||
| 170 | u32 minor_hash; | ||
| 171 | u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; | ||
| 172 | }; | ||
| 173 | |||
| 174 | /** | ||
| 175 | * fscrypt_match_name() - test whether the given name matches a directory entry | ||
| 176 | * @fname: the name being searched for | ||
| 177 | * @de_name: the name from the directory entry | ||
| 178 | * @de_name_len: the length of @de_name in bytes | ||
| 179 | * | ||
| 180 | * Normally @fname->disk_name will be set, and in that case we simply compare | ||
| 181 | * that to the name stored in the directory entry. The only exception is that | ||
| 182 | * if we don't have the key for an encrypted directory and a filename in it is | ||
| 183 | * very long, then we won't have the full disk_name and we'll instead need to | ||
| 184 | * match against the fscrypt_digested_name. | ||
| 185 | * | ||
| 186 | * Return: %true if the name matches, otherwise %false. | ||
| 187 | */ | ||
| 188 | static inline bool fscrypt_match_name(const struct fscrypt_name *fname, | ||
| 189 | const u8 *de_name, u32 de_name_len) | ||
| 190 | { | ||
| 191 | if (unlikely(!fname->disk_name.name)) { | ||
| 192 | const struct fscrypt_digested_name *n = | ||
| 193 | (const void *)fname->crypto_buf.name; | ||
| 194 | if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) | ||
| 195 | return false; | ||
| 196 | if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) | ||
| 197 | return false; | ||
| 198 | return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), | ||
| 199 | n->digest, FSCRYPT_FNAME_DIGEST_SIZE); | ||
| 200 | } | ||
| 201 | |||
| 202 | if (de_name_len != fname->disk_name.len) | ||
| 203 | return false; | ||
| 204 | return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); | ||
| 205 | } | ||
| 206 | |||
| 207 | /* bio.c */ | ||
| 208 | extern void fscrypt_decrypt_bio(struct bio *); | ||
| 209 | extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, | ||
| 210 | struct bio *bio); | ||
| 211 | extern void fscrypt_pullback_bio_page(struct page **, bool); | ||
| 212 | extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, | ||
| 213 | unsigned int); | ||
| 214 | |||
| 215 | /* hooks.c */ | ||
| 216 | extern int fscrypt_file_open(struct inode *inode, struct file *filp); | ||
| 217 | extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir); | ||
| 218 | extern int __fscrypt_prepare_rename(struct inode *old_dir, | ||
| 219 | struct dentry *old_dentry, | ||
| 220 | struct inode *new_dir, | ||
| 221 | struct dentry *new_dentry, | ||
| 222 | unsigned int flags); | ||
| 223 | extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); | ||
| 224 | extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, | ||
| 225 | unsigned int max_len, | ||
| 226 | struct fscrypt_str *disk_link); | ||
| 227 | extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, | ||
| 228 | unsigned int len, | ||
| 229 | struct fscrypt_str *disk_link); | ||
| 230 | extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, | ||
| 231 | unsigned int max_size, | ||
| 232 | struct delayed_call *done); | ||
| 233 | #else /* !CONFIG_FS_ENCRYPTION */ | ||
| 234 | |||
| 235 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
| 236 | { | ||
| 237 | return false; | ||
| 238 | } | ||
| 239 | |||
| 240 | static inline bool fscrypt_dummy_context_enabled(struct inode *inode) | ||
| 241 | { | ||
| 242 | return false; | ||
| 243 | } | ||
| 244 | |||
| 245 | /* crypto.c */ | ||
| 246 | static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) | ||
| 247 | { | ||
| 248 | } | ||
| 249 | |||
| 250 | static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, | ||
| 251 | gfp_t gfp_flags) | ||
| 252 | { | ||
| 253 | return ERR_PTR(-EOPNOTSUPP); | ||
| 254 | } | ||
| 255 | |||
| 256 | static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx) | ||
| 257 | { | ||
| 258 | return; | ||
| 259 | } | ||
| 260 | |||
| 261 | static inline struct page *fscrypt_encrypt_page(const struct inode *inode, | ||
| 262 | struct page *page, | ||
| 263 | unsigned int len, | ||
| 264 | unsigned int offs, | ||
| 265 | u64 lblk_num, gfp_t gfp_flags) | ||
| 266 | { | ||
| 267 | return ERR_PTR(-EOPNOTSUPP); | ||
| 268 | } | ||
| 269 | |||
| 270 | static inline int fscrypt_decrypt_page(const struct inode *inode, | ||
| 271 | struct page *page, | ||
| 272 | unsigned int len, unsigned int offs, | ||
| 273 | u64 lblk_num) | ||
| 274 | { | ||
| 275 | return -EOPNOTSUPP; | ||
| 276 | } | ||
| 277 | |||
| 278 | static inline struct page *fscrypt_control_page(struct page *page) | ||
| 279 | { | ||
| 280 | WARN_ON_ONCE(1); | ||
| 281 | return ERR_PTR(-EINVAL); | ||
| 282 | } | ||
| 283 | |||
| 284 | static inline void fscrypt_restore_control_page(struct page *page) | ||
| 285 | { | ||
| 286 | return; | ||
| 287 | } | ||
| 288 | |||
| 289 | /* policy.c */ | ||
| 290 | static inline int fscrypt_ioctl_set_policy(struct file *filp, | ||
| 291 | const void __user *arg) | ||
| 292 | { | ||
| 293 | return -EOPNOTSUPP; | ||
| 294 | } | ||
| 295 | |||
| 296 | static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) | ||
| 297 | { | ||
| 298 | return -EOPNOTSUPP; | ||
| 299 | } | ||
| 300 | |||
| 301 | static inline int fscrypt_has_permitted_context(struct inode *parent, | ||
| 302 | struct inode *child) | ||
| 303 | { | ||
| 304 | return 0; | ||
| 305 | } | ||
| 306 | |||
| 307 | static inline int fscrypt_inherit_context(struct inode *parent, | ||
| 308 | struct inode *child, | ||
| 309 | void *fs_data, bool preload) | ||
| 310 | { | ||
| 311 | return -EOPNOTSUPP; | ||
| 312 | } | ||
| 313 | |||
| 314 | /* keyinfo.c */ | ||
| 315 | static inline int fscrypt_get_encryption_info(struct inode *inode) | ||
| 316 | { | ||
| 317 | return -EOPNOTSUPP; | ||
| 318 | } | ||
| 319 | |||
| 320 | static inline void fscrypt_put_encryption_info(struct inode *inode) | ||
| 321 | { | ||
| 322 | return; | ||
| 323 | } | ||
| 324 | |||
| 325 | /* fname.c */ | ||
| 326 | static inline int fscrypt_setup_filename(struct inode *dir, | ||
| 327 | const struct qstr *iname, | ||
| 328 | int lookup, struct fscrypt_name *fname) | ||
| 329 | { | ||
| 330 | if (IS_ENCRYPTED(dir)) | ||
| 331 | return -EOPNOTSUPP; | ||
| 332 | |||
| 333 | memset(fname, 0, sizeof(struct fscrypt_name)); | ||
| 334 | fname->usr_fname = iname; | ||
| 335 | fname->disk_name.name = (unsigned char *)iname->name; | ||
| 336 | fname->disk_name.len = iname->len; | ||
| 337 | return 0; | ||
| 338 | } | ||
| 339 | |||
| 340 | static inline void fscrypt_free_filename(struct fscrypt_name *fname) | ||
| 341 | { | ||
| 342 | return; | ||
| 343 | } | ||
| 344 | |||
| 345 | static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, | ||
| 346 | u32 max_encrypted_len, | ||
| 347 | struct fscrypt_str *crypto_str) | ||
| 348 | { | ||
| 349 | return -EOPNOTSUPP; | ||
| 350 | } | ||
| 351 | |||
| 352 | static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) | ||
| 353 | { | ||
| 354 | return; | ||
| 355 | } | ||
| 356 | |||
| 357 | static inline int fscrypt_fname_disk_to_usr(struct inode *inode, | ||
| 358 | u32 hash, u32 minor_hash, | ||
| 359 | const struct fscrypt_str *iname, | ||
| 360 | struct fscrypt_str *oname) | ||
| 361 | { | ||
| 362 | return -EOPNOTSUPP; | ||
| 363 | } | ||
| 364 | |||
| 365 | static inline bool fscrypt_match_name(const struct fscrypt_name *fname, | ||
| 366 | const u8 *de_name, u32 de_name_len) | ||
| 367 | { | ||
| 368 | /* Encryption support disabled; use standard comparison */ | ||
| 369 | if (de_name_len != fname->disk_name.len) | ||
| 370 | return false; | ||
| 371 | return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); | ||
| 372 | } | ||
| 373 | |||
| 374 | /* bio.c */ | ||
| 375 | static inline void fscrypt_decrypt_bio(struct bio *bio) | ||
| 376 | { | ||
| 377 | } | ||
| 378 | |||
| 379 | static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, | ||
| 380 | struct bio *bio) | ||
| 381 | { | ||
| 382 | } | ||
| 383 | |||
| 384 | static inline void fscrypt_pullback_bio_page(struct page **page, bool restore) | ||
| 385 | { | ||
| 386 | return; | ||
| 387 | } | ||
| 388 | |||
| 389 | static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, | ||
| 390 | sector_t pblk, unsigned int len) | ||
| 391 | { | ||
| 392 | return -EOPNOTSUPP; | ||
| 393 | } | ||
| 394 | |||
| 395 | /* hooks.c */ | ||
| 396 | |||
| 397 | static inline int fscrypt_file_open(struct inode *inode, struct file *filp) | ||
| 398 | { | ||
| 399 | if (IS_ENCRYPTED(inode)) | ||
| 400 | return -EOPNOTSUPP; | ||
| 401 | return 0; | ||
| 402 | } | ||
| 403 | |||
| 404 | static inline int __fscrypt_prepare_link(struct inode *inode, | ||
| 405 | struct inode *dir) | ||
| 406 | { | ||
| 407 | return -EOPNOTSUPP; | ||
| 408 | } | ||
| 409 | |||
| 410 | static inline int __fscrypt_prepare_rename(struct inode *old_dir, | ||
| 411 | struct dentry *old_dentry, | ||
| 412 | struct inode *new_dir, | ||
| 413 | struct dentry *new_dentry, | ||
| 414 | unsigned int flags) | ||
| 415 | { | ||
| 416 | return -EOPNOTSUPP; | ||
| 417 | } | ||
| 418 | |||
| 419 | static inline int __fscrypt_prepare_lookup(struct inode *dir, | ||
| 420 | struct dentry *dentry) | ||
| 421 | { | ||
| 422 | return -EOPNOTSUPP; | ||
| 423 | } | ||
| 424 | |||
| 425 | static inline int __fscrypt_prepare_symlink(struct inode *dir, | ||
| 426 | unsigned int len, | ||
| 427 | unsigned int max_len, | ||
| 428 | struct fscrypt_str *disk_link) | ||
| 429 | { | ||
| 430 | return -EOPNOTSUPP; | ||
| 431 | } | ||
| 432 | |||
| 433 | |||
| 434 | static inline int __fscrypt_encrypt_symlink(struct inode *inode, | ||
| 435 | const char *target, | ||
| 436 | unsigned int len, | ||
| 437 | struct fscrypt_str *disk_link) | ||
| 438 | { | ||
| 439 | return -EOPNOTSUPP; | ||
| 440 | } | ||
| 441 | |||
| 442 | static inline const char *fscrypt_get_symlink(struct inode *inode, | ||
| 443 | const void *caddr, | ||
| 444 | unsigned int max_size, | ||
| 445 | struct delayed_call *done) | ||
| 446 | { | ||
| 447 | return ERR_PTR(-EOPNOTSUPP); | ||
| 448 | } | ||
| 449 | #endif /* !CONFIG_FS_ENCRYPTION */ | ||
| 50 | 450 | ||
| 51 | /** | 451 | /** |
| 52 | * fscrypt_require_key - require an inode's encryption key | 452 | * fscrypt_require_key - require an inode's encryption key |
| @@ -89,7 +489,7 @@ static inline int fscrypt_require_key(struct inode *inode) | |||
| 89 | * in an encrypted directory tree use the same encryption policy. | 489 | * in an encrypted directory tree use the same encryption policy. |
| 90 | * | 490 | * |
| 91 | * Return: 0 on success, -ENOKEY if the directory's encryption key is missing, | 491 | * Return: 0 on success, -ENOKEY if the directory's encryption key is missing, |
| 92 | * -EPERM if the link would result in an inconsistent encryption policy, or | 492 | * -EXDEV if the link would result in an inconsistent encryption policy, or |
| 93 | * another -errno code. | 493 | * another -errno code. |
| 94 | */ | 494 | */ |
| 95 | static inline int fscrypt_prepare_link(struct dentry *old_dentry, | 495 | static inline int fscrypt_prepare_link(struct dentry *old_dentry, |
| @@ -119,7 +519,7 @@ static inline int fscrypt_prepare_link(struct dentry *old_dentry, | |||
| 119 | * We also verify that the rename will not violate the constraint that all files | 519 | * We also verify that the rename will not violate the constraint that all files |
| 120 | * in an encrypted directory tree use the same encryption policy. | 520 | * in an encrypted directory tree use the same encryption policy. |
| 121 | * | 521 | * |
| 122 | * Return: 0 on success, -ENOKEY if an encryption key is missing, -EPERM if the | 522 | * Return: 0 on success, -ENOKEY if an encryption key is missing, -EXDEV if the |
| 123 | * rename would cause inconsistent encryption policies, or another -errno code. | 523 | * rename would cause inconsistent encryption policies, or another -errno code. |
| 124 | */ | 524 | */ |
| 125 | static inline int fscrypt_prepare_rename(struct inode *old_dir, | 525 | static inline int fscrypt_prepare_rename(struct inode *old_dir, |
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h deleted file mode 100644 index ee8b43e4c15a..000000000000 --- a/include/linux/fscrypt_notsupp.h +++ /dev/null | |||
| @@ -1,231 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * fscrypt_notsupp.h | ||
| 4 | * | ||
| 5 | * This stubs out the fscrypt functions for filesystems configured without | ||
| 6 | * encryption support. | ||
| 7 | * | ||
| 8 | * Do not include this file directly. Use fscrypt.h instead! | ||
| 9 | */ | ||
| 10 | #ifndef _LINUX_FSCRYPT_H | ||
| 11 | #error "Incorrect include of linux/fscrypt_notsupp.h!" | ||
| 12 | #endif | ||
| 13 | |||
| 14 | #ifndef _LINUX_FSCRYPT_NOTSUPP_H | ||
| 15 | #define _LINUX_FSCRYPT_NOTSUPP_H | ||
| 16 | |||
| 17 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
| 18 | { | ||
| 19 | return false; | ||
| 20 | } | ||
| 21 | |||
| 22 | static inline bool fscrypt_dummy_context_enabled(struct inode *inode) | ||
| 23 | { | ||
| 24 | return false; | ||
| 25 | } | ||
| 26 | |||
| 27 | /* crypto.c */ | ||
| 28 | static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work) | ||
| 29 | { | ||
| 30 | } | ||
| 31 | |||
| 32 | static inline struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *inode, | ||
| 33 | gfp_t gfp_flags) | ||
| 34 | { | ||
| 35 | return ERR_PTR(-EOPNOTSUPP); | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx) | ||
| 39 | { | ||
| 40 | return; | ||
| 41 | } | ||
| 42 | |||
| 43 | static inline struct page *fscrypt_encrypt_page(const struct inode *inode, | ||
| 44 | struct page *page, | ||
| 45 | unsigned int len, | ||
| 46 | unsigned int offs, | ||
| 47 | u64 lblk_num, gfp_t gfp_flags) | ||
| 48 | { | ||
| 49 | return ERR_PTR(-EOPNOTSUPP); | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline int fscrypt_decrypt_page(const struct inode *inode, | ||
| 53 | struct page *page, | ||
| 54 | unsigned int len, unsigned int offs, | ||
| 55 | u64 lblk_num) | ||
| 56 | { | ||
| 57 | return -EOPNOTSUPP; | ||
| 58 | } | ||
| 59 | |||
| 60 | static inline struct page *fscrypt_control_page(struct page *page) | ||
| 61 | { | ||
| 62 | WARN_ON_ONCE(1); | ||
| 63 | return ERR_PTR(-EINVAL); | ||
| 64 | } | ||
| 65 | |||
| 66 | static inline void fscrypt_restore_control_page(struct page *page) | ||
| 67 | { | ||
| 68 | return; | ||
| 69 | } | ||
| 70 | |||
| 71 | /* policy.c */ | ||
| 72 | static inline int fscrypt_ioctl_set_policy(struct file *filp, | ||
| 73 | const void __user *arg) | ||
| 74 | { | ||
| 75 | return -EOPNOTSUPP; | ||
| 76 | } | ||
| 77 | |||
| 78 | static inline int fscrypt_ioctl_get_policy(struct file *filp, void __user *arg) | ||
| 79 | { | ||
| 80 | return -EOPNOTSUPP; | ||
| 81 | } | ||
| 82 | |||
| 83 | static inline int fscrypt_has_permitted_context(struct inode *parent, | ||
| 84 | struct inode *child) | ||
| 85 | { | ||
| 86 | return 0; | ||
| 87 | } | ||
| 88 | |||
| 89 | static inline int fscrypt_inherit_context(struct inode *parent, | ||
| 90 | struct inode *child, | ||
| 91 | void *fs_data, bool preload) | ||
| 92 | { | ||
| 93 | return -EOPNOTSUPP; | ||
| 94 | } | ||
| 95 | |||
| 96 | /* keyinfo.c */ | ||
| 97 | static inline int fscrypt_get_encryption_info(struct inode *inode) | ||
| 98 | { | ||
| 99 | return -EOPNOTSUPP; | ||
| 100 | } | ||
| 101 | |||
| 102 | static inline void fscrypt_put_encryption_info(struct inode *inode) | ||
| 103 | { | ||
| 104 | return; | ||
| 105 | } | ||
| 106 | |||
| 107 | /* fname.c */ | ||
| 108 | static inline int fscrypt_setup_filename(struct inode *dir, | ||
| 109 | const struct qstr *iname, | ||
| 110 | int lookup, struct fscrypt_name *fname) | ||
| 111 | { | ||
| 112 | if (IS_ENCRYPTED(dir)) | ||
| 113 | return -EOPNOTSUPP; | ||
| 114 | |||
| 115 | memset(fname, 0, sizeof(struct fscrypt_name)); | ||
| 116 | fname->usr_fname = iname; | ||
| 117 | fname->disk_name.name = (unsigned char *)iname->name; | ||
| 118 | fname->disk_name.len = iname->len; | ||
| 119 | return 0; | ||
| 120 | } | ||
| 121 | |||
| 122 | static inline void fscrypt_free_filename(struct fscrypt_name *fname) | ||
| 123 | { | ||
| 124 | return; | ||
| 125 | } | ||
| 126 | |||
| 127 | static inline int fscrypt_fname_alloc_buffer(const struct inode *inode, | ||
| 128 | u32 max_encrypted_len, | ||
| 129 | struct fscrypt_str *crypto_str) | ||
| 130 | { | ||
| 131 | return -EOPNOTSUPP; | ||
| 132 | } | ||
| 133 | |||
| 134 | static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str) | ||
| 135 | { | ||
| 136 | return; | ||
| 137 | } | ||
| 138 | |||
| 139 | static inline int fscrypt_fname_disk_to_usr(struct inode *inode, | ||
| 140 | u32 hash, u32 minor_hash, | ||
| 141 | const struct fscrypt_str *iname, | ||
| 142 | struct fscrypt_str *oname) | ||
| 143 | { | ||
| 144 | return -EOPNOTSUPP; | ||
| 145 | } | ||
| 146 | |||
| 147 | static inline bool fscrypt_match_name(const struct fscrypt_name *fname, | ||
| 148 | const u8 *de_name, u32 de_name_len) | ||
| 149 | { | ||
| 150 | /* Encryption support disabled; use standard comparison */ | ||
| 151 | if (de_name_len != fname->disk_name.len) | ||
| 152 | return false; | ||
| 153 | return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); | ||
| 154 | } | ||
| 155 | |||
| 156 | /* bio.c */ | ||
| 157 | static inline void fscrypt_decrypt_bio(struct bio *bio) | ||
| 158 | { | ||
| 159 | } | ||
| 160 | |||
| 161 | static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, | ||
| 162 | struct bio *bio) | ||
| 163 | { | ||
| 164 | } | ||
| 165 | |||
| 166 | static inline void fscrypt_pullback_bio_page(struct page **page, bool restore) | ||
| 167 | { | ||
| 168 | return; | ||
| 169 | } | ||
| 170 | |||
| 171 | static inline int fscrypt_zeroout_range(const struct inode *inode, pgoff_t lblk, | ||
| 172 | sector_t pblk, unsigned int len) | ||
| 173 | { | ||
| 174 | return -EOPNOTSUPP; | ||
| 175 | } | ||
| 176 | |||
| 177 | /* hooks.c */ | ||
| 178 | |||
| 179 | static inline int fscrypt_file_open(struct inode *inode, struct file *filp) | ||
| 180 | { | ||
| 181 | if (IS_ENCRYPTED(inode)) | ||
| 182 | return -EOPNOTSUPP; | ||
| 183 | return 0; | ||
| 184 | } | ||
| 185 | |||
| 186 | static inline int __fscrypt_prepare_link(struct inode *inode, | ||
| 187 | struct inode *dir) | ||
| 188 | { | ||
| 189 | return -EOPNOTSUPP; | ||
| 190 | } | ||
| 191 | |||
| 192 | static inline int __fscrypt_prepare_rename(struct inode *old_dir, | ||
| 193 | struct dentry *old_dentry, | ||
| 194 | struct inode *new_dir, | ||
| 195 | struct dentry *new_dentry, | ||
| 196 | unsigned int flags) | ||
| 197 | { | ||
| 198 | return -EOPNOTSUPP; | ||
| 199 | } | ||
| 200 | |||
| 201 | static inline int __fscrypt_prepare_lookup(struct inode *dir, | ||
| 202 | struct dentry *dentry) | ||
| 203 | { | ||
| 204 | return -EOPNOTSUPP; | ||
| 205 | } | ||
| 206 | |||
| 207 | static inline int __fscrypt_prepare_symlink(struct inode *dir, | ||
| 208 | unsigned int len, | ||
| 209 | unsigned int max_len, | ||
| 210 | struct fscrypt_str *disk_link) | ||
| 211 | { | ||
| 212 | return -EOPNOTSUPP; | ||
| 213 | } | ||
| 214 | |||
| 215 | static inline int __fscrypt_encrypt_symlink(struct inode *inode, | ||
| 216 | const char *target, | ||
| 217 | unsigned int len, | ||
| 218 | struct fscrypt_str *disk_link) | ||
| 219 | { | ||
| 220 | return -EOPNOTSUPP; | ||
| 221 | } | ||
| 222 | |||
| 223 | static inline const char *fscrypt_get_symlink(struct inode *inode, | ||
| 224 | const void *caddr, | ||
| 225 | unsigned int max_size, | ||
| 226 | struct delayed_call *done) | ||
| 227 | { | ||
| 228 | return ERR_PTR(-EOPNOTSUPP); | ||
| 229 | } | ||
| 230 | |||
| 231 | #endif /* _LINUX_FSCRYPT_NOTSUPP_H */ | ||
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h deleted file mode 100644 index 6456c6b2005f..000000000000 --- a/include/linux/fscrypt_supp.h +++ /dev/null | |||
| @@ -1,204 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * fscrypt_supp.h | ||
| 4 | * | ||
| 5 | * Do not include this file directly. Use fscrypt.h instead! | ||
| 6 | */ | ||
| 7 | #ifndef _LINUX_FSCRYPT_H | ||
| 8 | #error "Incorrect include of linux/fscrypt_supp.h!" | ||
| 9 | #endif | ||
| 10 | |||
| 11 | #ifndef _LINUX_FSCRYPT_SUPP_H | ||
| 12 | #define _LINUX_FSCRYPT_SUPP_H | ||
| 13 | |||
| 14 | #include <linux/mm.h> | ||
| 15 | #include <linux/slab.h> | ||
| 16 | |||
| 17 | /* | ||
| 18 | * fscrypt superblock flags | ||
| 19 | */ | ||
| 20 | #define FS_CFLG_OWN_PAGES (1U << 1) | ||
| 21 | |||
| 22 | /* | ||
| 23 | * crypto operations for filesystems | ||
| 24 | */ | ||
| 25 | struct fscrypt_operations { | ||
| 26 | unsigned int flags; | ||
| 27 | const char *key_prefix; | ||
| 28 | int (*get_context)(struct inode *, void *, size_t); | ||
| 29 | int (*set_context)(struct inode *, const void *, size_t, void *); | ||
| 30 | bool (*dummy_context)(struct inode *); | ||
| 31 | bool (*empty_dir)(struct inode *); | ||
| 32 | unsigned int max_namelen; | ||
| 33 | }; | ||
| 34 | |||
| 35 | struct fscrypt_ctx { | ||
| 36 | union { | ||
| 37 | struct { | ||
| 38 | struct page *bounce_page; /* Ciphertext page */ | ||
| 39 | struct page *control_page; /* Original page */ | ||
| 40 | } w; | ||
| 41 | struct { | ||
| 42 | struct bio *bio; | ||
| 43 | struct work_struct work; | ||
| 44 | } r; | ||
| 45 | struct list_head free_list; /* Free list */ | ||
| 46 | }; | ||
| 47 | u8 flags; /* Flags */ | ||
| 48 | }; | ||
| 49 | |||
| 50 | static inline bool fscrypt_has_encryption_key(const struct inode *inode) | ||
| 51 | { | ||
| 52 | return (inode->i_crypt_info != NULL); | ||
| 53 | } | ||
| 54 | |||
| 55 | static inline bool fscrypt_dummy_context_enabled(struct inode *inode) | ||
| 56 | { | ||
| 57 | return inode->i_sb->s_cop->dummy_context && | ||
| 58 | inode->i_sb->s_cop->dummy_context(inode); | ||
| 59 | } | ||
| 60 | |||
| 61 | /* crypto.c */ | ||
| 62 | extern void fscrypt_enqueue_decrypt_work(struct work_struct *); | ||
| 63 | extern struct fscrypt_ctx *fscrypt_get_ctx(const struct inode *, gfp_t); | ||
| 64 | extern void fscrypt_release_ctx(struct fscrypt_ctx *); | ||
| 65 | extern struct page *fscrypt_encrypt_page(const struct inode *, struct page *, | ||
| 66 | unsigned int, unsigned int, | ||
| 67 | u64, gfp_t); | ||
| 68 | extern int fscrypt_decrypt_page(const struct inode *, struct page *, unsigned int, | ||
| 69 | unsigned int, u64); | ||
| 70 | |||
| 71 | static inline struct page *fscrypt_control_page(struct page *page) | ||
| 72 | { | ||
| 73 | return ((struct fscrypt_ctx *)page_private(page))->w.control_page; | ||
| 74 | } | ||
| 75 | |||
| 76 | extern void fscrypt_restore_control_page(struct page *); | ||
| 77 | |||
| 78 | /* policy.c */ | ||
| 79 | extern int fscrypt_ioctl_set_policy(struct file *, const void __user *); | ||
| 80 | extern int fscrypt_ioctl_get_policy(struct file *, void __user *); | ||
| 81 | extern int fscrypt_has_permitted_context(struct inode *, struct inode *); | ||
| 82 | extern int fscrypt_inherit_context(struct inode *, struct inode *, | ||
| 83 | void *, bool); | ||
| 84 | /* keyinfo.c */ | ||
| 85 | extern int fscrypt_get_encryption_info(struct inode *); | ||
| 86 | extern void fscrypt_put_encryption_info(struct inode *); | ||
| 87 | |||
| 88 | /* fname.c */ | ||
| 89 | extern int fscrypt_setup_filename(struct inode *, const struct qstr *, | ||
| 90 | int lookup, struct fscrypt_name *); | ||
| 91 | |||
| 92 | static inline void fscrypt_free_filename(struct fscrypt_name *fname) | ||
| 93 | { | ||
| 94 | kfree(fname->crypto_buf.name); | ||
| 95 | } | ||
| 96 | |||
| 97 | extern int fscrypt_fname_alloc_buffer(const struct inode *, u32, | ||
| 98 | struct fscrypt_str *); | ||
| 99 | extern void fscrypt_fname_free_buffer(struct fscrypt_str *); | ||
| 100 | extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32, | ||
| 101 | const struct fscrypt_str *, struct fscrypt_str *); | ||
| 102 | |||
| 103 | #define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32 | ||
| 104 | |||
| 105 | /* Extracts the second-to-last ciphertext block; see explanation below */ | ||
| 106 | #define FSCRYPT_FNAME_DIGEST(name, len) \ | ||
| 107 | ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \ | ||
| 108 | FS_CRYPTO_BLOCK_SIZE)) | ||
| 109 | |||
| 110 | #define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE | ||
| 111 | |||
| 112 | /** | ||
| 113 | * fscrypt_digested_name - alternate identifier for an on-disk filename | ||
| 114 | * | ||
| 115 | * When userspace lists an encrypted directory without access to the key, | ||
| 116 | * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE | ||
| 117 | * bytes are shown in this abbreviated form (base64-encoded) rather than as the | ||
| 118 | * full ciphertext (base64-encoded). This is necessary to allow supporting | ||
| 119 | * filenames up to NAME_MAX bytes, since base64 encoding expands the length. | ||
| 120 | * | ||
| 121 | * To make it possible for filesystems to still find the correct directory entry | ||
| 122 | * despite not knowing the full on-disk name, we encode any filesystem-specific | ||
| 123 | * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups, | ||
| 124 | * followed by the second-to-last ciphertext block of the filename. Due to the | ||
| 125 | * use of the CBC-CTS encryption mode, the second-to-last ciphertext block | ||
| 126 | * depends on the full plaintext. (Note that ciphertext stealing causes the | ||
| 127 | * last two blocks to appear "flipped".) This makes accidental collisions very | ||
| 128 | * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they | ||
| 129 | * share the same filesystem-specific hashes. | ||
| 130 | * | ||
| 131 | * However, this scheme isn't immune to intentional collisions, which can be | ||
| 132 | * created by anyone able to create arbitrary plaintext filenames and view them | ||
| 133 | * without the key. Making the "digest" be a real cryptographic hash like | ||
| 134 | * SHA-256 over the full ciphertext would prevent this, although it would be | ||
| 135 | * less efficient and harder to implement, especially since the filesystem would | ||
| 136 | * need to calculate it for each directory entry examined during a search. | ||
| 137 | */ | ||
| 138 | struct fscrypt_digested_name { | ||
| 139 | u32 hash; | ||
| 140 | u32 minor_hash; | ||
| 141 | u8 digest[FSCRYPT_FNAME_DIGEST_SIZE]; | ||
| 142 | }; | ||
| 143 | |||
| 144 | /** | ||
| 145 | * fscrypt_match_name() - test whether the given name matches a directory entry | ||
| 146 | * @fname: the name being searched for | ||
| 147 | * @de_name: the name from the directory entry | ||
| 148 | * @de_name_len: the length of @de_name in bytes | ||
| 149 | * | ||
| 150 | * Normally @fname->disk_name will be set, and in that case we simply compare | ||
| 151 | * that to the name stored in the directory entry. The only exception is that | ||
| 152 | * if we don't have the key for an encrypted directory and a filename in it is | ||
| 153 | * very long, then we won't have the full disk_name and we'll instead need to | ||
| 154 | * match against the fscrypt_digested_name. | ||
| 155 | * | ||
| 156 | * Return: %true if the name matches, otherwise %false. | ||
| 157 | */ | ||
| 158 | static inline bool fscrypt_match_name(const struct fscrypt_name *fname, | ||
| 159 | const u8 *de_name, u32 de_name_len) | ||
| 160 | { | ||
| 161 | if (unlikely(!fname->disk_name.name)) { | ||
| 162 | const struct fscrypt_digested_name *n = | ||
| 163 | (const void *)fname->crypto_buf.name; | ||
| 164 | if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_')) | ||
| 165 | return false; | ||
| 166 | if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE) | ||
| 167 | return false; | ||
| 168 | return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len), | ||
| 169 | n->digest, FSCRYPT_FNAME_DIGEST_SIZE); | ||
| 170 | } | ||
| 171 | |||
| 172 | if (de_name_len != fname->disk_name.len) | ||
| 173 | return false; | ||
| 174 | return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len); | ||
| 175 | } | ||
| 176 | |||
| 177 | /* bio.c */ | ||
| 178 | extern void fscrypt_decrypt_bio(struct bio *); | ||
| 179 | extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx, | ||
| 180 | struct bio *bio); | ||
| 181 | extern void fscrypt_pullback_bio_page(struct page **, bool); | ||
| 182 | extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t, | ||
| 183 | unsigned int); | ||
| 184 | |||
| 185 | /* hooks.c */ | ||
| 186 | extern int fscrypt_file_open(struct inode *inode, struct file *filp); | ||
| 187 | extern int __fscrypt_prepare_link(struct inode *inode, struct inode *dir); | ||
| 188 | extern int __fscrypt_prepare_rename(struct inode *old_dir, | ||
| 189 | struct dentry *old_dentry, | ||
| 190 | struct inode *new_dir, | ||
| 191 | struct dentry *new_dentry, | ||
| 192 | unsigned int flags); | ||
| 193 | extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry); | ||
| 194 | extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len, | ||
| 195 | unsigned int max_len, | ||
| 196 | struct fscrypt_str *disk_link); | ||
| 197 | extern int __fscrypt_encrypt_symlink(struct inode *inode, const char *target, | ||
| 198 | unsigned int len, | ||
| 199 | struct fscrypt_str *disk_link); | ||
| 200 | extern const char *fscrypt_get_symlink(struct inode *inode, const void *caddr, | ||
| 201 | unsigned int max_size, | ||
| 202 | struct delayed_call *done); | ||
| 203 | |||
| 204 | #endif /* _LINUX_FSCRYPT_SUPP_H */ | ||
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h index 941b11811f85..1fc0edd71c52 100644 --- a/include/linux/fsl/guts.h +++ b/include/linux/fsl/guts.h | |||
| @@ -135,8 +135,6 @@ struct ccsr_guts { | |||
| 135 | u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ | 135 | u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ |
| 136 | } __attribute__ ((packed)); | 136 | } __attribute__ ((packed)); |
| 137 | 137 | ||
| 138 | u32 fsl_guts_get_svr(void); | ||
| 139 | |||
| 140 | /* Alternate function signal multiplex control */ | 138 | /* Alternate function signal multiplex control */ |
| 141 | #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) | 139 | #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) |
| 142 | 140 | ||
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h index 741f567253ef..975553a9f75d 100644 --- a/include/linux/fsl/mc.h +++ b/include/linux/fsl/mc.h | |||
| @@ -193,6 +193,7 @@ struct fsl_mc_device { | |||
| 193 | struct resource *regions; | 193 | struct resource *regions; |
| 194 | struct fsl_mc_device_irq **irqs; | 194 | struct fsl_mc_device_irq **irqs; |
| 195 | struct fsl_mc_resource *resource; | 195 | struct fsl_mc_resource *resource; |
| 196 | struct device_link *consumer_link; | ||
| 196 | }; | 197 | }; |
| 197 | 198 | ||
| 198 | #define to_fsl_mc_device(_dev) \ | 199 | #define to_fsl_mc_device(_dev) \ |
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h index c1f003aadcce..992bf9fa1729 100644 --- a/include/linux/fsl/ptp_qoriq.h +++ b/include/linux/fsl/ptp_qoriq.h | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #define __PTP_QORIQ_H__ | 7 | #define __PTP_QORIQ_H__ |
| 8 | 8 | ||
| 9 | #include <linux/io.h> | 9 | #include <linux/io.h> |
| 10 | #include <linux/interrupt.h> | ||
| 10 | #include <linux/ptp_clock_kernel.h> | 11 | #include <linux/ptp_clock_kernel.h> |
| 11 | 12 | ||
| 12 | /* | 13 | /* |
| @@ -49,7 +50,7 @@ struct etts_regs { | |||
| 49 | u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ | 50 | u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */ |
| 50 | }; | 51 | }; |
| 51 | 52 | ||
| 52 | struct qoriq_ptp_registers { | 53 | struct ptp_qoriq_registers { |
| 53 | struct ctrl_regs __iomem *ctrl_regs; | 54 | struct ctrl_regs __iomem *ctrl_regs; |
| 54 | struct alarm_regs __iomem *alarm_regs; | 55 | struct alarm_regs __iomem *alarm_regs; |
| 55 | struct fiper_regs __iomem *fiper_regs; | 56 | struct fiper_regs __iomem *fiper_regs; |
| @@ -57,15 +58,15 @@ struct qoriq_ptp_registers { | |||
| 57 | }; | 58 | }; |
| 58 | 59 | ||
| 59 | /* Offset definitions for the four register groups */ | 60 | /* Offset definitions for the four register groups */ |
| 60 | #define CTRL_REGS_OFFSET 0x0 | 61 | #define ETSEC_CTRL_REGS_OFFSET 0x0 |
| 61 | #define ALARM_REGS_OFFSET 0x40 | 62 | #define ETSEC_ALARM_REGS_OFFSET 0x40 |
| 62 | #define FIPER_REGS_OFFSET 0x80 | 63 | #define ETSEC_FIPER_REGS_OFFSET 0x80 |
| 63 | #define ETTS_REGS_OFFSET 0xa0 | 64 | #define ETSEC_ETTS_REGS_OFFSET 0xa0 |
| 64 | 65 | ||
| 65 | #define FMAN_CTRL_REGS_OFFSET 0x80 | 66 | #define CTRL_REGS_OFFSET 0x80 |
| 66 | #define FMAN_ALARM_REGS_OFFSET 0xb8 | 67 | #define ALARM_REGS_OFFSET 0xb8 |
| 67 | #define FMAN_FIPER_REGS_OFFSET 0xd0 | 68 | #define FIPER_REGS_OFFSET 0xd0 |
| 68 | #define FMAN_ETTS_REGS_OFFSET 0xe0 | 69 | #define ETTS_REGS_OFFSET 0xe0 |
| 69 | 70 | ||
| 70 | 71 | ||
| 71 | /* Bit definitions for the TMR_CTRL register */ | 72 | /* Bit definitions for the TMR_CTRL register */ |
| @@ -120,6 +121,8 @@ struct qoriq_ptp_registers { | |||
| 120 | /* Bit definitions for the TMR_STAT register */ | 121 | /* Bit definitions for the TMR_STAT register */ |
| 121 | #define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ | 122 | #define STAT_VEC_SHIFT (0) /* Timer general purpose status vector */ |
| 122 | #define STAT_VEC_MASK (0x3f) | 123 | #define STAT_VEC_MASK (0x3f) |
| 124 | #define ETS1_VLD (1<<24) | ||
| 125 | #define ETS2_VLD (1<<25) | ||
| 123 | 126 | ||
| 124 | /* Bit definitions for the TMR_PRSC register */ | 127 | /* Bit definitions for the TMR_PRSC register */ |
| 125 | #define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ | 128 | #define PRSC_OCK_SHIFT (0) /* Output clock division/prescale factor. */ |
| @@ -134,13 +137,16 @@ struct qoriq_ptp_registers { | |||
| 134 | #define DEFAULT_FIPER1_PERIOD 1000000000 | 137 | #define DEFAULT_FIPER1_PERIOD 1000000000 |
| 135 | #define DEFAULT_FIPER2_PERIOD 100000 | 138 | #define DEFAULT_FIPER2_PERIOD 100000 |
| 136 | 139 | ||
| 137 | struct qoriq_ptp { | 140 | struct ptp_qoriq { |
| 138 | void __iomem *base; | 141 | void __iomem *base; |
| 139 | struct qoriq_ptp_registers regs; | 142 | struct ptp_qoriq_registers regs; |
| 140 | spinlock_t lock; /* protects regs */ | 143 | spinlock_t lock; /* protects regs */ |
| 141 | struct ptp_clock *clock; | 144 | struct ptp_clock *clock; |
| 142 | struct ptp_clock_info caps; | 145 | struct ptp_clock_info caps; |
| 143 | struct resource *rsrc; | 146 | struct resource *rsrc; |
| 147 | struct dentry *debugfs_root; | ||
| 148 | struct device *dev; | ||
| 149 | bool extts_fifo_support; | ||
| 144 | int irq; | 150 | int irq; |
| 145 | int phc_index; | 151 | int phc_index; |
| 146 | u64 alarm_interval; /* for periodic alarm */ | 152 | u64 alarm_interval; /* for periodic alarm */ |
| @@ -151,19 +157,49 @@ struct qoriq_ptp { | |||
| 151 | u32 cksel; | 157 | u32 cksel; |
| 152 | u32 tmr_fiper1; | 158 | u32 tmr_fiper1; |
| 153 | u32 tmr_fiper2; | 159 | u32 tmr_fiper2; |
| 160 | u32 (*read)(unsigned __iomem *addr); | ||
| 161 | void (*write)(unsigned __iomem *addr, u32 val); | ||
| 154 | }; | 162 | }; |
| 155 | 163 | ||
| 156 | static inline u32 qoriq_read(unsigned __iomem *addr) | 164 | static inline u32 qoriq_read_be(unsigned __iomem *addr) |
| 157 | { | 165 | { |
| 158 | u32 val; | 166 | return ioread32be(addr); |
| 159 | |||
| 160 | val = ioread32be(addr); | ||
| 161 | return val; | ||
| 162 | } | 167 | } |
| 163 | 168 | ||
| 164 | static inline void qoriq_write(unsigned __iomem *addr, u32 val) | 169 | static inline void qoriq_write_be(unsigned __iomem *addr, u32 val) |
| 165 | { | 170 | { |
| 166 | iowrite32be(val, addr); | 171 | iowrite32be(val, addr); |
| 167 | } | 172 | } |
| 168 | 173 | ||
| 174 | static inline u32 qoriq_read_le(unsigned __iomem *addr) | ||
| 175 | { | ||
| 176 | return ioread32(addr); | ||
| 177 | } | ||
| 178 | |||
| 179 | static inline void qoriq_write_le(unsigned __iomem *addr, u32 val) | ||
| 180 | { | ||
| 181 | iowrite32(val, addr); | ||
| 182 | } | ||
| 183 | |||
| 184 | irqreturn_t ptp_qoriq_isr(int irq, void *priv); | ||
| 185 | int ptp_qoriq_init(struct ptp_qoriq *ptp_qoriq, void __iomem *base, | ||
| 186 | const struct ptp_clock_info *caps); | ||
| 187 | void ptp_qoriq_free(struct ptp_qoriq *ptp_qoriq); | ||
| 188 | int ptp_qoriq_adjfine(struct ptp_clock_info *ptp, long scaled_ppm); | ||
| 189 | int ptp_qoriq_adjtime(struct ptp_clock_info *ptp, s64 delta); | ||
| 190 | int ptp_qoriq_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts); | ||
| 191 | int ptp_qoriq_settime(struct ptp_clock_info *ptp, | ||
| 192 | const struct timespec64 *ts); | ||
| 193 | int ptp_qoriq_enable(struct ptp_clock_info *ptp, | ||
| 194 | struct ptp_clock_request *rq, int on); | ||
| 195 | #ifdef CONFIG_DEBUG_FS | ||
| 196 | void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq); | ||
| 197 | void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq); | ||
| 198 | #else | ||
| 199 | static inline void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq) | ||
| 200 | { } | ||
| 201 | static inline void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq) | ||
| 202 | { } | ||
| 203 | #endif | ||
| 204 | |||
| 169 | #endif | 205 | #endif |
diff --git a/include/linux/fsl_devices.h b/include/linux/fsl_devices.h index 60cef8227534..5da56a674f2f 100644 --- a/include/linux/fsl_devices.h +++ b/include/linux/fsl_devices.h | |||
| @@ -98,10 +98,11 @@ struct fsl_usb2_platform_data { | |||
| 98 | 98 | ||
| 99 | unsigned suspended:1; | 99 | unsigned suspended:1; |
| 100 | unsigned already_suspended:1; | 100 | unsigned already_suspended:1; |
| 101 | unsigned has_fsl_erratum_a007792:1; | 101 | unsigned has_fsl_erratum_a007792:1; |
| 102 | unsigned has_fsl_erratum_a005275:1; | 102 | unsigned has_fsl_erratum_14:1; |
| 103 | unsigned has_fsl_erratum_a005275:1; | ||
| 103 | unsigned has_fsl_erratum_a005697:1; | 104 | unsigned has_fsl_erratum_a005697:1; |
| 104 | unsigned check_phy_clk_valid:1; | 105 | unsigned check_phy_clk_valid:1; |
| 105 | 106 | ||
| 106 | /* register save area for suspend/resume */ | 107 | /* register save area for suspend/resume */ |
| 107 | u32 pm_command; | 108 | u32 pm_command; |
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h index 2ccb08cb5d6a..09587e2860b5 100644 --- a/include/linux/fsnotify.h +++ b/include/linux/fsnotify.h | |||
| @@ -17,8 +17,22 @@ | |||
| 17 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
| 18 | #include <linux/bug.h> | 18 | #include <linux/bug.h> |
| 19 | 19 | ||
| 20 | /* | ||
| 21 | * Notify this @dir inode about a change in the directory entry @dentry. | ||
| 22 | * | ||
| 23 | * Unlike fsnotify_parent(), the event will be reported regardless of the | ||
| 24 | * FS_EVENT_ON_CHILD mask on the parent inode. | ||
| 25 | */ | ||
| 26 | static inline int fsnotify_dirent(struct inode *dir, struct dentry *dentry, | ||
| 27 | __u32 mask) | ||
| 28 | { | ||
| 29 | return fsnotify(dir, mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, | ||
| 30 | dentry->d_name.name, 0); | ||
| 31 | } | ||
| 32 | |||
| 20 | /* Notify this dentry's parent about a child's events. */ | 33 | /* Notify this dentry's parent about a child's events. */ |
| 21 | static inline int fsnotify_parent(const struct path *path, struct dentry *dentry, __u32 mask) | 34 | static inline int fsnotify_parent(const struct path *path, |
| 35 | struct dentry *dentry, __u32 mask) | ||
| 22 | { | 36 | { |
| 23 | if (!dentry) | 37 | if (!dentry) |
| 24 | dentry = path->dentry; | 38 | dentry = path->dentry; |
| @@ -65,6 +79,9 @@ static inline int fsnotify_perm(struct file *file, int mask) | |||
| 65 | fsnotify_mask = FS_ACCESS_PERM; | 79 | fsnotify_mask = FS_ACCESS_PERM; |
| 66 | } | 80 | } |
| 67 | 81 | ||
| 82 | if (S_ISDIR(inode->i_mode)) | ||
| 83 | fsnotify_mask |= FS_ISDIR; | ||
| 84 | |||
| 68 | return fsnotify_path(inode, path, fsnotify_mask); | 85 | return fsnotify_path(inode, path, fsnotify_mask); |
| 69 | } | 86 | } |
| 70 | 87 | ||
| @@ -73,7 +90,12 @@ static inline int fsnotify_perm(struct file *file, int mask) | |||
| 73 | */ | 90 | */ |
| 74 | static inline void fsnotify_link_count(struct inode *inode) | 91 | static inline void fsnotify_link_count(struct inode *inode) |
| 75 | { | 92 | { |
| 76 | fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 93 | __u32 mask = FS_ATTRIB; |
| 94 | |||
| 95 | if (S_ISDIR(inode->i_mode)) | ||
| 96 | mask |= FS_ISDIR; | ||
| 97 | |||
| 98 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
| 77 | } | 99 | } |
| 78 | 100 | ||
| 79 | /* | 101 | /* |
| @@ -81,12 +103,14 @@ static inline void fsnotify_link_count(struct inode *inode) | |||
| 81 | */ | 103 | */ |
| 82 | static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | 104 | static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, |
| 83 | const unsigned char *old_name, | 105 | const unsigned char *old_name, |
| 84 | int isdir, struct inode *target, struct dentry *moved) | 106 | int isdir, struct inode *target, |
| 107 | struct dentry *moved) | ||
| 85 | { | 108 | { |
| 86 | struct inode *source = moved->d_inode; | 109 | struct inode *source = moved->d_inode; |
| 87 | u32 fs_cookie = fsnotify_get_cookie(); | 110 | u32 fs_cookie = fsnotify_get_cookie(); |
| 88 | __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM); | 111 | __u32 old_dir_mask = FS_MOVED_FROM; |
| 89 | __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO); | 112 | __u32 new_dir_mask = FS_MOVED_TO; |
| 113 | __u32 mask = FS_MOVE_SELF; | ||
| 90 | const unsigned char *new_name = moved->d_name.name; | 114 | const unsigned char *new_name = moved->d_name.name; |
| 91 | 115 | ||
| 92 | if (old_dir == new_dir) | 116 | if (old_dir == new_dir) |
| @@ -95,6 +119,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | |||
| 95 | if (isdir) { | 119 | if (isdir) { |
| 96 | old_dir_mask |= FS_ISDIR; | 120 | old_dir_mask |= FS_ISDIR; |
| 97 | new_dir_mask |= FS_ISDIR; | 121 | new_dir_mask |= FS_ISDIR; |
| 122 | mask |= FS_ISDIR; | ||
| 98 | } | 123 | } |
| 99 | 124 | ||
| 100 | fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, | 125 | fsnotify(old_dir, old_dir_mask, source, FSNOTIFY_EVENT_INODE, old_name, |
| @@ -106,7 +131,7 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir, | |||
| 106 | fsnotify_link_count(target); | 131 | fsnotify_link_count(target); |
| 107 | 132 | ||
| 108 | if (source) | 133 | if (source) |
| 109 | fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 134 | fsnotify(source, mask, source, FSNOTIFY_EVENT_INODE, NULL, 0); |
| 110 | audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); | 135 | audit_inode_child(new_dir, moved, AUDIT_TYPE_CHILD_CREATE); |
| 111 | } | 136 | } |
| 112 | 137 | ||
| @@ -128,15 +153,35 @@ static inline void fsnotify_vfsmount_delete(struct vfsmount *mnt) | |||
| 128 | 153 | ||
| 129 | /* | 154 | /* |
| 130 | * fsnotify_nameremove - a filename was removed from a directory | 155 | * fsnotify_nameremove - a filename was removed from a directory |
| 156 | * | ||
| 157 | * This is mostly called under parent vfs inode lock so name and | ||
| 158 | * dentry->d_parent should be stable. However there are some corner cases where | ||
| 159 | * inode lock is not held. So to be on the safe side and be reselient to future | ||
| 160 | * callers and out of tree users of d_delete(), we do not assume that d_parent | ||
| 161 | * and d_name are stable and we use dget_parent() and | ||
| 162 | * take_dentry_name_snapshot() to grab stable references. | ||
| 131 | */ | 163 | */ |
| 132 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | 164 | static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) |
| 133 | { | 165 | { |
| 166 | struct dentry *parent; | ||
| 167 | struct name_snapshot name; | ||
| 134 | __u32 mask = FS_DELETE; | 168 | __u32 mask = FS_DELETE; |
| 135 | 169 | ||
| 170 | /* d_delete() of pseudo inode? (e.g. __ns_get_path() playing tricks) */ | ||
| 171 | if (IS_ROOT(dentry)) | ||
| 172 | return; | ||
| 173 | |||
| 136 | if (isdir) | 174 | if (isdir) |
| 137 | mask |= FS_ISDIR; | 175 | mask |= FS_ISDIR; |
| 138 | 176 | ||
| 139 | fsnotify_parent(NULL, dentry, mask); | 177 | parent = dget_parent(dentry); |
| 178 | take_dentry_name_snapshot(&name, dentry); | ||
| 179 | |||
| 180 | fsnotify(d_inode(parent), mask, d_inode(dentry), FSNOTIFY_EVENT_INODE, | ||
| 181 | name.name, 0); | ||
| 182 | |||
| 183 | release_dentry_name_snapshot(&name); | ||
| 184 | dput(parent); | ||
| 140 | } | 185 | } |
| 141 | 186 | ||
| 142 | /* | 187 | /* |
| @@ -144,7 +189,12 @@ static inline void fsnotify_nameremove(struct dentry *dentry, int isdir) | |||
| 144 | */ | 189 | */ |
| 145 | static inline void fsnotify_inoderemove(struct inode *inode) | 190 | static inline void fsnotify_inoderemove(struct inode *inode) |
| 146 | { | 191 | { |
| 147 | fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | 192 | __u32 mask = FS_DELETE_SELF; |
| 193 | |||
| 194 | if (S_ISDIR(inode->i_mode)) | ||
| 195 | mask |= FS_ISDIR; | ||
| 196 | |||
| 197 | fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0); | ||
| 148 | __fsnotify_inode_delete(inode); | 198 | __fsnotify_inode_delete(inode); |
| 149 | } | 199 | } |
| 150 | 200 | ||
| @@ -155,7 +205,7 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry) | |||
| 155 | { | 205 | { |
| 156 | audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); | 206 | audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); |
| 157 | 207 | ||
| 158 | fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | 208 | fsnotify_dirent(inode, dentry, FS_CREATE); |
| 159 | } | 209 | } |
| 160 | 210 | ||
| 161 | /* | 211 | /* |
| @@ -176,12 +226,9 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct | |||
| 176 | */ | 226 | */ |
| 177 | static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) | 227 | static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry) |
| 178 | { | 228 | { |
| 179 | __u32 mask = (FS_CREATE | FS_ISDIR); | ||
| 180 | struct inode *d_inode = dentry->d_inode; | ||
| 181 | |||
| 182 | audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); | 229 | audit_inode_child(inode, dentry, AUDIT_TYPE_CHILD_CREATE); |
| 183 | 230 | ||
| 184 | fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0); | 231 | fsnotify_dirent(inode, dentry, FS_CREATE | FS_ISDIR); |
| 185 | } | 232 | } |
| 186 | 233 | ||
| 187 | /* | 234 | /* |
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h index 7639774e7475..dfc28fcb4de8 100644 --- a/include/linux/fsnotify_backend.h +++ b/include/linux/fsnotify_backend.h | |||
| @@ -59,27 +59,33 @@ | |||
| 59 | * dnotify and inotify. */ | 59 | * dnotify and inotify. */ |
| 60 | #define FS_EVENT_ON_CHILD 0x08000000 | 60 | #define FS_EVENT_ON_CHILD 0x08000000 |
| 61 | 61 | ||
| 62 | /* This is a list of all events that may get sent to a parernt based on fs event | ||
| 63 | * happening to inodes inside that directory */ | ||
| 64 | #define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ | ||
| 65 | FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ | ||
| 66 | FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ | ||
| 67 | FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM | \ | ||
| 68 | FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) | ||
| 69 | |||
| 70 | #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) | 62 | #define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) |
| 71 | 63 | ||
| 64 | /* | ||
| 65 | * Directory entry modification events - reported only to directory | ||
| 66 | * where entry is modified and not to a watching parent. | ||
| 67 | * The watching parent may get an FS_ATTRIB|FS_EVENT_ON_CHILD event | ||
| 68 | * when a directory entry inside a child subdir changes. | ||
| 69 | */ | ||
| 70 | #define ALL_FSNOTIFY_DIRENT_EVENTS (FS_CREATE | FS_DELETE | FS_MOVE) | ||
| 71 | |||
| 72 | #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ | 72 | #define ALL_FSNOTIFY_PERM_EVENTS (FS_OPEN_PERM | FS_ACCESS_PERM | \ |
| 73 | FS_OPEN_EXEC_PERM) | 73 | FS_OPEN_EXEC_PERM) |
| 74 | 74 | ||
| 75 | /* | ||
| 76 | * This is a list of all events that may get sent to a parent based on fs event | ||
| 77 | * happening to inodes inside that directory. | ||
| 78 | */ | ||
| 79 | #define FS_EVENTS_POSS_ON_CHILD (ALL_FSNOTIFY_PERM_EVENTS | \ | ||
| 80 | FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ | ||
| 81 | FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | \ | ||
| 82 | FS_OPEN | FS_OPEN_EXEC) | ||
| 83 | |||
| 75 | /* Events that can be reported to backends */ | 84 | /* Events that can be reported to backends */ |
| 76 | #define ALL_FSNOTIFY_EVENTS (FS_ACCESS | FS_MODIFY | FS_ATTRIB | \ | 85 | #define ALL_FSNOTIFY_EVENTS (ALL_FSNOTIFY_DIRENT_EVENTS | \ |
| 77 | FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN | \ | 86 | FS_EVENTS_POSS_ON_CHILD | \ |
| 78 | FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE | \ | 87 | FS_DELETE_SELF | FS_MOVE_SELF | FS_DN_RENAME | \ |
| 79 | FS_DELETE | FS_DELETE_SELF | FS_MOVE_SELF | \ | 88 | FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED) |
| 80 | FS_UNMOUNT | FS_Q_OVERFLOW | FS_IN_IGNORED | \ | ||
| 81 | FS_OPEN_PERM | FS_ACCESS_PERM | FS_DN_RENAME | \ | ||
| 82 | FS_OPEN_EXEC | FS_OPEN_EXEC_PERM) | ||
| 83 | 89 | ||
| 84 | /* Extra flags that may be reported with event or control handling of events */ | 90 | /* Extra flags that may be reported with event or control handling of events */ |
| 85 | #define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ | 91 | #define ALL_FSNOTIFY_FLAGS (FS_EXCL_UNLINK | FS_ISDIR | FS_IN_ONESHOT | \ |
| @@ -129,7 +135,6 @@ struct fsnotify_event { | |||
| 129 | struct list_head list; | 135 | struct list_head list; |
| 130 | /* inode may ONLY be dereferenced during handle_event(). */ | 136 | /* inode may ONLY be dereferenced during handle_event(). */ |
| 131 | struct inode *inode; /* either the inode the event happened to or its parent */ | 137 | struct inode *inode; /* either the inode the event happened to or its parent */ |
| 132 | u32 mask; /* the type of access, bitwise OR for FS_* event types */ | ||
| 133 | }; | 138 | }; |
| 134 | 139 | ||
| 135 | /* | 140 | /* |
| @@ -288,6 +293,7 @@ typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t; | |||
| 288 | struct fsnotify_mark_connector { | 293 | struct fsnotify_mark_connector { |
| 289 | spinlock_t lock; | 294 | spinlock_t lock; |
| 290 | unsigned int type; /* Type of object [lock] */ | 295 | unsigned int type; /* Type of object [lock] */ |
| 296 | __kernel_fsid_t fsid; /* fsid of filesystem containing object */ | ||
| 291 | union { | 297 | union { |
| 292 | /* Object pointer [lock] */ | 298 | /* Object pointer [lock] */ |
| 293 | fsnotify_connp_t *obj; | 299 | fsnotify_connp_t *obj; |
| @@ -416,6 +422,9 @@ extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group); | |||
| 416 | extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); | 422 | extern struct fsnotify_event *fsnotify_peek_first_event(struct fsnotify_group *group); |
| 417 | /* return AND dequeue the first event on the notification queue */ | 423 | /* return AND dequeue the first event on the notification queue */ |
| 418 | extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); | 424 | extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group *group); |
| 425 | /* Remove event queued in the notification list */ | ||
| 426 | extern void fsnotify_remove_queued_event(struct fsnotify_group *group, | ||
| 427 | struct fsnotify_event *event); | ||
| 419 | 428 | ||
| 420 | /* functions used to manipulate the marks attached to inodes */ | 429 | /* functions used to manipulate the marks attached to inodes */ |
| 421 | 430 | ||
| @@ -428,28 +437,35 @@ extern void fsnotify_init_mark(struct fsnotify_mark *mark, | |||
| 428 | /* Find mark belonging to given group in the list of marks */ | 437 | /* Find mark belonging to given group in the list of marks */ |
| 429 | extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, | 438 | extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp, |
| 430 | struct fsnotify_group *group); | 439 | struct fsnotify_group *group); |
| 440 | /* Get cached fsid of filesystem containing object */ | ||
| 441 | extern int fsnotify_get_conn_fsid(const struct fsnotify_mark_connector *conn, | ||
| 442 | __kernel_fsid_t *fsid); | ||
| 431 | /* attach the mark to the object */ | 443 | /* attach the mark to the object */ |
| 432 | extern int fsnotify_add_mark(struct fsnotify_mark *mark, | 444 | extern int fsnotify_add_mark(struct fsnotify_mark *mark, |
| 433 | fsnotify_connp_t *connp, unsigned int type, | 445 | fsnotify_connp_t *connp, unsigned int type, |
| 434 | int allow_dups); | 446 | int allow_dups, __kernel_fsid_t *fsid); |
| 435 | extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, | 447 | extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, |
| 436 | fsnotify_connp_t *connp, unsigned int type, | 448 | fsnotify_connp_t *connp, |
| 437 | int allow_dups); | 449 | unsigned int type, int allow_dups, |
| 450 | __kernel_fsid_t *fsid); | ||
| 451 | |||
| 438 | /* attach the mark to the inode */ | 452 | /* attach the mark to the inode */ |
| 439 | static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, | 453 | static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark, |
| 440 | struct inode *inode, | 454 | struct inode *inode, |
| 441 | int allow_dups) | 455 | int allow_dups) |
| 442 | { | 456 | { |
| 443 | return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, | 457 | return fsnotify_add_mark(mark, &inode->i_fsnotify_marks, |
| 444 | FSNOTIFY_OBJ_TYPE_INODE, allow_dups); | 458 | FSNOTIFY_OBJ_TYPE_INODE, allow_dups, NULL); |
| 445 | } | 459 | } |
| 446 | static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, | 460 | static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark, |
| 447 | struct inode *inode, | 461 | struct inode *inode, |
| 448 | int allow_dups) | 462 | int allow_dups) |
| 449 | { | 463 | { |
| 450 | return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, | 464 | return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks, |
| 451 | FSNOTIFY_OBJ_TYPE_INODE, allow_dups); | 465 | FSNOTIFY_OBJ_TYPE_INODE, allow_dups, |
| 466 | NULL); | ||
| 452 | } | 467 | } |
| 468 | |||
| 453 | /* given a group and a mark, flag mark to be freed when all references are dropped */ | 469 | /* given a group and a mark, flag mark to be freed when all references are dropped */ |
| 454 | extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, | 470 | extern void fsnotify_destroy_mark(struct fsnotify_mark *mark, |
| 455 | struct fsnotify_group *group); | 471 | struct fsnotify_group *group); |
| @@ -479,9 +495,12 @@ extern void fsnotify_put_mark(struct fsnotify_mark *mark); | |||
| 479 | extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); | 495 | extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info); |
| 480 | extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); | 496 | extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info); |
| 481 | 497 | ||
| 482 | /* put here because inotify does some weird stuff when destroying watches */ | 498 | static inline void fsnotify_init_event(struct fsnotify_event *event, |
| 483 | extern void fsnotify_init_event(struct fsnotify_event *event, | 499 | struct inode *inode) |
| 484 | struct inode *to_tell, u32 mask); | 500 | { |
| 501 | INIT_LIST_HEAD(&event->list); | ||
| 502 | event->inode = inode; | ||
| 503 | } | ||
| 485 | 504 | ||
| 486 | #else | 505 | #else |
| 487 | 506 | ||
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h new file mode 100644 index 000000000000..3a91130a4fbd --- /dev/null +++ b/include/linux/generic-radix-tree.h | |||
| @@ -0,0 +1,231 @@ | |||
| 1 | #ifndef _LINUX_GENERIC_RADIX_TREE_H | ||
| 2 | #define _LINUX_GENERIC_RADIX_TREE_H | ||
| 3 | |||
| 4 | /** | ||
| 5 | * DOC: Generic radix trees/sparse arrays: | ||
| 6 | * | ||
| 7 | * Very simple and minimalistic, supporting arbitrary size entries up to | ||
| 8 | * PAGE_SIZE. | ||
| 9 | * | ||
| 10 | * A genradix is defined with the type it will store, like so: | ||
| 11 | * | ||
| 12 | * static GENRADIX(struct foo) foo_genradix; | ||
| 13 | * | ||
| 14 | * The main operations are: | ||
| 15 | * | ||
| 16 | * - genradix_init(radix) - initialize an empty genradix | ||
| 17 | * | ||
| 18 | * - genradix_free(radix) - free all memory owned by the genradix and | ||
| 19 | * reinitialize it | ||
| 20 | * | ||
| 21 | * - genradix_ptr(radix, idx) - gets a pointer to the entry at idx, returning | ||
| 22 | * NULL if that entry does not exist | ||
| 23 | * | ||
| 24 | * - genradix_ptr_alloc(radix, idx, gfp) - gets a pointer to an entry, | ||
| 25 | * allocating it if necessary | ||
| 26 | * | ||
| 27 | * - genradix_for_each(radix, iter, p) - iterate over each entry in a genradix | ||
| 28 | * | ||
| 29 | * The radix tree allocates one page of entries at a time, so entries may exist | ||
| 30 | * that were never explicitly allocated - they will be initialized to all | ||
| 31 | * zeroes. | ||
| 32 | * | ||
| 33 | * Internally, a genradix is just a radix tree of pages, and indexing works in | ||
| 34 | * terms of byte offsets. The wrappers in this header file use sizeof on the | ||
| 35 | * type the radix contains to calculate a byte offset from the index - see | ||
| 36 | * __idx_to_offset. | ||
| 37 | */ | ||
| 38 | |||
| 39 | #include <asm/page.h> | ||
| 40 | #include <linux/bug.h> | ||
| 41 | #include <linux/kernel.h> | ||
| 42 | #include <linux/log2.h> | ||
| 43 | |||
| 44 | struct genradix_root; | ||
| 45 | |||
| 46 | struct __genradix { | ||
| 47 | struct genradix_root __rcu *root; | ||
| 48 | }; | ||
| 49 | |||
| 50 | /* | ||
| 51 | * NOTE: currently, sizeof(_type) must not be larger than PAGE_SIZE: | ||
| 52 | */ | ||
| 53 | |||
| 54 | #define __GENRADIX_INITIALIZER \ | ||
| 55 | { \ | ||
| 56 | .tree = { \ | ||
| 57 | .root = NULL, \ | ||
| 58 | } \ | ||
| 59 | } | ||
| 60 | |||
| 61 | /* | ||
| 62 | * We use a 0 size array to stash the type we're storing without taking any | ||
| 63 | * space at runtime - then the various accessor macros can use typeof() to get | ||
| 64 | * to it for casts/sizeof - we also force the alignment so that storing a type | ||
| 65 | * with a ridiculous alignment doesn't blow up the alignment or size of the | ||
| 66 | * genradix. | ||
| 67 | */ | ||
| 68 | |||
| 69 | #define GENRADIX(_type) \ | ||
| 70 | struct { \ | ||
| 71 | struct __genradix tree; \ | ||
| 72 | _type type[0] __aligned(1); \ | ||
| 73 | } | ||
| 74 | |||
| 75 | #define DEFINE_GENRADIX(_name, _type) \ | ||
| 76 | GENRADIX(_type) _name = __GENRADIX_INITIALIZER | ||
| 77 | |||
| 78 | /** | ||
| 79 | * genradix_init - initialize a genradix | ||
| 80 | * @_radix: genradix to initialize | ||
| 81 | * | ||
| 82 | * Does not fail | ||
| 83 | */ | ||
| 84 | #define genradix_init(_radix) \ | ||
| 85 | do { \ | ||
| 86 | *(_radix) = (typeof(*_radix)) __GENRADIX_INITIALIZER; \ | ||
| 87 | } while (0) | ||
| 88 | |||
| 89 | void __genradix_free(struct __genradix *); | ||
| 90 | |||
| 91 | /** | ||
| 92 | * genradix_free: free all memory owned by a genradix | ||
| 93 | * @_radix: the genradix to free | ||
| 94 | * | ||
| 95 | * After freeing, @_radix will be reinitialized and empty | ||
| 96 | */ | ||
| 97 | #define genradix_free(_radix) __genradix_free(&(_radix)->tree) | ||
| 98 | |||
| 99 | static inline size_t __idx_to_offset(size_t idx, size_t obj_size) | ||
| 100 | { | ||
| 101 | if (__builtin_constant_p(obj_size)) | ||
| 102 | BUILD_BUG_ON(obj_size > PAGE_SIZE); | ||
| 103 | else | ||
| 104 | BUG_ON(obj_size > PAGE_SIZE); | ||
| 105 | |||
| 106 | if (!is_power_of_2(obj_size)) { | ||
| 107 | size_t objs_per_page = PAGE_SIZE / obj_size; | ||
| 108 | |||
| 109 | return (idx / objs_per_page) * PAGE_SIZE + | ||
| 110 | (idx % objs_per_page) * obj_size; | ||
| 111 | } else { | ||
| 112 | return idx * obj_size; | ||
| 113 | } | ||
| 114 | } | ||
| 115 | |||
| 116 | #define __genradix_cast(_radix) (typeof((_radix)->type[0]) *) | ||
| 117 | #define __genradix_obj_size(_radix) sizeof((_radix)->type[0]) | ||
| 118 | #define __genradix_idx_to_offset(_radix, _idx) \ | ||
| 119 | __idx_to_offset(_idx, __genradix_obj_size(_radix)) | ||
| 120 | |||
| 121 | void *__genradix_ptr(struct __genradix *, size_t); | ||
| 122 | |||
| 123 | /** | ||
| 124 | * genradix_ptr - get a pointer to a genradix entry | ||
| 125 | * @_radix: genradix to access | ||
| 126 | * @_idx: index to fetch | ||
| 127 | * | ||
| 128 | * Returns a pointer to entry at @_idx, or NULL if that entry does not exist. | ||
| 129 | */ | ||
| 130 | #define genradix_ptr(_radix, _idx) \ | ||
| 131 | (__genradix_cast(_radix) \ | ||
| 132 | __genradix_ptr(&(_radix)->tree, \ | ||
| 133 | __genradix_idx_to_offset(_radix, _idx))) | ||
| 134 | |||
| 135 | void *__genradix_ptr_alloc(struct __genradix *, size_t, gfp_t); | ||
| 136 | |||
| 137 | /** | ||
| 138 | * genradix_ptr_alloc - get a pointer to a genradix entry, allocating it | ||
| 139 | * if necessary | ||
| 140 | * @_radix: genradix to access | ||
| 141 | * @_idx: index to fetch | ||
| 142 | * @_gfp: gfp mask | ||
| 143 | * | ||
| 144 | * Returns a pointer to entry at @_idx, or NULL on allocation failure | ||
| 145 | */ | ||
| 146 | #define genradix_ptr_alloc(_radix, _idx, _gfp) \ | ||
| 147 | (__genradix_cast(_radix) \ | ||
| 148 | __genradix_ptr_alloc(&(_radix)->tree, \ | ||
| 149 | __genradix_idx_to_offset(_radix, _idx), \ | ||
| 150 | _gfp)) | ||
| 151 | |||
| 152 | struct genradix_iter { | ||
| 153 | size_t offset; | ||
| 154 | size_t pos; | ||
| 155 | }; | ||
| 156 | |||
| 157 | /** | ||
| 158 | * genradix_iter_init - initialize a genradix_iter | ||
| 159 | * @_radix: genradix that will be iterated over | ||
| 160 | * @_idx: index to start iterating from | ||
| 161 | */ | ||
| 162 | #define genradix_iter_init(_radix, _idx) \ | ||
| 163 | ((struct genradix_iter) { \ | ||
| 164 | .pos = (_idx), \ | ||
| 165 | .offset = __genradix_idx_to_offset((_radix), (_idx)),\ | ||
| 166 | }) | ||
| 167 | |||
| 168 | void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t); | ||
| 169 | |||
| 170 | /** | ||
| 171 | * genradix_iter_peek - get first entry at or above iterator's current | ||
| 172 | * position | ||
| 173 | * @_iter: a genradix_iter | ||
| 174 | * @_radix: genradix being iterated over | ||
| 175 | * | ||
| 176 | * If no more entries exist at or above @_iter's current position, returns NULL | ||
| 177 | */ | ||
| 178 | #define genradix_iter_peek(_iter, _radix) \ | ||
| 179 | (__genradix_cast(_radix) \ | ||
| 180 | __genradix_iter_peek(_iter, &(_radix)->tree, \ | ||
| 181 | PAGE_SIZE / __genradix_obj_size(_radix))) | ||
| 182 | |||
| 183 | static inline void __genradix_iter_advance(struct genradix_iter *iter, | ||
| 184 | size_t obj_size) | ||
| 185 | { | ||
| 186 | iter->offset += obj_size; | ||
| 187 | |||
| 188 | if (!is_power_of_2(obj_size) && | ||
| 189 | (iter->offset & (PAGE_SIZE - 1)) + obj_size > PAGE_SIZE) | ||
| 190 | iter->offset = round_up(iter->offset, PAGE_SIZE); | ||
| 191 | |||
| 192 | iter->pos++; | ||
| 193 | } | ||
| 194 | |||
| 195 | #define genradix_iter_advance(_iter, _radix) \ | ||
| 196 | __genradix_iter_advance(_iter, __genradix_obj_size(_radix)) | ||
| 197 | |||
| 198 | #define genradix_for_each_from(_radix, _iter, _p, _start) \ | ||
| 199 | for (_iter = genradix_iter_init(_radix, _start); \ | ||
| 200 | (_p = genradix_iter_peek(&_iter, _radix)) != NULL; \ | ||
| 201 | genradix_iter_advance(&_iter, _radix)) | ||
| 202 | |||
| 203 | /** | ||
| 204 | * genradix_for_each - iterate over entry in a genradix | ||
| 205 | * @_radix: genradix to iterate over | ||
| 206 | * @_iter: a genradix_iter to track current position | ||
| 207 | * @_p: pointer to genradix entry type | ||
| 208 | * | ||
| 209 | * On every iteration, @_p will point to the current entry, and @_iter.pos | ||
| 210 | * will be the current entry's index. | ||
| 211 | */ | ||
| 212 | #define genradix_for_each(_radix, _iter, _p) \ | ||
| 213 | genradix_for_each_from(_radix, _iter, _p, 0) | ||
| 214 | |||
| 215 | int __genradix_prealloc(struct __genradix *, size_t, gfp_t); | ||
| 216 | |||
| 217 | /** | ||
| 218 | * genradix_prealloc - preallocate entries in a generic radix tree | ||
| 219 | * @_radix: genradix to preallocate | ||
| 220 | * @_nr: number of entries to preallocate | ||
| 221 | * @_gfp: gfp mask | ||
| 222 | * | ||
| 223 | * Returns 0 on success, -ENOMEM on failure | ||
| 224 | */ | ||
| 225 | #define genradix_prealloc(_radix, _nr, _gfp) \ | ||
| 226 | __genradix_prealloc(&(_radix)->tree, \ | ||
| 227 | __genradix_idx_to_offset(_radix, _nr + 1),\ | ||
| 228 | _gfp) | ||
| 229 | |||
| 230 | |||
| 231 | #endif /* _LINUX_GENERIC_RADIX_TREE_H */ | ||
diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 5f5e25fd6149..fdab7de7490d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h | |||
| @@ -24,21 +24,21 @@ struct vm_area_struct; | |||
| 24 | #define ___GFP_HIGH 0x20u | 24 | #define ___GFP_HIGH 0x20u |
| 25 | #define ___GFP_IO 0x40u | 25 | #define ___GFP_IO 0x40u |
| 26 | #define ___GFP_FS 0x80u | 26 | #define ___GFP_FS 0x80u |
| 27 | #define ___GFP_WRITE 0x100u | 27 | #define ___GFP_ZERO 0x100u |
| 28 | #define ___GFP_NOWARN 0x200u | 28 | #define ___GFP_ATOMIC 0x200u |
| 29 | #define ___GFP_RETRY_MAYFAIL 0x400u | 29 | #define ___GFP_DIRECT_RECLAIM 0x400u |
| 30 | #define ___GFP_NOFAIL 0x800u | 30 | #define ___GFP_KSWAPD_RECLAIM 0x800u |
| 31 | #define ___GFP_NORETRY 0x1000u | 31 | #define ___GFP_WRITE 0x1000u |
| 32 | #define ___GFP_MEMALLOC 0x2000u | 32 | #define ___GFP_NOWARN 0x2000u |
| 33 | #define ___GFP_COMP 0x4000u | 33 | #define ___GFP_RETRY_MAYFAIL 0x4000u |
| 34 | #define ___GFP_ZERO 0x8000u | 34 | #define ___GFP_NOFAIL 0x8000u |
| 35 | #define ___GFP_NOMEMALLOC 0x10000u | 35 | #define ___GFP_NORETRY 0x10000u |
| 36 | #define ___GFP_HARDWALL 0x20000u | 36 | #define ___GFP_MEMALLOC 0x20000u |
| 37 | #define ___GFP_THISNODE 0x40000u | 37 | #define ___GFP_COMP 0x40000u |
| 38 | #define ___GFP_ATOMIC 0x80000u | 38 | #define ___GFP_NOMEMALLOC 0x80000u |
| 39 | #define ___GFP_ACCOUNT 0x100000u | 39 | #define ___GFP_HARDWALL 0x100000u |
| 40 | #define ___GFP_DIRECT_RECLAIM 0x200000u | 40 | #define ___GFP_THISNODE 0x200000u |
| 41 | #define ___GFP_KSWAPD_RECLAIM 0x400000u | 41 | #define ___GFP_ACCOUNT 0x400000u |
| 42 | #ifdef CONFIG_LOCKDEP | 42 | #ifdef CONFIG_LOCKDEP |
| 43 | #define ___GFP_NOLOCKDEP 0x800000u | 43 | #define ___GFP_NOLOCKDEP 0x800000u |
| 44 | #else | 44 | #else |
diff --git a/include/linux/gnss.h b/include/linux/gnss.h index 43546977098c..36968a0f33e8 100644 --- a/include/linux/gnss.h +++ b/include/linux/gnss.h | |||
| @@ -22,6 +22,7 @@ enum gnss_type { | |||
| 22 | GNSS_TYPE_NMEA = 0, | 22 | GNSS_TYPE_NMEA = 0, |
| 23 | GNSS_TYPE_SIRF, | 23 | GNSS_TYPE_SIRF, |
| 24 | GNSS_TYPE_UBX, | 24 | GNSS_TYPE_UBX, |
| 25 | GNSS_TYPE_MTK, | ||
| 25 | 26 | ||
| 26 | GNSS_TYPE_COUNT | 27 | GNSS_TYPE_COUNT |
| 27 | }; | 28 | }; |
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h index 07cddbf45186..01497910f023 100644 --- a/include/linux/gpio/driver.h +++ b/include/linux/gpio/driver.h | |||
| @@ -472,6 +472,11 @@ int gpiochip_irq_map(struct irq_domain *d, unsigned int irq, | |||
| 472 | irq_hw_number_t hwirq); | 472 | irq_hw_number_t hwirq); |
| 473 | void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); | 473 | void gpiochip_irq_unmap(struct irq_domain *d, unsigned int irq); |
| 474 | 474 | ||
| 475 | int gpiochip_irq_domain_activate(struct irq_domain *domain, | ||
| 476 | struct irq_data *data, bool reserve); | ||
| 477 | void gpiochip_irq_domain_deactivate(struct irq_domain *domain, | ||
| 478 | struct irq_data *data); | ||
| 479 | |||
| 475 | void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, | 480 | void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip, |
| 476 | struct irq_chip *irqchip, | 481 | struct irq_chip *irqchip, |
| 477 | unsigned int parent_irq, | 482 | unsigned int parent_irq, |
diff --git a/include/linux/gpio/machine.h b/include/linux/gpio/machine.h index daa44eac9241..69673be10213 100644 --- a/include/linux/gpio/machine.h +++ b/include/linux/gpio/machine.h | |||
| @@ -12,6 +12,8 @@ enum gpio_lookup_flags { | |||
| 12 | GPIO_OPEN_SOURCE = (1 << 2), | 12 | GPIO_OPEN_SOURCE = (1 << 2), |
| 13 | GPIO_PERSISTENT = (0 << 3), | 13 | GPIO_PERSISTENT = (0 << 3), |
| 14 | GPIO_TRANSITORY = (1 << 3), | 14 | GPIO_TRANSITORY = (1 << 3), |
| 15 | GPIO_PULL_UP = (1 << 4), | ||
| 16 | GPIO_PULL_DOWN = (1 << 5), | ||
| 15 | }; | 17 | }; |
| 16 | 18 | ||
| 17 | /** | 19 | /** |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index 0fbbcdf0c178..da0af631ded5 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
| @@ -60,8 +60,14 @@ extern void irq_enter(void); | |||
| 60 | */ | 60 | */ |
| 61 | extern void irq_exit(void); | 61 | extern void irq_exit(void); |
| 62 | 62 | ||
| 63 | #ifndef arch_nmi_enter | ||
| 64 | #define arch_nmi_enter() do { } while (0) | ||
| 65 | #define arch_nmi_exit() do { } while (0) | ||
| 66 | #endif | ||
| 67 | |||
| 63 | #define nmi_enter() \ | 68 | #define nmi_enter() \ |
| 64 | do { \ | 69 | do { \ |
| 70 | arch_nmi_enter(); \ | ||
| 65 | printk_nmi_enter(); \ | 71 | printk_nmi_enter(); \ |
| 66 | lockdep_off(); \ | 72 | lockdep_off(); \ |
| 67 | ftrace_nmi_enter(); \ | 73 | ftrace_nmi_enter(); \ |
| @@ -80,6 +86,7 @@ extern void irq_exit(void); | |||
| 80 | ftrace_nmi_exit(); \ | 86 | ftrace_nmi_exit(); \ |
| 81 | lockdep_on(); \ | 87 | lockdep_on(); \ |
| 82 | printk_nmi_exit(); \ | 88 | printk_nmi_exit(); \ |
| 89 | arch_nmi_exit(); \ | ||
| 83 | } while (0) | 90 | } while (0) |
| 84 | 91 | ||
| 85 | #endif /* LINUX_HARDIRQ_H */ | 92 | #endif /* LINUX_HARDIRQ_H */ |
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h index d2bacf502429..927ad6451105 100644 --- a/include/linux/hdmi.h +++ b/include/linux/hdmi.h | |||
| @@ -27,6 +27,21 @@ | |||
| 27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
| 28 | #include <linux/device.h> | 28 | #include <linux/device.h> |
| 29 | 29 | ||
| 30 | enum hdmi_packet_type { | ||
| 31 | HDMI_PACKET_TYPE_NULL = 0x00, | ||
| 32 | HDMI_PACKET_TYPE_AUDIO_CLOCK_REGEN = 0x01, | ||
| 33 | HDMI_PACKET_TYPE_AUDIO_SAMPLE = 0x02, | ||
| 34 | HDMI_PACKET_TYPE_GENERAL_CONTROL = 0x03, | ||
| 35 | HDMI_PACKET_TYPE_ACP = 0x04, | ||
| 36 | HDMI_PACKET_TYPE_ISRC1 = 0x05, | ||
| 37 | HDMI_PACKET_TYPE_ISRC2 = 0x06, | ||
| 38 | HDMI_PACKET_TYPE_ONE_BIT_AUDIO_SAMPLE = 0x07, | ||
| 39 | HDMI_PACKET_TYPE_DST_AUDIO = 0x08, | ||
| 40 | HDMI_PACKET_TYPE_HBR_AUDIO_STREAM = 0x09, | ||
| 41 | HDMI_PACKET_TYPE_GAMUT_METADATA = 0x0a, | ||
| 42 | /* + enum hdmi_infoframe_type */ | ||
| 43 | }; | ||
| 44 | |||
| 30 | enum hdmi_infoframe_type { | 45 | enum hdmi_infoframe_type { |
| 31 | HDMI_INFOFRAME_TYPE_VENDOR = 0x81, | 46 | HDMI_INFOFRAME_TYPE_VENDOR = 0x81, |
| 32 | HDMI_INFOFRAME_TYPE_AVI = 0x82, | 47 | HDMI_INFOFRAME_TYPE_AVI = 0x82, |
diff --git a/include/linux/hid-debug.h b/include/linux/hid-debug.h index 8663f216c563..2d6100edf204 100644 --- a/include/linux/hid-debug.h +++ b/include/linux/hid-debug.h | |||
| @@ -24,7 +24,10 @@ | |||
| 24 | 24 | ||
| 25 | #ifdef CONFIG_DEBUG_FS | 25 | #ifdef CONFIG_DEBUG_FS |
| 26 | 26 | ||
| 27 | #include <linux/kfifo.h> | ||
| 28 | |||
| 27 | #define HID_DEBUG_BUFSIZE 512 | 29 | #define HID_DEBUG_BUFSIZE 512 |
| 30 | #define HID_DEBUG_FIFOSIZE 512 | ||
| 28 | 31 | ||
| 29 | void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); | 32 | void hid_dump_input(struct hid_device *, struct hid_usage *, __s32); |
| 30 | void hid_dump_report(struct hid_device *, int , u8 *, int); | 33 | void hid_dump_report(struct hid_device *, int , u8 *, int); |
| @@ -37,11 +40,8 @@ void hid_debug_init(void); | |||
| 37 | void hid_debug_exit(void); | 40 | void hid_debug_exit(void); |
| 38 | void hid_debug_event(struct hid_device *, char *); | 41 | void hid_debug_event(struct hid_device *, char *); |
| 39 | 42 | ||
| 40 | |||
| 41 | struct hid_debug_list { | 43 | struct hid_debug_list { |
| 42 | char *hid_debug_buf; | 44 | DECLARE_KFIFO_PTR(hid_debug_fifo, char); |
| 43 | int head; | ||
| 44 | int tail; | ||
| 45 | struct fasync_struct *fasync; | 45 | struct fasync_struct *fasync; |
| 46 | struct hid_device *hdev; | 46 | struct hid_device *hdev; |
| 47 | struct list_head node; | 47 | struct list_head node; |
| @@ -64,4 +64,3 @@ struct hid_debug_list { | |||
| 64 | #endif | 64 | #endif |
| 65 | 65 | ||
| 66 | #endif | 66 | #endif |
| 67 | |||
diff --git a/include/linux/hid.h b/include/linux/hid.h index d99287327ef2..f9707d1dcb58 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h | |||
| @@ -430,7 +430,7 @@ struct hid_local { | |||
| 430 | */ | 430 | */ |
| 431 | 431 | ||
| 432 | struct hid_collection { | 432 | struct hid_collection { |
| 433 | struct hid_collection *parent; | 433 | int parent_idx; /* device->collection */ |
| 434 | unsigned type; | 434 | unsigned type; |
| 435 | unsigned usage; | 435 | unsigned usage; |
| 436 | unsigned level; | 436 | unsigned level; |
| @@ -658,7 +658,6 @@ struct hid_parser { | |||
| 658 | unsigned int *collection_stack; | 658 | unsigned int *collection_stack; |
| 659 | unsigned int collection_stack_ptr; | 659 | unsigned int collection_stack_ptr; |
| 660 | unsigned int collection_stack_size; | 660 | unsigned int collection_stack_size; |
| 661 | struct hid_collection *active_collection; | ||
| 662 | struct hid_device *device; | 661 | struct hid_device *device; |
| 663 | unsigned int scan_flags; | 662 | unsigned int scan_flags; |
| 664 | }; | 663 | }; |
diff --git a/include/linux/hmm.h b/include/linux/hmm.h index 66f9ebbb1df3..ad50b7b4f141 100644 --- a/include/linux/hmm.h +++ b/include/linux/hmm.h | |||
| @@ -468,7 +468,7 @@ struct hmm_devmem_ops { | |||
| 468 | * Note that mmap semaphore is held in read mode at least when this | 468 | * Note that mmap semaphore is held in read mode at least when this |
| 469 | * callback occurs, hence the vma is valid upon callback entry. | 469 | * callback occurs, hence the vma is valid upon callback entry. |
| 470 | */ | 470 | */ |
| 471 | int (*fault)(struct hmm_devmem *devmem, | 471 | vm_fault_t (*fault)(struct hmm_devmem *devmem, |
| 472 | struct vm_area_struct *vma, | 472 | struct vm_area_struct *vma, |
| 473 | unsigned long addr, | 473 | unsigned long addr, |
| 474 | const struct page *page, | 474 | const struct page *page, |
| @@ -511,7 +511,7 @@ struct hmm_devmem_ops { | |||
| 511 | * chunk, as an optimization. It must, however, prioritize the faulting address | 511 | * chunk, as an optimization. It must, however, prioritize the faulting address |
| 512 | * over all the others. | 512 | * over all the others. |
| 513 | */ | 513 | */ |
| 514 | typedef int (*dev_page_fault_t)(struct vm_area_struct *vma, | 514 | typedef vm_fault_t (*dev_page_fault_t)(struct vm_area_struct *vma, |
| 515 | unsigned long addr, | 515 | unsigned long addr, |
| 516 | const struct page *page, | 516 | const struct page *page, |
| 517 | unsigned int flags, | 517 | unsigned int flags, |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 087fd5f48c91..11943b60f208 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
| @@ -203,7 +203,6 @@ static inline void hugetlb_show_meminfo(void) | |||
| 203 | #define pud_huge(x) 0 | 203 | #define pud_huge(x) 0 |
| 204 | #define is_hugepage_only_range(mm, addr, len) 0 | 204 | #define is_hugepage_only_range(mm, addr, len) 0 |
| 205 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) | 205 | #define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; }) |
| 206 | #define hugetlb_fault(mm, vma, addr, flags) ({ BUG(); 0; }) | ||
| 207 | #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ | 206 | #define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \ |
| 208 | src_addr, pagep) ({ BUG(); 0; }) | 207 | src_addr, pagep) ({ BUG(); 0; }) |
| 209 | #define huge_pte_offset(mm, address, sz) 0 | 208 | #define huge_pte_offset(mm, address, sz) 0 |
| @@ -234,6 +233,13 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb, | |||
| 234 | { | 233 | { |
| 235 | BUG(); | 234 | BUG(); |
| 236 | } | 235 | } |
| 236 | static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, | ||
| 237 | struct vm_area_struct *vma, unsigned long address, | ||
| 238 | unsigned int flags) | ||
| 239 | { | ||
| 240 | BUG(); | ||
| 241 | return 0; | ||
| 242 | } | ||
| 237 | 243 | ||
| 238 | #endif /* !CONFIG_HUGETLB_PAGE */ | 244 | #endif /* !CONFIG_HUGETLB_PAGE */ |
| 239 | /* | 245 | /* |
| @@ -371,6 +377,8 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid, | |||
| 371 | nodemask_t *nmask); | 377 | nodemask_t *nmask); |
| 372 | struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, | 378 | struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma, |
| 373 | unsigned long address); | 379 | unsigned long address); |
| 380 | struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask, | ||
| 381 | int nid, nodemask_t *nmask); | ||
| 374 | int huge_add_to_page_cache(struct page *page, struct address_space *mapping, | 382 | int huge_add_to_page_cache(struct page *page, struct address_space *mapping, |
| 375 | pgoff_t idx); | 383 | pgoff_t idx); |
| 376 | 384 | ||
| @@ -493,17 +501,54 @@ static inline pgoff_t basepage_index(struct page *page) | |||
| 493 | extern int dissolve_free_huge_page(struct page *page); | 501 | extern int dissolve_free_huge_page(struct page *page); |
| 494 | extern int dissolve_free_huge_pages(unsigned long start_pfn, | 502 | extern int dissolve_free_huge_pages(unsigned long start_pfn, |
| 495 | unsigned long end_pfn); | 503 | unsigned long end_pfn); |
| 496 | static inline bool hugepage_migration_supported(struct hstate *h) | 504 | |
| 497 | { | ||
| 498 | #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION | 505 | #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
| 506 | #ifndef arch_hugetlb_migration_supported | ||
| 507 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) | ||
| 508 | { | ||
| 499 | if ((huge_page_shift(h) == PMD_SHIFT) || | 509 | if ((huge_page_shift(h) == PMD_SHIFT) || |
| 500 | (huge_page_shift(h) == PGDIR_SHIFT)) | 510 | (huge_page_shift(h) == PUD_SHIFT) || |
| 511 | (huge_page_shift(h) == PGDIR_SHIFT)) | ||
| 501 | return true; | 512 | return true; |
| 502 | else | 513 | else |
| 503 | return false; | 514 | return false; |
| 515 | } | ||
| 516 | #endif | ||
| 504 | #else | 517 | #else |
| 518 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) | ||
| 519 | { | ||
| 505 | return false; | 520 | return false; |
| 521 | } | ||
| 506 | #endif | 522 | #endif |
| 523 | |||
| 524 | static inline bool hugepage_migration_supported(struct hstate *h) | ||
| 525 | { | ||
| 526 | return arch_hugetlb_migration_supported(h); | ||
| 527 | } | ||
| 528 | |||
| 529 | /* | ||
| 530 | * Movability check is different as compared to migration check. | ||
| 531 | * It determines whether or not a huge page should be placed on | ||
| 532 | * movable zone or not. Movability of any huge page should be | ||
| 533 | * required only if huge page size is supported for migration. | ||
| 534 | * There wont be any reason for the huge page to be movable if | ||
| 535 | * it is not migratable to start with. Also the size of the huge | ||
| 536 | * page should be large enough to be placed under a movable zone | ||
| 537 | * and still feasible enough to be migratable. Just the presence | ||
| 538 | * in movable zone does not make the migration feasible. | ||
| 539 | * | ||
| 540 | * So even though large huge page sizes like the gigantic ones | ||
| 541 | * are migratable they should not be movable because its not | ||
| 542 | * feasible to migrate them from movable zone. | ||
| 543 | */ | ||
| 544 | static inline bool hugepage_movable_supported(struct hstate *h) | ||
| 545 | { | ||
| 546 | if (!hugepage_migration_supported(h)) | ||
| 547 | return false; | ||
| 548 | |||
| 549 | if (hstate_is_gigantic(h)) | ||
| 550 | return false; | ||
| 551 | return true; | ||
| 507 | } | 552 | } |
| 508 | 553 | ||
| 509 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | 554 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
| @@ -543,6 +588,26 @@ static inline void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr | |||
| 543 | set_huge_pte_at(mm, addr, ptep, pte); | 588 | set_huge_pte_at(mm, addr, ptep, pte); |
| 544 | } | 589 | } |
| 545 | #endif | 590 | #endif |
| 591 | |||
| 592 | #ifndef huge_ptep_modify_prot_start | ||
| 593 | #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start | ||
| 594 | static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, | ||
| 595 | unsigned long addr, pte_t *ptep) | ||
| 596 | { | ||
| 597 | return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep); | ||
| 598 | } | ||
| 599 | #endif | ||
| 600 | |||
| 601 | #ifndef huge_ptep_modify_prot_commit | ||
| 602 | #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit | ||
| 603 | static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, | ||
| 604 | unsigned long addr, pte_t *ptep, | ||
| 605 | pte_t old_pte, pte_t pte) | ||
| 606 | { | ||
| 607 | set_huge_pte_at(vma->vm_mm, addr, ptep, pte); | ||
| 608 | } | ||
| 609 | #endif | ||
| 610 | |||
| 546 | #else /* CONFIG_HUGETLB_PAGE */ | 611 | #else /* CONFIG_HUGETLB_PAGE */ |
| 547 | struct hstate {}; | 612 | struct hstate {}; |
| 548 | #define alloc_huge_page(v, a, r) NULL | 613 | #define alloc_huge_page(v, a, r) NULL |
| @@ -602,6 +667,11 @@ static inline bool hugepage_migration_supported(struct hstate *h) | |||
| 602 | return false; | 667 | return false; |
| 603 | } | 668 | } |
| 604 | 669 | ||
| 670 | static inline bool hugepage_movable_supported(struct hstate *h) | ||
| 671 | { | ||
| 672 | return false; | ||
| 673 | } | ||
| 674 | |||
| 605 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, | 675 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
| 606 | struct mm_struct *mm, pte_t *pte) | 676 | struct mm_struct *mm, pte_t *pte) |
| 607 | { | 677 | { |
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h index f0885cc01db6..64698ec8f2ac 100644 --- a/include/linux/hyperv.h +++ b/include/linux/hyperv.h | |||
| @@ -222,8 +222,8 @@ static inline u32 hv_get_avail_to_write_percent( | |||
| 222 | * struct contains the fundamental information about an offer. | 222 | * struct contains the fundamental information about an offer. |
| 223 | */ | 223 | */ |
| 224 | struct vmbus_channel_offer { | 224 | struct vmbus_channel_offer { |
| 225 | uuid_le if_type; | 225 | guid_t if_type; |
| 226 | uuid_le if_instance; | 226 | guid_t if_instance; |
| 227 | 227 | ||
| 228 | /* | 228 | /* |
| 229 | * These two fields are not currently used. | 229 | * These two fields are not currently used. |
| @@ -614,8 +614,8 @@ struct vmbus_channel_initiate_contact { | |||
| 614 | /* Hyper-V socket: guest's connect()-ing to host */ | 614 | /* Hyper-V socket: guest's connect()-ing to host */ |
| 615 | struct vmbus_channel_tl_connect_request { | 615 | struct vmbus_channel_tl_connect_request { |
| 616 | struct vmbus_channel_message_header header; | 616 | struct vmbus_channel_message_header header; |
| 617 | uuid_le guest_endpoint_id; | 617 | guid_t guest_endpoint_id; |
| 618 | uuid_le host_service_id; | 618 | guid_t host_service_id; |
| 619 | } __packed; | 619 | } __packed; |
| 620 | 620 | ||
| 621 | struct vmbus_channel_version_response { | 621 | struct vmbus_channel_version_response { |
| @@ -714,7 +714,7 @@ enum vmbus_device_type { | |||
| 714 | 714 | ||
| 715 | struct vmbus_device { | 715 | struct vmbus_device { |
| 716 | u16 dev_type; | 716 | u16 dev_type; |
| 717 | uuid_le guid; | 717 | guid_t guid; |
| 718 | bool perf_device; | 718 | bool perf_device; |
| 719 | }; | 719 | }; |
| 720 | 720 | ||
| @@ -751,6 +751,19 @@ struct vmbus_channel { | |||
| 751 | u64 interrupts; /* Host to Guest interrupts */ | 751 | u64 interrupts; /* Host to Guest interrupts */ |
| 752 | u64 sig_events; /* Guest to Host events */ | 752 | u64 sig_events; /* Guest to Host events */ |
| 753 | 753 | ||
| 754 | /* | ||
| 755 | * Guest to host interrupts caused by the outbound ring buffer changing | ||
| 756 | * from empty to not empty. | ||
| 757 | */ | ||
| 758 | u64 intr_out_empty; | ||
| 759 | |||
| 760 | /* | ||
| 761 | * Indicates that a full outbound ring buffer was encountered. The flag | ||
| 762 | * is set to true when a full outbound ring buffer is encountered and | ||
| 763 | * set to false when a write to the outbound ring buffer is completed. | ||
| 764 | */ | ||
| 765 | bool out_full_flag; | ||
| 766 | |||
| 754 | /* Channel callback's invoked in softirq context */ | 767 | /* Channel callback's invoked in softirq context */ |
| 755 | struct tasklet_struct callback_event; | 768 | struct tasklet_struct callback_event; |
| 756 | void (*onchannel_callback)(void *context); | 769 | void (*onchannel_callback)(void *context); |
| @@ -903,6 +916,24 @@ struct vmbus_channel { | |||
| 903 | * vmbus_connection.work_queue and hang: see vmbus_process_offer(). | 916 | * vmbus_connection.work_queue and hang: see vmbus_process_offer(). |
| 904 | */ | 917 | */ |
| 905 | struct work_struct add_channel_work; | 918 | struct work_struct add_channel_work; |
| 919 | |||
| 920 | /* | ||
| 921 | * Guest to host interrupts caused by the inbound ring buffer changing | ||
| 922 | * from full to not full while a packet is waiting. | ||
| 923 | */ | ||
| 924 | u64 intr_in_full; | ||
| 925 | |||
| 926 | /* | ||
| 927 | * The total number of write operations that encountered a full | ||
| 928 | * outbound ring buffer. | ||
| 929 | */ | ||
| 930 | u64 out_full_total; | ||
| 931 | |||
| 932 | /* | ||
| 933 | * The number of write operations that were the first to encounter a | ||
| 934 | * full outbound ring buffer. | ||
| 935 | */ | ||
| 936 | u64 out_full_first; | ||
| 906 | }; | 937 | }; |
| 907 | 938 | ||
| 908 | static inline bool is_hvsock_channel(const struct vmbus_channel *c) | 939 | static inline bool is_hvsock_channel(const struct vmbus_channel *c) |
| @@ -936,6 +967,21 @@ static inline void *get_per_channel_state(struct vmbus_channel *c) | |||
| 936 | static inline void set_channel_pending_send_size(struct vmbus_channel *c, | 967 | static inline void set_channel_pending_send_size(struct vmbus_channel *c, |
| 937 | u32 size) | 968 | u32 size) |
| 938 | { | 969 | { |
| 970 | unsigned long flags; | ||
| 971 | |||
| 972 | if (size) { | ||
| 973 | spin_lock_irqsave(&c->outbound.ring_lock, flags); | ||
| 974 | ++c->out_full_total; | ||
| 975 | |||
| 976 | if (!c->out_full_flag) { | ||
| 977 | ++c->out_full_first; | ||
| 978 | c->out_full_flag = true; | ||
| 979 | } | ||
| 980 | spin_unlock_irqrestore(&c->outbound.ring_lock, flags); | ||
| 981 | } else { | ||
| 982 | c->out_full_flag = false; | ||
| 983 | } | ||
| 984 | |||
| 939 | c->outbound.ring_buffer->pending_send_sz = size; | 985 | c->outbound.ring_buffer->pending_send_sz = size; |
| 940 | } | 986 | } |
| 941 | 987 | ||
| @@ -1096,7 +1142,7 @@ struct hv_driver { | |||
| 1096 | bool hvsock; | 1142 | bool hvsock; |
| 1097 | 1143 | ||
| 1098 | /* the device type supported by this driver */ | 1144 | /* the device type supported by this driver */ |
| 1099 | uuid_le dev_type; | 1145 | guid_t dev_type; |
| 1100 | const struct hv_vmbus_device_id *id_table; | 1146 | const struct hv_vmbus_device_id *id_table; |
| 1101 | 1147 | ||
| 1102 | struct device_driver driver; | 1148 | struct device_driver driver; |
| @@ -1116,10 +1162,10 @@ struct hv_driver { | |||
| 1116 | /* Base device object */ | 1162 | /* Base device object */ |
| 1117 | struct hv_device { | 1163 | struct hv_device { |
| 1118 | /* the device type id of this device */ | 1164 | /* the device type id of this device */ |
| 1119 | uuid_le dev_type; | 1165 | guid_t dev_type; |
| 1120 | 1166 | ||
| 1121 | /* the device instance id of this device */ | 1167 | /* the device instance id of this device */ |
| 1122 | uuid_le dev_instance; | 1168 | guid_t dev_instance; |
| 1123 | u16 vendor_id; | 1169 | u16 vendor_id; |
| 1124 | u16 device_id; | 1170 | u16 device_id; |
| 1125 | 1171 | ||
| @@ -1159,8 +1205,9 @@ struct hv_ring_buffer_debug_info { | |||
| 1159 | u32 bytes_avail_towrite; | 1205 | u32 bytes_avail_towrite; |
| 1160 | }; | 1206 | }; |
| 1161 | 1207 | ||
| 1162 | void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, | 1208 | |
| 1163 | struct hv_ring_buffer_debug_info *debug_info); | 1209 | int hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info, |
| 1210 | struct hv_ring_buffer_debug_info *debug_info); | ||
| 1164 | 1211 | ||
| 1165 | /* Vmbus interface */ | 1212 | /* Vmbus interface */ |
| 1166 | #define vmbus_driver_register(driver) \ | 1213 | #define vmbus_driver_register(driver) \ |
| @@ -1187,102 +1234,102 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); | |||
| 1187 | * {f8615163-df3e-46c5-913f-f2d2f965ed0e} | 1234 | * {f8615163-df3e-46c5-913f-f2d2f965ed0e} |
| 1188 | */ | 1235 | */ |
| 1189 | #define HV_NIC_GUID \ | 1236 | #define HV_NIC_GUID \ |
| 1190 | .guid = UUID_LE(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ | 1237 | .guid = GUID_INIT(0xf8615163, 0xdf3e, 0x46c5, 0x91, 0x3f, \ |
| 1191 | 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) | 1238 | 0xf2, 0xd2, 0xf9, 0x65, 0xed, 0x0e) |
| 1192 | 1239 | ||
| 1193 | /* | 1240 | /* |
| 1194 | * IDE GUID | 1241 | * IDE GUID |
| 1195 | * {32412632-86cb-44a2-9b5c-50d1417354f5} | 1242 | * {32412632-86cb-44a2-9b5c-50d1417354f5} |
| 1196 | */ | 1243 | */ |
| 1197 | #define HV_IDE_GUID \ | 1244 | #define HV_IDE_GUID \ |
| 1198 | .guid = UUID_LE(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ | 1245 | .guid = GUID_INIT(0x32412632, 0x86cb, 0x44a2, 0x9b, 0x5c, \ |
| 1199 | 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) | 1246 | 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5) |
| 1200 | 1247 | ||
| 1201 | /* | 1248 | /* |
| 1202 | * SCSI GUID | 1249 | * SCSI GUID |
| 1203 | * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} | 1250 | * {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} |
| 1204 | */ | 1251 | */ |
| 1205 | #define HV_SCSI_GUID \ | 1252 | #define HV_SCSI_GUID \ |
| 1206 | .guid = UUID_LE(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ | 1253 | .guid = GUID_INIT(0xba6163d9, 0x04a1, 0x4d29, 0xb6, 0x05, \ |
| 1207 | 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) | 1254 | 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f) |
| 1208 | 1255 | ||
| 1209 | /* | 1256 | /* |
| 1210 | * Shutdown GUID | 1257 | * Shutdown GUID |
| 1211 | * {0e0b6031-5213-4934-818b-38d90ced39db} | 1258 | * {0e0b6031-5213-4934-818b-38d90ced39db} |
| 1212 | */ | 1259 | */ |
| 1213 | #define HV_SHUTDOWN_GUID \ | 1260 | #define HV_SHUTDOWN_GUID \ |
| 1214 | .guid = UUID_LE(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ | 1261 | .guid = GUID_INIT(0x0e0b6031, 0x5213, 0x4934, 0x81, 0x8b, \ |
| 1215 | 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) | 1262 | 0x38, 0xd9, 0x0c, 0xed, 0x39, 0xdb) |
| 1216 | 1263 | ||
| 1217 | /* | 1264 | /* |
| 1218 | * Time Synch GUID | 1265 | * Time Synch GUID |
| 1219 | * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} | 1266 | * {9527E630-D0AE-497b-ADCE-E80AB0175CAF} |
| 1220 | */ | 1267 | */ |
| 1221 | #define HV_TS_GUID \ | 1268 | #define HV_TS_GUID \ |
| 1222 | .guid = UUID_LE(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ | 1269 | .guid = GUID_INIT(0x9527e630, 0xd0ae, 0x497b, 0xad, 0xce, \ |
| 1223 | 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) | 1270 | 0xe8, 0x0a, 0xb0, 0x17, 0x5c, 0xaf) |
| 1224 | 1271 | ||
| 1225 | /* | 1272 | /* |
| 1226 | * Heartbeat GUID | 1273 | * Heartbeat GUID |
| 1227 | * {57164f39-9115-4e78-ab55-382f3bd5422d} | 1274 | * {57164f39-9115-4e78-ab55-382f3bd5422d} |
| 1228 | */ | 1275 | */ |
| 1229 | #define HV_HEART_BEAT_GUID \ | 1276 | #define HV_HEART_BEAT_GUID \ |
| 1230 | .guid = UUID_LE(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ | 1277 | .guid = GUID_INIT(0x57164f39, 0x9115, 0x4e78, 0xab, 0x55, \ |
| 1231 | 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) | 1278 | 0x38, 0x2f, 0x3b, 0xd5, 0x42, 0x2d) |
| 1232 | 1279 | ||
| 1233 | /* | 1280 | /* |
| 1234 | * KVP GUID | 1281 | * KVP GUID |
| 1235 | * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} | 1282 | * {a9a0f4e7-5a45-4d96-b827-8a841e8c03e6} |
| 1236 | */ | 1283 | */ |
| 1237 | #define HV_KVP_GUID \ | 1284 | #define HV_KVP_GUID \ |
| 1238 | .guid = UUID_LE(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ | 1285 | .guid = GUID_INIT(0xa9a0f4e7, 0x5a45, 0x4d96, 0xb8, 0x27, \ |
| 1239 | 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) | 1286 | 0x8a, 0x84, 0x1e, 0x8c, 0x03, 0xe6) |
| 1240 | 1287 | ||
| 1241 | /* | 1288 | /* |
| 1242 | * Dynamic memory GUID | 1289 | * Dynamic memory GUID |
| 1243 | * {525074dc-8985-46e2-8057-a307dc18a502} | 1290 | * {525074dc-8985-46e2-8057-a307dc18a502} |
| 1244 | */ | 1291 | */ |
| 1245 | #define HV_DM_GUID \ | 1292 | #define HV_DM_GUID \ |
| 1246 | .guid = UUID_LE(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ | 1293 | .guid = GUID_INIT(0x525074dc, 0x8985, 0x46e2, 0x80, 0x57, \ |
| 1247 | 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) | 1294 | 0xa3, 0x07, 0xdc, 0x18, 0xa5, 0x02) |
| 1248 | 1295 | ||
| 1249 | /* | 1296 | /* |
| 1250 | * Mouse GUID | 1297 | * Mouse GUID |
| 1251 | * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} | 1298 | * {cfa8b69e-5b4a-4cc0-b98b-8ba1a1f3f95a} |
| 1252 | */ | 1299 | */ |
| 1253 | #define HV_MOUSE_GUID \ | 1300 | #define HV_MOUSE_GUID \ |
| 1254 | .guid = UUID_LE(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ | 1301 | .guid = GUID_INIT(0xcfa8b69e, 0x5b4a, 0x4cc0, 0xb9, 0x8b, \ |
| 1255 | 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) | 1302 | 0x8b, 0xa1, 0xa1, 0xf3, 0xf9, 0x5a) |
| 1256 | 1303 | ||
| 1257 | /* | 1304 | /* |
| 1258 | * Keyboard GUID | 1305 | * Keyboard GUID |
| 1259 | * {f912ad6d-2b17-48ea-bd65-f927a61c7684} | 1306 | * {f912ad6d-2b17-48ea-bd65-f927a61c7684} |
| 1260 | */ | 1307 | */ |
| 1261 | #define HV_KBD_GUID \ | 1308 | #define HV_KBD_GUID \ |
| 1262 | .guid = UUID_LE(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ | 1309 | .guid = GUID_INIT(0xf912ad6d, 0x2b17, 0x48ea, 0xbd, 0x65, \ |
| 1263 | 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) | 1310 | 0xf9, 0x27, 0xa6, 0x1c, 0x76, 0x84) |
| 1264 | 1311 | ||
| 1265 | /* | 1312 | /* |
| 1266 | * VSS (Backup/Restore) GUID | 1313 | * VSS (Backup/Restore) GUID |
| 1267 | */ | 1314 | */ |
| 1268 | #define HV_VSS_GUID \ | 1315 | #define HV_VSS_GUID \ |
| 1269 | .guid = UUID_LE(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ | 1316 | .guid = GUID_INIT(0x35fa2e29, 0xea23, 0x4236, 0x96, 0xae, \ |
| 1270 | 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) | 1317 | 0x3a, 0x6e, 0xba, 0xcb, 0xa4, 0x40) |
| 1271 | /* | 1318 | /* |
| 1272 | * Synthetic Video GUID | 1319 | * Synthetic Video GUID |
| 1273 | * {DA0A7802-E377-4aac-8E77-0558EB1073F8} | 1320 | * {DA0A7802-E377-4aac-8E77-0558EB1073F8} |
| 1274 | */ | 1321 | */ |
| 1275 | #define HV_SYNTHVID_GUID \ | 1322 | #define HV_SYNTHVID_GUID \ |
| 1276 | .guid = UUID_LE(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ | 1323 | .guid = GUID_INIT(0xda0a7802, 0xe377, 0x4aac, 0x8e, 0x77, \ |
| 1277 | 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) | 1324 | 0x05, 0x58, 0xeb, 0x10, 0x73, 0xf8) |
| 1278 | 1325 | ||
| 1279 | /* | 1326 | /* |
| 1280 | * Synthetic FC GUID | 1327 | * Synthetic FC GUID |
| 1281 | * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} | 1328 | * {2f9bcc4a-0069-4af3-b76b-6fd0be528cda} |
| 1282 | */ | 1329 | */ |
| 1283 | #define HV_SYNTHFC_GUID \ | 1330 | #define HV_SYNTHFC_GUID \ |
| 1284 | .guid = UUID_LE(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ | 1331 | .guid = GUID_INIT(0x2f9bcc4a, 0x0069, 0x4af3, 0xb7, 0x6b, \ |
| 1285 | 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) | 1332 | 0x6f, 0xd0, 0xbe, 0x52, 0x8c, 0xda) |
| 1286 | 1333 | ||
| 1287 | /* | 1334 | /* |
| 1288 | * Guest File Copy Service | 1335 | * Guest File Copy Service |
| @@ -1290,16 +1337,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); | |||
| 1290 | */ | 1337 | */ |
| 1291 | 1338 | ||
| 1292 | #define HV_FCOPY_GUID \ | 1339 | #define HV_FCOPY_GUID \ |
| 1293 | .guid = UUID_LE(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ | 1340 | .guid = GUID_INIT(0x34d14be3, 0xdee4, 0x41c8, 0x9a, 0xe7, \ |
| 1294 | 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) | 1341 | 0x6b, 0x17, 0x49, 0x77, 0xc1, 0x92) |
| 1295 | 1342 | ||
| 1296 | /* | 1343 | /* |
| 1297 | * NetworkDirect. This is the guest RDMA service. | 1344 | * NetworkDirect. This is the guest RDMA service. |
| 1298 | * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} | 1345 | * {8c2eaf3d-32a7-4b09-ab99-bd1f1c86b501} |
| 1299 | */ | 1346 | */ |
| 1300 | #define HV_ND_GUID \ | 1347 | #define HV_ND_GUID \ |
| 1301 | .guid = UUID_LE(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ | 1348 | .guid = GUID_INIT(0x8c2eaf3d, 0x32a7, 0x4b09, 0xab, 0x99, \ |
| 1302 | 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) | 1349 | 0xbd, 0x1f, 0x1c, 0x86, 0xb5, 0x01) |
| 1303 | 1350 | ||
| 1304 | /* | 1351 | /* |
| 1305 | * PCI Express Pass Through | 1352 | * PCI Express Pass Through |
| @@ -1307,8 +1354,8 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); | |||
| 1307 | */ | 1354 | */ |
| 1308 | 1355 | ||
| 1309 | #define HV_PCIE_GUID \ | 1356 | #define HV_PCIE_GUID \ |
| 1310 | .guid = UUID_LE(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ | 1357 | .guid = GUID_INIT(0x44c4f61d, 0x4444, 0x4400, 0x9d, 0x52, \ |
| 1311 | 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) | 1358 | 0x80, 0x2e, 0x27, 0xed, 0xe1, 0x9f) |
| 1312 | 1359 | ||
| 1313 | /* | 1360 | /* |
| 1314 | * Linux doesn't support the 3 devices: the first two are for | 1361 | * Linux doesn't support the 3 devices: the first two are for |
| @@ -1320,16 +1367,16 @@ void vmbus_free_mmio(resource_size_t start, resource_size_t size); | |||
| 1320 | */ | 1367 | */ |
| 1321 | 1368 | ||
| 1322 | #define HV_AVMA1_GUID \ | 1369 | #define HV_AVMA1_GUID \ |
| 1323 | .guid = UUID_LE(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ | 1370 | .guid = GUID_INIT(0xf8e65716, 0x3cb3, 0x4a06, 0x9a, 0x60, \ |
| 1324 | 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) | 1371 | 0x18, 0x89, 0xc5, 0xcc, 0xca, 0xb5) |
| 1325 | 1372 | ||
| 1326 | #define HV_AVMA2_GUID \ | 1373 | #define HV_AVMA2_GUID \ |
| 1327 | .guid = UUID_LE(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ | 1374 | .guid = GUID_INIT(0x3375baf4, 0x9e15, 0x4b30, 0xb7, 0x65, \ |
| 1328 | 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) | 1375 | 0x67, 0xac, 0xb1, 0x0d, 0x60, 0x7b) |
| 1329 | 1376 | ||
| 1330 | #define HV_RDV_GUID \ | 1377 | #define HV_RDV_GUID \ |
| 1331 | .guid = UUID_LE(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ | 1378 | .guid = GUID_INIT(0x276aacf4, 0xac15, 0x426c, 0x98, 0xdd, \ |
| 1332 | 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) | 1379 | 0x75, 0x21, 0xad, 0x3f, 0x01, 0xfe) |
| 1333 | 1380 | ||
| 1334 | /* | 1381 | /* |
| 1335 | * Common header for Hyper-V ICs | 1382 | * Common header for Hyper-V ICs |
| @@ -1431,7 +1478,7 @@ struct ictimesync_ref_data { | |||
| 1431 | struct hyperv_service_callback { | 1478 | struct hyperv_service_callback { |
| 1432 | u8 msg_type; | 1479 | u8 msg_type; |
| 1433 | char *log_msg; | 1480 | char *log_msg; |
| 1434 | uuid_le data; | 1481 | guid_t data; |
| 1435 | struct vmbus_channel *channel; | 1482 | struct vmbus_channel *channel; |
| 1436 | void (*callback)(void *context); | 1483 | void (*callback)(void *context); |
| 1437 | }; | 1484 | }; |
| @@ -1451,8 +1498,8 @@ void vmbus_setevent(struct vmbus_channel *channel); | |||
| 1451 | 1498 | ||
| 1452 | extern __u32 vmbus_proto_version; | 1499 | extern __u32 vmbus_proto_version; |
| 1453 | 1500 | ||
| 1454 | int vmbus_send_tl_connect_request(const uuid_le *shv_guest_servie_id, | 1501 | int vmbus_send_tl_connect_request(const guid_t *shv_guest_servie_id, |
| 1455 | const uuid_le *shv_host_servie_id); | 1502 | const guid_t *shv_host_servie_id); |
| 1456 | void vmbus_set_event(struct vmbus_channel *channel); | 1503 | void vmbus_set_event(struct vmbus_channel *channel); |
| 1457 | 1504 | ||
| 1458 | /* Get the start of the ring buffer. */ | 1505 | /* Get the start of the ring buffer. */ |
diff --git a/include/linux/i2c-algo-bit.h b/include/linux/i2c-algo-bit.h index 63904ba6887e..69045df78e2d 100644 --- a/include/linux/i2c-algo-bit.h +++ b/include/linux/i2c-algo-bit.h | |||
| @@ -1,30 +1,17 @@ | |||
| 1 | /* ------------------------------------------------------------------------- */ | 1 | /* SPDX-License-Identifier: GPL-2.0+ */ |
| 2 | /* i2c-algo-bit.h i2c driver algorithms for bit-shift adapters */ | 2 | /* |
| 3 | /* ------------------------------------------------------------------------- */ | 3 | * i2c-algo-bit.h: i2c driver algorithms for bit-shift adapters |
| 4 | /* Copyright (C) 1995-99 Simon G. Vogl | 4 | * |
| 5 | 5 | * Copyright (C) 1995-99 Simon G. Vogl | |
| 6 | This program is free software; you can redistribute it and/or modify | 6 | * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even |
| 7 | it under the terms of the GNU General Public License as published by | 7 | * Frodo Looijaard <frodol@dds.nl> |
| 8 | the Free Software Foundation; either version 2 of the License, or | 8 | */ |
| 9 | (at your option) any later version. | ||
| 10 | |||
| 11 | This program is distributed in the hope that it will be useful, | ||
| 12 | but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | GNU General Public License for more details. | ||
| 15 | |||
| 16 | You should have received a copy of the GNU General Public License | ||
| 17 | along with this program; if not, write to the Free Software | ||
| 18 | Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, | ||
| 19 | MA 02110-1301 USA. */ | ||
| 20 | /* ------------------------------------------------------------------------- */ | ||
| 21 | |||
| 22 | /* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even | ||
| 23 | Frodo Looijaard <frodol@dds.nl> */ | ||
| 24 | 9 | ||
| 25 | #ifndef _LINUX_I2C_ALGO_BIT_H | 10 | #ifndef _LINUX_I2C_ALGO_BIT_H |
| 26 | #define _LINUX_I2C_ALGO_BIT_H | 11 | #define _LINUX_I2C_ALGO_BIT_H |
| 27 | 12 | ||
| 13 | #include <linux/i2c.h> | ||
| 14 | |||
| 28 | /* --- Defines for bit-adapters --------------------------------------- */ | 15 | /* --- Defines for bit-adapters --------------------------------------- */ |
| 29 | /* | 16 | /* |
| 30 | * This struct contains the hw-dependent functions of bit-style adapters to | 17 | * This struct contains the hw-dependent functions of bit-style adapters to |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index 65b4eaed1d96..383510b4f083 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -333,6 +333,7 @@ struct i2c_client { | |||
| 333 | char name[I2C_NAME_SIZE]; | 333 | char name[I2C_NAME_SIZE]; |
| 334 | struct i2c_adapter *adapter; /* the adapter we sit on */ | 334 | struct i2c_adapter *adapter; /* the adapter we sit on */ |
| 335 | struct device dev; /* the device structure */ | 335 | struct device dev; /* the device structure */ |
| 336 | int init_irq; /* irq set at initialization */ | ||
| 336 | int irq; /* irq issued by device */ | 337 | int irq; /* irq issued by device */ |
| 337 | struct list_head detected; | 338 | struct list_head detected; |
| 338 | #if IS_ENABLED(CONFIG_I2C_SLAVE) | 339 | #if IS_ENABLED(CONFIG_I2C_SLAVE) |
| @@ -680,6 +681,8 @@ struct i2c_adapter { | |||
| 680 | int timeout; /* in jiffies */ | 681 | int timeout; /* in jiffies */ |
| 681 | int retries; | 682 | int retries; |
| 682 | struct device dev; /* the adapter device */ | 683 | struct device dev; /* the adapter device */ |
| 684 | unsigned long locked_flags; /* owned by the I2C core */ | ||
| 685 | #define I2C_ALF_IS_SUSPENDED 0 | ||
| 683 | 686 | ||
| 684 | int nr; | 687 | int nr; |
| 685 | char name[48]; | 688 | char name[48]; |
| @@ -762,6 +765,38 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags) | |||
| 762 | adapter->lock_ops->unlock_bus(adapter, flags); | 765 | adapter->lock_ops->unlock_bus(adapter, flags); |
| 763 | } | 766 | } |
| 764 | 767 | ||
| 768 | /** | ||
| 769 | * i2c_mark_adapter_suspended - Report suspended state of the adapter to the core | ||
| 770 | * @adap: Adapter to mark as suspended | ||
| 771 | * | ||
| 772 | * When using this helper to mark an adapter as suspended, the core will reject | ||
| 773 | * further transfers to this adapter. The usage of this helper is optional but | ||
| 774 | * recommended for devices having distinct handlers for system suspend and | ||
| 775 | * runtime suspend. More complex devices are free to implement custom solutions | ||
| 776 | * to reject transfers when suspended. | ||
| 777 | */ | ||
| 778 | static inline void i2c_mark_adapter_suspended(struct i2c_adapter *adap) | ||
| 779 | { | ||
| 780 | i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER); | ||
| 781 | set_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags); | ||
| 782 | i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); | ||
| 783 | } | ||
| 784 | |||
| 785 | /** | ||
| 786 | * i2c_mark_adapter_resumed - Report resumed state of the adapter to the core | ||
| 787 | * @adap: Adapter to mark as resumed | ||
| 788 | * | ||
| 789 | * When using this helper to mark an adapter as resumed, the core will allow | ||
| 790 | * further transfers to this adapter. See also further notes to | ||
| 791 | * @i2c_mark_adapter_suspended(). | ||
| 792 | */ | ||
| 793 | static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap) | ||
| 794 | { | ||
| 795 | i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER); | ||
| 796 | clear_bit(I2C_ALF_IS_SUSPENDED, &adap->locked_flags); | ||
| 797 | i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER); | ||
| 798 | } | ||
| 799 | |||
| 765 | /*flags for the client struct: */ | 800 | /*flags for the client struct: */ |
| 766 | #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ | 801 | #define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */ |
| 767 | #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ | 802 | #define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */ |
| @@ -933,11 +968,21 @@ static inline int of_i2c_get_board_info(struct device *dev, | |||
| 933 | 968 | ||
| 934 | #endif /* CONFIG_OF */ | 969 | #endif /* CONFIG_OF */ |
| 935 | 970 | ||
| 971 | struct acpi_resource; | ||
| 972 | struct acpi_resource_i2c_serialbus; | ||
| 973 | |||
| 936 | #if IS_ENABLED(CONFIG_ACPI) | 974 | #if IS_ENABLED(CONFIG_ACPI) |
| 975 | bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, | ||
| 976 | struct acpi_resource_i2c_serialbus **i2c); | ||
| 937 | u32 i2c_acpi_find_bus_speed(struct device *dev); | 977 | u32 i2c_acpi_find_bus_speed(struct device *dev); |
| 938 | struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, | 978 | struct i2c_client *i2c_acpi_new_device(struct device *dev, int index, |
| 939 | struct i2c_board_info *info); | 979 | struct i2c_board_info *info); |
| 940 | #else | 980 | #else |
| 981 | static inline bool i2c_acpi_get_i2c_resource(struct acpi_resource *ares, | ||
| 982 | struct acpi_resource_i2c_serialbus **i2c) | ||
| 983 | { | ||
| 984 | return false; | ||
| 985 | } | ||
| 941 | static inline u32 i2c_acpi_find_bus_speed(struct device *dev) | 986 | static inline u32 i2c_acpi_find_bus_speed(struct device *dev) |
| 942 | { | 987 | { |
| 943 | return 0; | 988 | return 0; |
diff --git a/include/linux/ide.h b/include/linux/ide.h index e7d29ae633cd..971cf76a78a0 100644 --- a/include/linux/ide.h +++ b/include/linux/ide.h | |||
| @@ -615,6 +615,7 @@ struct ide_drive_s { | |||
| 615 | 615 | ||
| 616 | /* current sense rq and buffer */ | 616 | /* current sense rq and buffer */ |
| 617 | bool sense_rq_armed; | 617 | bool sense_rq_armed; |
| 618 | bool sense_rq_active; | ||
| 618 | struct request *sense_rq; | 619 | struct request *sense_rq; |
| 619 | struct request_sense sense_data; | 620 | struct request_sense sense_data; |
| 620 | 621 | ||
| @@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout); | |||
| 1219 | extern void ide_timer_expiry(struct timer_list *t); | 1220 | extern void ide_timer_expiry(struct timer_list *t); |
| 1220 | extern irqreturn_t ide_intr(int irq, void *dev_id); | 1221 | extern irqreturn_t ide_intr(int irq, void *dev_id); |
| 1221 | extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); | 1222 | extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *); |
| 1223 | extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool); | ||
| 1222 | extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); | 1224 | extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq); |
| 1223 | 1225 | ||
| 1224 | void ide_init_disk(struct gendisk *, ide_drive_t *); | 1226 | void ide_init_disk(struct gendisk *, ide_drive_t *); |
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h index 3b04e72315e1..48703ec60d06 100644 --- a/include/linux/ieee80211.h +++ b/include/linux/ieee80211.h | |||
| @@ -8,7 +8,7 @@ | |||
| 8 | * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> | 8 | * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net> |
| 9 | * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH | 9 | * Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH |
| 10 | * Copyright (c) 2016 - 2017 Intel Deutschland GmbH | 10 | * Copyright (c) 2016 - 2017 Intel Deutschland GmbH |
| 11 | * Copyright (c) 2018 Intel Corporation | 11 | * Copyright (c) 2018 - 2019 Intel Corporation |
| 12 | * | 12 | * |
| 13 | * This program is free software; you can redistribute it and/or modify | 13 | * This program is free software; you can redistribute it and/or modify |
| 14 | * it under the terms of the GNU General Public License version 2 as | 14 | * it under the terms of the GNU General Public License version 2 as |
| @@ -1803,6 +1803,9 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, | |||
| 1803 | #define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04 | 1803 | #define IEEE80211_HE_MAC_CAP5_SUBCHAN_SELECVITE_TRANSMISSION 0x04 |
| 1804 | #define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08 | 1804 | #define IEEE80211_HE_MAC_CAP5_UL_2x996_TONE_RU 0x08 |
| 1805 | #define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10 | 1805 | #define IEEE80211_HE_MAC_CAP5_OM_CTRL_UL_MU_DATA_DIS_RX 0x10 |
| 1806 | #define IEEE80211_HE_MAC_CAP5_HE_DYNAMIC_SM_PS 0x20 | ||
| 1807 | #define IEEE80211_HE_MAC_CAP5_PUNCTURED_SOUNDING 0x40 | ||
| 1808 | #define IEEE80211_HE_MAC_CAP5_HT_VHT_TRIG_FRAME_RX 0x80 | ||
| 1806 | 1809 | ||
| 1807 | /* 802.11ax HE PHY capabilities */ | 1810 | /* 802.11ax HE PHY capabilities */ |
| 1808 | #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 | 1811 | #define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02 |
| @@ -1926,11 +1929,11 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, | |||
| 1926 | #define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 | 1929 | #define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08 |
| 1927 | #define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 | 1930 | #define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10 |
| 1928 | #define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20 | 1931 | #define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_TX_2X_AND_1XLTF 0x20 |
| 1929 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_20MHZ 0x00 | 1932 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_242 0x00 |
| 1930 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_40MHZ 0x40 | 1933 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_484 0x40 |
| 1931 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_80MHZ 0x80 | 1934 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_996 0x80 |
| 1932 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_160_OR_80P80_MHZ 0xc0 | 1935 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_2x996 0xc0 |
| 1933 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_BW_MASK 0xc0 | 1936 | #define IEEE80211_HE_PHY_CAP8_DCM_MAX_RU_MASK 0xc0 |
| 1934 | 1937 | ||
| 1935 | #define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01 | 1938 | #define IEEE80211_HE_PHY_CAP9_LONGER_THAN_16_SIGB_OFDM_SYM 0x01 |
| 1936 | #define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02 | 1939 | #define IEEE80211_HE_PHY_CAP9_NON_TRIGGERED_CQI_FEEDBACK 0x02 |
| @@ -1938,6 +1941,11 @@ int ieee80211_get_vht_max_nss(struct ieee80211_vht_cap *cap, | |||
| 1938 | #define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08 | 1941 | #define IEEE80211_HE_PHY_CAP9_RX_1024_QAM_LESS_THAN_242_TONE_RU 0x08 |
| 1939 | #define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10 | 1942 | #define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_COMP_SIGB 0x10 |
| 1940 | #define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20 | 1943 | #define IEEE80211_HE_PHY_CAP9_RX_FULL_BW_SU_USING_MU_WITH_NON_COMP_SIGB 0x20 |
| 1944 | #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_0US 0x00 | ||
| 1945 | #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_8US 0x40 | ||
| 1946 | #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_16US 0x80 | ||
| 1947 | #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_RESERVED 0xc0 | ||
| 1948 | #define IEEE80211_HE_PHY_CAP9_NOMIMAL_PKT_PADDING_MASK 0xc0 | ||
| 1941 | 1949 | ||
| 1942 | /* 802.11ax HE TX/RX MCS NSS Support */ | 1950 | /* 802.11ax HE TX/RX MCS NSS Support */ |
| 1943 | #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) | 1951 | #define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3) |
| @@ -2016,7 +2024,7 @@ ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info) | |||
| 2016 | #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 | 2024 | #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x00003ff0 |
| 2017 | #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 | 2025 | #define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 4 |
| 2018 | #define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000 | 2026 | #define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x00004000 |
| 2019 | #define IEEE80211_HE_OPERATION_CO_LOCATED_BSS 0x00008000 | 2027 | #define IEEE80211_HE_OPERATION_CO_HOSTED_BSS 0x00008000 |
| 2020 | #define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000 | 2028 | #define IEEE80211_HE_OPERATION_ER_SU_DISABLE 0x00010000 |
| 2021 | #define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000 | 2029 | #define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x3f000000 |
| 2022 | #define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 | 2030 | #define IEEE80211_HE_OPERATION_BSS_COLOR_OFFSET 24 |
| @@ -2046,7 +2054,7 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) | |||
| 2046 | he_oper_params = le32_to_cpu(he_oper->he_oper_params); | 2054 | he_oper_params = le32_to_cpu(he_oper->he_oper_params); |
| 2047 | if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) | 2055 | if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO) |
| 2048 | oper_len += 3; | 2056 | oper_len += 3; |
| 2049 | if (he_oper_params & IEEE80211_HE_OPERATION_CO_LOCATED_BSS) | 2057 | if (he_oper_params & IEEE80211_HE_OPERATION_CO_HOSTED_BSS) |
| 2050 | oper_len++; | 2058 | oper_len++; |
| 2051 | 2059 | ||
| 2052 | /* Add the first byte (extension ID) to the total length */ | 2060 | /* Add the first byte (extension ID) to the total length */ |
| @@ -2118,6 +2126,8 @@ ieee80211_he_oper_size(const u8 *he_oper_ie) | |||
| 2118 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 | 2126 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_BASIC 0 |
| 2119 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 | 2127 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_CCA 1 |
| 2120 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 | 2128 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_RPI 2 |
| 2129 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_LCI 8 | ||
| 2130 | #define IEEE80211_SPCT_MSR_RPRT_TYPE_CIVIC 11 | ||
| 2121 | 2131 | ||
| 2122 | /* 802.11g ERP information element */ | 2132 | /* 802.11g ERP information element */ |
| 2123 | #define WLAN_ERP_NON_ERP_PRESENT (1<<0) | 2133 | #define WLAN_ERP_NON_ERP_PRESENT (1<<0) |
| @@ -2475,6 +2485,8 @@ enum ieee80211_eid_ext { | |||
| 2475 | WLAN_EID_EXT_HE_OPERATION = 36, | 2485 | WLAN_EID_EXT_HE_OPERATION = 36, |
| 2476 | WLAN_EID_EXT_UORA = 37, | 2486 | WLAN_EID_EXT_UORA = 37, |
| 2477 | WLAN_EID_EXT_HE_MU_EDCA = 38, | 2487 | WLAN_EID_EXT_HE_MU_EDCA = 38, |
| 2488 | WLAN_EID_EXT_MAX_CHANNEL_SWITCH_TIME = 52, | ||
| 2489 | WLAN_EID_EXT_MULTIPLE_BSSID_CONFIGURATION = 55, | ||
| 2478 | }; | 2490 | }; |
| 2479 | 2491 | ||
| 2480 | /* Action category code */ | 2492 | /* Action category code */ |
| @@ -2656,6 +2668,11 @@ enum ieee80211_tdls_actioncode { | |||
| 2656 | */ | 2668 | */ |
| 2657 | #define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) | 2669 | #define WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING BIT(2) |
| 2658 | 2670 | ||
| 2671 | /* Multiple BSSID capability is set in the 6th bit of 3rd byte of the | ||
| 2672 | * @WLAN_EID_EXT_CAPABILITY information element | ||
| 2673 | */ | ||
| 2674 | #define WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT BIT(6) | ||
| 2675 | |||
| 2659 | /* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */ | 2676 | /* TDLS capabilities in the the 4th byte of @WLAN_EID_EXT_CAPABILITY */ |
| 2660 | #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) | 2677 | #define WLAN_EXT_CAPA4_TDLS_BUFFER_STA BIT(4) |
| 2661 | #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) | 2678 | #define WLAN_EXT_CAPA4_TDLS_PEER_PSM BIT(5) |
| @@ -2691,6 +2708,9 @@ enum ieee80211_tdls_actioncode { | |||
| 2691 | #define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) | 2708 | #define WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT BIT(5) |
| 2692 | #define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) | 2709 | #define WLAN_EXT_CAPA10_TWT_RESPONDER_SUPPORT BIT(6) |
| 2693 | 2710 | ||
| 2711 | /* Defines support for enhanced multi-bssid advertisement*/ | ||
| 2712 | #define WLAN_EXT_CAPA11_EMA_SUPPORT BIT(1) | ||
| 2713 | |||
| 2694 | /* TDLS specific payload type in the LLC/SNAP header */ | 2714 | /* TDLS specific payload type in the LLC/SNAP header */ |
| 2695 | #define WLAN_TDLS_SNAP_RFTYPE 0x2 | 2715 | #define WLAN_TDLS_SNAP_RFTYPE 0x2 |
| 2696 | 2716 | ||
| @@ -2882,6 +2902,34 @@ enum ieee80211_sa_query_action { | |||
| 2882 | WLAN_ACTION_SA_QUERY_RESPONSE = 1, | 2902 | WLAN_ACTION_SA_QUERY_RESPONSE = 1, |
| 2883 | }; | 2903 | }; |
| 2884 | 2904 | ||
| 2905 | /** | ||
| 2906 | * struct ieee80211_bssid_index | ||
| 2907 | * | ||
| 2908 | * This structure refers to "Multiple BSSID-index element" | ||
| 2909 | * | ||
| 2910 | * @bssid_index: BSSID index | ||
| 2911 | * @dtim_period: optional, overrides transmitted BSS dtim period | ||
| 2912 | * @dtim_count: optional, overrides transmitted BSS dtim count | ||
| 2913 | */ | ||
| 2914 | struct ieee80211_bssid_index { | ||
| 2915 | u8 bssid_index; | ||
| 2916 | u8 dtim_period; | ||
| 2917 | u8 dtim_count; | ||
| 2918 | }; | ||
| 2919 | |||
| 2920 | /** | ||
| 2921 | * struct ieee80211_multiple_bssid_configuration | ||
| 2922 | * | ||
| 2923 | * This structure refers to "Multiple BSSID Configuration element" | ||
| 2924 | * | ||
| 2925 | * @bssid_count: total number of active BSSIDs in the set | ||
| 2926 | * @profile_periodicity: the least number of beacon frames need to be received | ||
| 2927 | * in order to discover all the nontransmitted BSSIDs in the set. | ||
| 2928 | */ | ||
| 2929 | struct ieee80211_multiple_bssid_configuration { | ||
| 2930 | u8 bssid_count; | ||
| 2931 | u8 profile_periodicity; | ||
| 2932 | }; | ||
| 2885 | 2933 | ||
| 2886 | #define SUITE(oui, id) (((oui) << 8) | (id)) | 2934 | #define SUITE(oui, id) (((oui) << 8) | (id)) |
| 2887 | 2935 | ||
| @@ -3243,4 +3291,57 @@ static inline bool ieee80211_action_contains_tpc(struct sk_buff *skb) | |||
| 3243 | return true; | 3291 | return true; |
| 3244 | } | 3292 | } |
| 3245 | 3293 | ||
| 3294 | struct element { | ||
| 3295 | u8 id; | ||
| 3296 | u8 datalen; | ||
| 3297 | u8 data[]; | ||
| 3298 | } __packed; | ||
| 3299 | |||
| 3300 | /* element iteration helpers */ | ||
| 3301 | #define for_each_element(_elem, _data, _datalen) \ | ||
| 3302 | for (_elem = (const struct element *)(_data); \ | ||
| 3303 | (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ | ||
| 3304 | (int)sizeof(*_elem) && \ | ||
| 3305 | (const u8 *)(_data) + (_datalen) - (const u8 *)_elem >= \ | ||
| 3306 | (int)sizeof(*_elem) + _elem->datalen; \ | ||
| 3307 | _elem = (const struct element *)(_elem->data + _elem->datalen)) | ||
| 3308 | |||
| 3309 | #define for_each_element_id(element, _id, data, datalen) \ | ||
| 3310 | for_each_element(element, data, datalen) \ | ||
| 3311 | if (element->id == (_id)) | ||
| 3312 | |||
| 3313 | #define for_each_element_extid(element, extid, _data, _datalen) \ | ||
| 3314 | for_each_element(element, _data, _datalen) \ | ||
| 3315 | if (element->id == WLAN_EID_EXTENSION && \ | ||
| 3316 | element->datalen > 0 && \ | ||
| 3317 | element->data[0] == (extid)) | ||
| 3318 | |||
| 3319 | #define for_each_subelement(sub, element) \ | ||
| 3320 | for_each_element(sub, (element)->data, (element)->datalen) | ||
| 3321 | |||
| 3322 | #define for_each_subelement_id(sub, id, element) \ | ||
| 3323 | for_each_element_id(sub, id, (element)->data, (element)->datalen) | ||
| 3324 | |||
| 3325 | #define for_each_subelement_extid(sub, extid, element) \ | ||
| 3326 | for_each_element_extid(sub, extid, (element)->data, (element)->datalen) | ||
| 3327 | |||
| 3328 | /** | ||
| 3329 | * for_each_element_completed - determine if element parsing consumed all data | ||
| 3330 | * @element: element pointer after for_each_element() or friends | ||
| 3331 | * @data: same data pointer as passed to for_each_element() or friends | ||
| 3332 | * @datalen: same data length as passed to for_each_element() or friends | ||
| 3333 | * | ||
| 3334 | * This function returns %true if all the data was parsed or considered | ||
| 3335 | * while walking the elements. Only use this if your for_each_element() | ||
| 3336 | * loop cannot be broken out of, otherwise it always returns %false. | ||
| 3337 | * | ||
| 3338 | * If some data was malformed, this returns %false since the last parsed | ||
| 3339 | * element will not fill the whole remaining data. | ||
| 3340 | */ | ||
| 3341 | static inline bool for_each_element_completed(const struct element *element, | ||
| 3342 | const void *data, size_t datalen) | ||
| 3343 | { | ||
| 3344 | return (const u8 *)element == (const u8 *)data + datalen; | ||
| 3345 | } | ||
| 3346 | |||
| 3246 | #endif /* LINUX_IEEE80211_H */ | 3347 | #endif /* LINUX_IEEE80211_H */ |
diff --git a/include/linux/if_arp.h b/include/linux/if_arp.h index 6756fea18b69..e44746de95cd 100644 --- a/include/linux/if_arp.h +++ b/include/linux/if_arp.h | |||
| @@ -54,6 +54,7 @@ static inline bool dev_is_mac_header_xmit(const struct net_device *dev) | |||
| 54 | case ARPHRD_IPGRE: | 54 | case ARPHRD_IPGRE: |
| 55 | case ARPHRD_VOID: | 55 | case ARPHRD_VOID: |
| 56 | case ARPHRD_NONE: | 56 | case ARPHRD_NONE: |
| 57 | case ARPHRD_RAWIP: | ||
| 57 | return false; | 58 | return false; |
| 58 | default: | 59 | default: |
| 59 | return true; | 60 | return true; |
diff --git a/include/linux/igmp.h b/include/linux/igmp.h index 119f53941c12..9c94b2ea789c 100644 --- a/include/linux/igmp.h +++ b/include/linux/igmp.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/skbuff.h> | 18 | #include <linux/skbuff.h> |
| 19 | #include <linux/timer.h> | 19 | #include <linux/timer.h> |
| 20 | #include <linux/in.h> | 20 | #include <linux/in.h> |
| 21 | #include <linux/ip.h> | ||
| 21 | #include <linux/refcount.h> | 22 | #include <linux/refcount.h> |
| 22 | #include <uapi/linux/igmp.h> | 23 | #include <uapi/linux/igmp.h> |
| 23 | 24 | ||
| @@ -106,6 +107,14 @@ struct ip_mc_list { | |||
| 106 | #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) | 107 | #define IGMPV3_QQIC(value) IGMPV3_EXP(0x80, 4, 3, value) |
| 107 | #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) | 108 | #define IGMPV3_MRC(value) IGMPV3_EXP(0x80, 4, 3, value) |
| 108 | 109 | ||
| 110 | static inline int ip_mc_may_pull(struct sk_buff *skb, unsigned int len) | ||
| 111 | { | ||
| 112 | if (skb_transport_offset(skb) + ip_transport_len(skb) < len) | ||
| 113 | return 0; | ||
| 114 | |||
| 115 | return pskb_may_pull(skb, len); | ||
| 116 | } | ||
| 117 | |||
| 109 | extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); | 118 | extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto); |
| 110 | extern int igmp_rcv(struct sk_buff *); | 119 | extern int igmp_rcv(struct sk_buff *); |
| 111 | extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); | 120 | extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr); |
| @@ -128,8 +137,14 @@ extern void ip_mc_up(struct in_device *); | |||
| 128 | extern void ip_mc_down(struct in_device *); | 137 | extern void ip_mc_down(struct in_device *); |
| 129 | extern void ip_mc_unmap(struct in_device *); | 138 | extern void ip_mc_unmap(struct in_device *); |
| 130 | extern void ip_mc_remap(struct in_device *); | 139 | extern void ip_mc_remap(struct in_device *); |
| 131 | extern void ip_mc_dec_group(struct in_device *in_dev, __be32 addr); | 140 | extern void __ip_mc_dec_group(struct in_device *in_dev, __be32 addr, gfp_t gfp); |
| 141 | static inline void ip_mc_dec_group(struct in_device *in_dev, __be32 addr) | ||
| 142 | { | ||
| 143 | return __ip_mc_dec_group(in_dev, addr, GFP_KERNEL); | ||
| 144 | } | ||
| 145 | extern void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, | ||
| 146 | gfp_t gfp); | ||
| 132 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); | 147 | extern void ip_mc_inc_group(struct in_device *in_dev, __be32 addr); |
| 133 | int ip_mc_check_igmp(struct sk_buff *skb, struct sk_buff **skb_trimmed); | 148 | int ip_mc_check_igmp(struct sk_buff *skb); |
| 134 | 149 | ||
| 135 | #endif | 150 | #endif |
diff --git a/include/linux/ihex.h b/include/linux/ihex.h index 75c194391869..98cb5ce0b0a0 100644 --- a/include/linux/ihex.h +++ b/include/linux/ihex.h | |||
| @@ -21,12 +21,24 @@ struct ihex_binrec { | |||
| 21 | uint8_t data[0]; | 21 | uint8_t data[0]; |
| 22 | } __attribute__((packed)); | 22 | } __attribute__((packed)); |
| 23 | 23 | ||
| 24 | static inline uint16_t ihex_binrec_size(const struct ihex_binrec *p) | ||
| 25 | { | ||
| 26 | return be16_to_cpu(p->len) + sizeof(*p); | ||
| 27 | } | ||
| 28 | |||
| 24 | /* Find the next record, taking into account the 4-byte alignment */ | 29 | /* Find the next record, taking into account the 4-byte alignment */ |
| 25 | static inline const struct ihex_binrec * | 30 | static inline const struct ihex_binrec * |
| 31 | __ihex_next_binrec(const struct ihex_binrec *rec) | ||
| 32 | { | ||
| 33 | const void *p = rec; | ||
| 34 | |||
| 35 | return p + ALIGN(ihex_binrec_size(rec), 4); | ||
| 36 | } | ||
| 37 | |||
| 38 | static inline const struct ihex_binrec * | ||
| 26 | ihex_next_binrec(const struct ihex_binrec *rec) | 39 | ihex_next_binrec(const struct ihex_binrec *rec) |
| 27 | { | 40 | { |
| 28 | int next = ((be16_to_cpu(rec->len) + 5) & ~3) - 2; | 41 | rec = __ihex_next_binrec(rec); |
| 29 | rec = (void *)&rec->data[next]; | ||
| 30 | 42 | ||
| 31 | return be16_to_cpu(rec->len) ? rec : NULL; | 43 | return be16_to_cpu(rec->len) ? rec : NULL; |
| 32 | } | 44 | } |
| @@ -34,18 +46,15 @@ ihex_next_binrec(const struct ihex_binrec *rec) | |||
| 34 | /* Check that ihex_next_binrec() won't take us off the end of the image... */ | 46 | /* Check that ihex_next_binrec() won't take us off the end of the image... */ |
| 35 | static inline int ihex_validate_fw(const struct firmware *fw) | 47 | static inline int ihex_validate_fw(const struct firmware *fw) |
| 36 | { | 48 | { |
| 37 | const struct ihex_binrec *rec; | 49 | const struct ihex_binrec *end, *rec; |
| 38 | size_t ofs = 0; | ||
| 39 | 50 | ||
| 40 | while (ofs <= fw->size - sizeof(*rec)) { | 51 | rec = (const void *)fw->data; |
| 41 | rec = (void *)&fw->data[ofs]; | 52 | end = (const void *)&fw->data[fw->size - sizeof(*end)]; |
| 42 | 53 | ||
| 54 | for (; rec <= end; rec = __ihex_next_binrec(rec)) { | ||
| 43 | /* Zero length marks end of records */ | 55 | /* Zero length marks end of records */ |
| 44 | if (!be16_to_cpu(rec->len)) | 56 | if (rec == end && !be16_to_cpu(rec->len)) |
| 45 | return 0; | 57 | return 0; |
| 46 | |||
| 47 | /* Point to next record... */ | ||
| 48 | ofs += (sizeof(*rec) + be16_to_cpu(rec->len) + 3) & ~3; | ||
| 49 | } | 58 | } |
| 50 | return -EINVAL; | 59 | return -EINVAL; |
| 51 | } | 60 | } |
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h index 8092b8e7f37e..45e9667f0a8c 100644 --- a/include/linux/iio/common/st_sensors.h +++ b/include/linux/iio/common/st_sensors.h | |||
| @@ -260,6 +260,7 @@ struct st_sensor_settings { | |||
| 260 | struct st_sensor_data { | 260 | struct st_sensor_data { |
| 261 | struct device *dev; | 261 | struct device *dev; |
| 262 | struct iio_trigger *trig; | 262 | struct iio_trigger *trig; |
| 263 | struct iio_mount_matrix *mount_matrix; | ||
| 263 | struct st_sensor_settings *sensor_settings; | 264 | struct st_sensor_settings *sensor_settings; |
| 264 | struct st_sensor_fullscale_avl *current_fullscale; | 265 | struct st_sensor_fullscale_avl *current_fullscale; |
| 265 | struct regulator *vdd; | 266 | struct regulator *vdd; |
diff --git a/include/linux/ima.h b/include/linux/ima.h index b5e16b8c50b7..dc12fbcf484c 100644 --- a/include/linux/ima.h +++ b/include/linux/ima.h | |||
| @@ -18,6 +18,7 @@ struct linux_binprm; | |||
| 18 | #ifdef CONFIG_IMA | 18 | #ifdef CONFIG_IMA |
| 19 | extern int ima_bprm_check(struct linux_binprm *bprm); | 19 | extern int ima_bprm_check(struct linux_binprm *bprm); |
| 20 | extern int ima_file_check(struct file *file, int mask); | 20 | extern int ima_file_check(struct file *file, int mask); |
| 21 | extern void ima_post_create_tmpfile(struct inode *inode); | ||
| 21 | extern void ima_file_free(struct file *file); | 22 | extern void ima_file_free(struct file *file); |
| 22 | extern int ima_file_mmap(struct file *file, unsigned long prot); | 23 | extern int ima_file_mmap(struct file *file, unsigned long prot); |
| 23 | extern int ima_load_data(enum kernel_load_data_id id); | 24 | extern int ima_load_data(enum kernel_load_data_id id); |
| @@ -56,6 +57,10 @@ static inline int ima_file_check(struct file *file, int mask) | |||
| 56 | return 0; | 57 | return 0; |
| 57 | } | 58 | } |
| 58 | 59 | ||
| 60 | static inline void ima_post_create_tmpfile(struct inode *inode) | ||
| 61 | { | ||
| 62 | } | ||
| 63 | |||
| 59 | static inline void ima_file_free(struct file *file) | 64 | static inline void ima_file_free(struct file *file) |
| 60 | { | 65 | { |
| 61 | return; | 66 | return; |
diff --git a/include/linux/in.h b/include/linux/in.h index 31b493734763..435e7f2a513a 100644 --- a/include/linux/in.h +++ b/include/linux/in.h | |||
| @@ -60,6 +60,11 @@ static inline bool ipv4_is_lbcast(__be32 addr) | |||
| 60 | return addr == htonl(INADDR_BROADCAST); | 60 | return addr == htonl(INADDR_BROADCAST); |
| 61 | } | 61 | } |
| 62 | 62 | ||
| 63 | static inline bool ipv4_is_all_snoopers(__be32 addr) | ||
| 64 | { | ||
| 65 | return addr == htonl(INADDR_ALLSNOOPERS_GROUP); | ||
| 66 | } | ||
| 67 | |||
| 63 | static inline bool ipv4_is_zeronet(__be32 addr) | 68 | static inline bool ipv4_is_zeronet(__be32 addr) |
| 64 | { | 69 | { |
| 65 | return (addr & htonl(0xff000000)) == htonl(0x00000000); | 70 | return (addr & htonl(0xff000000)) == htonl(0x00000000); |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index a7083a45a26c..6049baa5b8bc 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #include <linux/securebits.h> | 13 | #include <linux/securebits.h> |
| 14 | #include <linux/seqlock.h> | 14 | #include <linux/seqlock.h> |
| 15 | #include <linux/rbtree.h> | 15 | #include <linux/rbtree.h> |
| 16 | #include <linux/refcount.h> | ||
| 16 | #include <linux/sched/autogroup.h> | 17 | #include <linux/sched/autogroup.h> |
| 17 | #include <net/net_namespace.h> | 18 | #include <net/net_namespace.h> |
| 18 | #include <linux/sched/rt.h> | 19 | #include <linux/sched/rt.h> |
diff --git a/include/linux/initrd.h b/include/linux/initrd.h index 14beaff9b445..d77fe34fb00a 100644 --- a/include/linux/initrd.h +++ b/include/linux/initrd.h | |||
| @@ -25,3 +25,6 @@ extern phys_addr_t phys_initrd_start; | |||
| 25 | extern unsigned long phys_initrd_size; | 25 | extern unsigned long phys_initrd_size; |
| 26 | 26 | ||
| 27 | extern unsigned int real_root_dev; | 27 | extern unsigned int real_root_dev; |
| 28 | |||
| 29 | extern char __initramfs_start[]; | ||
| 30 | extern unsigned long __initramfs_size; | ||
diff --git a/include/linux/input/ili210x.h b/include/linux/input/ili210x.h deleted file mode 100644 index b76e7c1404cd..000000000000 --- a/include/linux/input/ili210x.h +++ /dev/null | |||
| @@ -1,11 +0,0 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _ILI210X_H | ||
| 3 | #define _ILI210X_H | ||
| 4 | |||
| 5 | struct ili210x_platform_data { | ||
| 6 | unsigned long irq_flags; | ||
| 7 | unsigned int poll_period; | ||
| 8 | bool (*get_pendown_state)(void); | ||
| 9 | }; | ||
| 10 | |||
| 11 | #endif | ||
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h index 0605f3bf6e79..fa364de9db18 100644 --- a/include/linux/intel-iommu.h +++ b/include/linux/intel-iommu.h | |||
| @@ -374,20 +374,17 @@ enum { | |||
| 374 | #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) | 374 | #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52)) |
| 375 | #define QI_DEV_EIOTLB_MAX_INVS 32 | 375 | #define QI_DEV_EIOTLB_MAX_INVS 32 |
| 376 | 376 | ||
| 377 | #define QI_PGRP_IDX(idx) (((u64)(idx)) << 55) | 377 | /* Page group response descriptor QW0 */ |
| 378 | #define QI_PGRP_PRIV(priv) (((u64)(priv)) << 32) | ||
| 379 | #define QI_PGRP_RESP_CODE(res) ((u64)(res)) | ||
| 380 | #define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) | ||
| 381 | #define QI_PGRP_DID(did) (((u64)(did)) << 16) | ||
| 382 | #define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) | 378 | #define QI_PGRP_PASID_P(p) (((u64)(p)) << 4) |
| 379 | #define QI_PGRP_PDP(p) (((u64)(p)) << 5) | ||
| 380 | #define QI_PGRP_RESP_CODE(res) (((u64)(res)) << 12) | ||
| 381 | #define QI_PGRP_DID(rid) (((u64)(rid)) << 16) | ||
| 382 | #define QI_PGRP_PASID(pasid) (((u64)(pasid)) << 32) | ||
| 383 | |||
| 384 | /* Page group response descriptor QW1 */ | ||
| 385 | #define QI_PGRP_LPIG(x) (((u64)(x)) << 2) | ||
| 386 | #define QI_PGRP_IDX(idx) (((u64)(idx)) << 3) | ||
| 383 | 387 | ||
| 384 | #define QI_PSTRM_ADDR(addr) (((u64)(addr)) & VTD_PAGE_MASK) | ||
| 385 | #define QI_PSTRM_DEVFN(devfn) (((u64)(devfn)) << 4) | ||
| 386 | #define QI_PSTRM_RESP_CODE(res) ((u64)(res)) | ||
| 387 | #define QI_PSTRM_IDX(idx) (((u64)(idx)) << 55) | ||
| 388 | #define QI_PSTRM_PRIV(priv) (((u64)(priv)) << 32) | ||
| 389 | #define QI_PSTRM_BUS(bus) (((u64)(bus)) << 24) | ||
| 390 | #define QI_PSTRM_PASID(pasid) (((u64)(pasid)) << 4) | ||
| 391 | 388 | ||
| 392 | #define QI_RESP_SUCCESS 0x0 | 389 | #define QI_RESP_SUCCESS 0x0 |
| 393 | #define QI_RESP_INVALID 0x1 | 390 | #define QI_RESP_INVALID 0x1 |
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h index 99bc5b3ae26e..e3f76315ca4d 100644 --- a/include/linux/intel-svm.h +++ b/include/linux/intel-svm.h | |||
| @@ -20,7 +20,7 @@ struct device; | |||
| 20 | 20 | ||
| 21 | struct svm_dev_ops { | 21 | struct svm_dev_ops { |
| 22 | void (*fault_cb)(struct device *dev, int pasid, u64 address, | 22 | void (*fault_cb)(struct device *dev, int pasid, u64 address, |
| 23 | u32 private, int rwxp, int response); | 23 | void *private, int rwxp, int response); |
| 24 | }; | 24 | }; |
| 25 | 25 | ||
| 26 | /* Values for rxwp in fault_cb callback */ | 26 | /* Values for rxwp in fault_cb callback */ |
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h new file mode 100644 index 000000000000..63caccadc2db --- /dev/null +++ b/include/linux/interconnect-provider.h | |||
| @@ -0,0 +1,142 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2018, Linaro Ltd. | ||
| 4 | * Author: Georgi Djakov <georgi.djakov@linaro.org> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __LINUX_INTERCONNECT_PROVIDER_H | ||
| 8 | #define __LINUX_INTERCONNECT_PROVIDER_H | ||
| 9 | |||
| 10 | #include <linux/interconnect.h> | ||
| 11 | |||
| 12 | #define icc_units_to_bps(bw) ((bw) * 1000ULL) | ||
| 13 | |||
| 14 | struct icc_node; | ||
| 15 | struct of_phandle_args; | ||
| 16 | |||
| 17 | /** | ||
| 18 | * struct icc_onecell_data - driver data for onecell interconnect providers | ||
| 19 | * | ||
| 20 | * @num_nodes: number of nodes in this device | ||
| 21 | * @nodes: array of pointers to the nodes in this device | ||
| 22 | */ | ||
| 23 | struct icc_onecell_data { | ||
| 24 | unsigned int num_nodes; | ||
| 25 | struct icc_node *nodes[]; | ||
| 26 | }; | ||
| 27 | |||
| 28 | struct icc_node *of_icc_xlate_onecell(struct of_phandle_args *spec, | ||
| 29 | void *data); | ||
| 30 | |||
| 31 | /** | ||
| 32 | * struct icc_provider - interconnect provider (controller) entity that might | ||
| 33 | * provide multiple interconnect controls | ||
| 34 | * | ||
| 35 | * @provider_list: list of the registered interconnect providers | ||
| 36 | * @nodes: internal list of the interconnect provider nodes | ||
| 37 | * @set: pointer to device specific set operation function | ||
| 38 | * @aggregate: pointer to device specific aggregate operation function | ||
| 39 | * @xlate: provider-specific callback for mapping nodes from phandle arguments | ||
| 40 | * @dev: the device this interconnect provider belongs to | ||
| 41 | * @users: count of active users | ||
| 42 | * @data: pointer to private data | ||
| 43 | */ | ||
| 44 | struct icc_provider { | ||
| 45 | struct list_head provider_list; | ||
| 46 | struct list_head nodes; | ||
| 47 | int (*set)(struct icc_node *src, struct icc_node *dst); | ||
| 48 | int (*aggregate)(struct icc_node *node, u32 avg_bw, u32 peak_bw, | ||
| 49 | u32 *agg_avg, u32 *agg_peak); | ||
| 50 | struct icc_node* (*xlate)(struct of_phandle_args *spec, void *data); | ||
| 51 | struct device *dev; | ||
| 52 | int users; | ||
| 53 | void *data; | ||
| 54 | }; | ||
| 55 | |||
| 56 | /** | ||
| 57 | * struct icc_node - entity that is part of the interconnect topology | ||
| 58 | * | ||
| 59 | * @id: platform specific node id | ||
| 60 | * @name: node name used in debugfs | ||
| 61 | * @links: a list of targets pointing to where we can go next when traversing | ||
| 62 | * @num_links: number of links to other interconnect nodes | ||
| 63 | * @provider: points to the interconnect provider of this node | ||
| 64 | * @node_list: the list entry in the parent provider's "nodes" list | ||
| 65 | * @search_list: list used when walking the nodes graph | ||
| 66 | * @reverse: pointer to previous node when walking the nodes graph | ||
| 67 | * @is_traversed: flag that is used when walking the nodes graph | ||
| 68 | * @req_list: a list of QoS constraint requests associated with this node | ||
| 69 | * @avg_bw: aggregated value of average bandwidth requests from all consumers | ||
| 70 | * @peak_bw: aggregated value of peak bandwidth requests from all consumers | ||
| 71 | * @data: pointer to private data | ||
| 72 | */ | ||
| 73 | struct icc_node { | ||
| 74 | int id; | ||
| 75 | const char *name; | ||
| 76 | struct icc_node **links; | ||
| 77 | size_t num_links; | ||
| 78 | |||
| 79 | struct icc_provider *provider; | ||
| 80 | struct list_head node_list; | ||
| 81 | struct list_head search_list; | ||
| 82 | struct icc_node *reverse; | ||
| 83 | u8 is_traversed:1; | ||
| 84 | struct hlist_head req_list; | ||
| 85 | u32 avg_bw; | ||
| 86 | u32 peak_bw; | ||
| 87 | void *data; | ||
| 88 | }; | ||
| 89 | |||
| 90 | #if IS_ENABLED(CONFIG_INTERCONNECT) | ||
| 91 | |||
| 92 | struct icc_node *icc_node_create(int id); | ||
| 93 | void icc_node_destroy(int id); | ||
| 94 | int icc_link_create(struct icc_node *node, const int dst_id); | ||
| 95 | int icc_link_destroy(struct icc_node *src, struct icc_node *dst); | ||
| 96 | void icc_node_add(struct icc_node *node, struct icc_provider *provider); | ||
| 97 | void icc_node_del(struct icc_node *node); | ||
| 98 | int icc_provider_add(struct icc_provider *provider); | ||
| 99 | int icc_provider_del(struct icc_provider *provider); | ||
| 100 | |||
| 101 | #else | ||
| 102 | |||
| 103 | static inline struct icc_node *icc_node_create(int id) | ||
| 104 | { | ||
| 105 | return ERR_PTR(-ENOTSUPP); | ||
| 106 | } | ||
| 107 | |||
| 108 | void icc_node_destroy(int id) | ||
| 109 | { | ||
| 110 | } | ||
| 111 | |||
| 112 | static inline int icc_link_create(struct icc_node *node, const int dst_id) | ||
| 113 | { | ||
| 114 | return -ENOTSUPP; | ||
| 115 | } | ||
| 116 | |||
| 117 | int icc_link_destroy(struct icc_node *src, struct icc_node *dst) | ||
| 118 | { | ||
| 119 | return -ENOTSUPP; | ||
| 120 | } | ||
| 121 | |||
| 122 | void icc_node_add(struct icc_node *node, struct icc_provider *provider) | ||
| 123 | { | ||
| 124 | } | ||
| 125 | |||
| 126 | void icc_node_del(struct icc_node *node) | ||
| 127 | { | ||
| 128 | } | ||
| 129 | |||
| 130 | static inline int icc_provider_add(struct icc_provider *provider) | ||
| 131 | { | ||
| 132 | return -ENOTSUPP; | ||
| 133 | } | ||
| 134 | |||
| 135 | static inline int icc_provider_del(struct icc_provider *provider) | ||
| 136 | { | ||
| 137 | return -ENOTSUPP; | ||
| 138 | } | ||
| 139 | |||
| 140 | #endif /* CONFIG_INTERCONNECT */ | ||
| 141 | |||
| 142 | #endif /* __LINUX_INTERCONNECT_PROVIDER_H */ | ||
diff --git a/include/linux/interconnect.h b/include/linux/interconnect.h new file mode 100644 index 000000000000..dc25864755ba --- /dev/null +++ b/include/linux/interconnect.h | |||
| @@ -0,0 +1,59 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (c) 2018-2019, Linaro Ltd. | ||
| 4 | * Author: Georgi Djakov <georgi.djakov@linaro.org> | ||
| 5 | */ | ||
| 6 | |||
| 7 | #ifndef __LINUX_INTERCONNECT_H | ||
| 8 | #define __LINUX_INTERCONNECT_H | ||
| 9 | |||
| 10 | #include <linux/mutex.h> | ||
| 11 | #include <linux/types.h> | ||
| 12 | |||
| 13 | /* macros for converting to icc units */ | ||
| 14 | #define Bps_to_icc(x) ((x) / 1000) | ||
| 15 | #define kBps_to_icc(x) (x) | ||
| 16 | #define MBps_to_icc(x) ((x) * 1000) | ||
| 17 | #define GBps_to_icc(x) ((x) * 1000 * 1000) | ||
| 18 | #define bps_to_icc(x) (1) | ||
| 19 | #define kbps_to_icc(x) ((x) / 8 + ((x) % 8 ? 1 : 0)) | ||
| 20 | #define Mbps_to_icc(x) ((x) * 1000 / 8) | ||
| 21 | #define Gbps_to_icc(x) ((x) * 1000 * 1000 / 8) | ||
| 22 | |||
| 23 | struct icc_path; | ||
| 24 | struct device; | ||
| 25 | |||
| 26 | #if IS_ENABLED(CONFIG_INTERCONNECT) | ||
| 27 | |||
| 28 | struct icc_path *icc_get(struct device *dev, const int src_id, | ||
| 29 | const int dst_id); | ||
| 30 | struct icc_path *of_icc_get(struct device *dev, const char *name); | ||
| 31 | void icc_put(struct icc_path *path); | ||
| 32 | int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw); | ||
| 33 | |||
| 34 | #else | ||
| 35 | |||
| 36 | static inline struct icc_path *icc_get(struct device *dev, const int src_id, | ||
| 37 | const int dst_id) | ||
| 38 | { | ||
| 39 | return NULL; | ||
| 40 | } | ||
| 41 | |||
| 42 | static inline struct icc_path *of_icc_get(struct device *dev, | ||
| 43 | const char *name) | ||
| 44 | { | ||
| 45 | return NULL; | ||
| 46 | } | ||
| 47 | |||
| 48 | static inline void icc_put(struct icc_path *path) | ||
| 49 | { | ||
| 50 | } | ||
| 51 | |||
| 52 | static inline int icc_set_bw(struct icc_path *path, u32 avg_bw, u32 peak_bw) | ||
| 53 | { | ||
| 54 | return 0; | ||
| 55 | } | ||
| 56 | |||
| 57 | #endif /* CONFIG_INTERCONNECT */ | ||
| 58 | |||
| 59 | #endif /* __LINUX_INTERCONNECT_H */ | ||
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h index c672f34235e7..690b238a44d5 100644 --- a/include/linux/interrupt.h +++ b/include/linux/interrupt.h | |||
| @@ -156,6 +156,10 @@ __request_percpu_irq(unsigned int irq, irq_handler_t handler, | |||
| 156 | unsigned long flags, const char *devname, | 156 | unsigned long flags, const char *devname, |
| 157 | void __percpu *percpu_dev_id); | 157 | void __percpu *percpu_dev_id); |
| 158 | 158 | ||
| 159 | extern int __must_check | ||
| 160 | request_nmi(unsigned int irq, irq_handler_t handler, unsigned long flags, | ||
| 161 | const char *name, void *dev); | ||
| 162 | |||
| 159 | static inline int __must_check | 163 | static inline int __must_check |
| 160 | request_percpu_irq(unsigned int irq, irq_handler_t handler, | 164 | request_percpu_irq(unsigned int irq, irq_handler_t handler, |
| 161 | const char *devname, void __percpu *percpu_dev_id) | 165 | const char *devname, void __percpu *percpu_dev_id) |
| @@ -164,9 +168,16 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler, | |||
| 164 | devname, percpu_dev_id); | 168 | devname, percpu_dev_id); |
| 165 | } | 169 | } |
| 166 | 170 | ||
| 171 | extern int __must_check | ||
| 172 | request_percpu_nmi(unsigned int irq, irq_handler_t handler, | ||
| 173 | const char *devname, void __percpu *dev); | ||
| 174 | |||
| 167 | extern const void *free_irq(unsigned int, void *); | 175 | extern const void *free_irq(unsigned int, void *); |
| 168 | extern void free_percpu_irq(unsigned int, void __percpu *); | 176 | extern void free_percpu_irq(unsigned int, void __percpu *); |
| 169 | 177 | ||
| 178 | extern const void *free_nmi(unsigned int irq, void *dev_id); | ||
| 179 | extern void free_percpu_nmi(unsigned int irq, void __percpu *percpu_dev_id); | ||
| 180 | |||
| 170 | struct device; | 181 | struct device; |
| 171 | 182 | ||
| 172 | extern int __must_check | 183 | extern int __must_check |
| @@ -217,6 +228,13 @@ extern void enable_percpu_irq(unsigned int irq, unsigned int type); | |||
| 217 | extern bool irq_percpu_is_enabled(unsigned int irq); | 228 | extern bool irq_percpu_is_enabled(unsigned int irq); |
| 218 | extern void irq_wake_thread(unsigned int irq, void *dev_id); | 229 | extern void irq_wake_thread(unsigned int irq, void *dev_id); |
| 219 | 230 | ||
| 231 | extern void disable_nmi_nosync(unsigned int irq); | ||
| 232 | extern void disable_percpu_nmi(unsigned int irq); | ||
| 233 | extern void enable_nmi(unsigned int irq); | ||
| 234 | extern void enable_percpu_nmi(unsigned int irq, unsigned int type); | ||
| 235 | extern int prepare_percpu_nmi(unsigned int irq); | ||
| 236 | extern void teardown_percpu_nmi(unsigned int irq); | ||
| 237 | |||
| 220 | /* The following three functions are for the core kernel use only. */ | 238 | /* The following three functions are for the core kernel use only. */ |
| 221 | extern void suspend_device_irqs(void); | 239 | extern void suspend_device_irqs(void); |
| 222 | extern void resume_device_irqs(void); | 240 | extern void resume_device_irqs(void); |
| @@ -241,25 +259,35 @@ struct irq_affinity_notify { | |||
| 241 | void (*release)(struct kref *ref); | 259 | void (*release)(struct kref *ref); |
| 242 | }; | 260 | }; |
| 243 | 261 | ||
| 262 | #define IRQ_AFFINITY_MAX_SETS 4 | ||
| 263 | |||
| 244 | /** | 264 | /** |
| 245 | * struct irq_affinity - Description for automatic irq affinity assignements | 265 | * struct irq_affinity - Description for automatic irq affinity assignements |
| 246 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of | 266 | * @pre_vectors: Don't apply affinity to @pre_vectors at beginning of |
| 247 | * the MSI(-X) vector space | 267 | * the MSI(-X) vector space |
| 248 | * @post_vectors: Don't apply affinity to @post_vectors at end of | 268 | * @post_vectors: Don't apply affinity to @post_vectors at end of |
| 249 | * the MSI(-X) vector space | 269 | * the MSI(-X) vector space |
| 250 | * @nr_sets: Length of passed in *sets array | 270 | * @nr_sets: The number of interrupt sets for which affinity |
| 251 | * @sets: Number of affinitized sets | 271 | * spreading is required |
| 272 | * @set_size: Array holding the size of each interrupt set | ||
| 273 | * @calc_sets: Callback for calculating the number and size | ||
| 274 | * of interrupt sets | ||
| 275 | * @priv: Private data for usage by @calc_sets, usually a | ||
| 276 | * pointer to driver/device specific data. | ||
| 252 | */ | 277 | */ |
| 253 | struct irq_affinity { | 278 | struct irq_affinity { |
| 254 | int pre_vectors; | 279 | unsigned int pre_vectors; |
| 255 | int post_vectors; | 280 | unsigned int post_vectors; |
| 256 | int nr_sets; | 281 | unsigned int nr_sets; |
| 257 | int *sets; | 282 | unsigned int set_size[IRQ_AFFINITY_MAX_SETS]; |
| 283 | void (*calc_sets)(struct irq_affinity *, unsigned int nvecs); | ||
| 284 | void *priv; | ||
| 258 | }; | 285 | }; |
| 259 | 286 | ||
| 260 | /** | 287 | /** |
| 261 | * struct irq_affinity_desc - Interrupt affinity descriptor | 288 | * struct irq_affinity_desc - Interrupt affinity descriptor |
| 262 | * @mask: cpumask to hold the affinity assignment | 289 | * @mask: cpumask to hold the affinity assignment |
| 290 | * @is_managed: 1 if the interrupt is managed internally | ||
| 263 | */ | 291 | */ |
| 264 | struct irq_affinity_desc { | 292 | struct irq_affinity_desc { |
| 265 | struct cpumask mask; | 293 | struct cpumask mask; |
| @@ -313,9 +341,10 @@ extern int | |||
| 313 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); | 341 | irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify); |
| 314 | 342 | ||
| 315 | struct irq_affinity_desc * | 343 | struct irq_affinity_desc * |
| 316 | irq_create_affinity_masks(int nvec, const struct irq_affinity *affd); | 344 | irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd); |
| 317 | 345 | ||
| 318 | int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd); | 346 | unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
| 347 | const struct irq_affinity *affd); | ||
| 319 | 348 | ||
| 320 | #else /* CONFIG_SMP */ | 349 | #else /* CONFIG_SMP */ |
| 321 | 350 | ||
| @@ -349,13 +378,14 @@ irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify) | |||
| 349 | } | 378 | } |
| 350 | 379 | ||
| 351 | static inline struct irq_affinity_desc * | 380 | static inline struct irq_affinity_desc * |
| 352 | irq_create_affinity_masks(int nvec, const struct irq_affinity *affd) | 381 | irq_create_affinity_masks(unsigned int nvec, struct irq_affinity *affd) |
| 353 | { | 382 | { |
| 354 | return NULL; | 383 | return NULL; |
| 355 | } | 384 | } |
| 356 | 385 | ||
| 357 | static inline int | 386 | static inline unsigned int |
| 358 | irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity *affd) | 387 | irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec, |
| 388 | const struct irq_affinity *affd) | ||
| 359 | { | 389 | { |
| 360 | return maxvec; | 390 | return maxvec; |
| 361 | } | 391 | } |
diff --git a/include/linux/io-64-nonatomic-hi-lo.h b/include/linux/io-64-nonatomic-hi-lo.h index 862d786a904f..ae21b72cce85 100644 --- a/include/linux/io-64-nonatomic-hi-lo.h +++ b/include/linux/io-64-nonatomic-hi-lo.h | |||
| @@ -55,4 +55,68 @@ static inline void hi_lo_writeq_relaxed(__u64 val, volatile void __iomem *addr) | |||
| 55 | #define writeq_relaxed hi_lo_writeq_relaxed | 55 | #define writeq_relaxed hi_lo_writeq_relaxed |
| 56 | #endif | 56 | #endif |
| 57 | 57 | ||
| 58 | #ifndef ioread64_hi_lo | ||
| 59 | #define ioread64_hi_lo ioread64_hi_lo | ||
| 60 | static inline u64 ioread64_hi_lo(void __iomem *addr) | ||
| 61 | { | ||
| 62 | u32 low, high; | ||
| 63 | |||
| 64 | high = ioread32(addr + sizeof(u32)); | ||
| 65 | low = ioread32(addr); | ||
| 66 | |||
| 67 | return low + ((u64)high << 32); | ||
| 68 | } | ||
| 69 | #endif | ||
| 70 | |||
| 71 | #ifndef iowrite64_hi_lo | ||
| 72 | #define iowrite64_hi_lo iowrite64_hi_lo | ||
| 73 | static inline void iowrite64_hi_lo(u64 val, void __iomem *addr) | ||
| 74 | { | ||
| 75 | iowrite32(val >> 32, addr + sizeof(u32)); | ||
| 76 | iowrite32(val, addr); | ||
| 77 | } | ||
| 78 | #endif | ||
| 79 | |||
| 80 | #ifndef ioread64be_hi_lo | ||
| 81 | #define ioread64be_hi_lo ioread64be_hi_lo | ||
| 82 | static inline u64 ioread64be_hi_lo(void __iomem *addr) | ||
| 83 | { | ||
| 84 | u32 low, high; | ||
| 85 | |||
| 86 | high = ioread32be(addr); | ||
| 87 | low = ioread32be(addr + sizeof(u32)); | ||
| 88 | |||
| 89 | return low + ((u64)high << 32); | ||
| 90 | } | ||
| 91 | #endif | ||
| 92 | |||
| 93 | #ifndef iowrite64be_hi_lo | ||
| 94 | #define iowrite64be_hi_lo iowrite64be_hi_lo | ||
| 95 | static inline void iowrite64be_hi_lo(u64 val, void __iomem *addr) | ||
| 96 | { | ||
| 97 | iowrite32be(val >> 32, addr); | ||
| 98 | iowrite32be(val, addr + sizeof(u32)); | ||
| 99 | } | ||
| 100 | #endif | ||
| 101 | |||
| 102 | #ifndef ioread64 | ||
| 103 | #define ioread64_is_nonatomic | ||
| 104 | #define ioread64 ioread64_hi_lo | ||
| 105 | #endif | ||
| 106 | |||
| 107 | #ifndef iowrite64 | ||
| 108 | #define iowrite64_is_nonatomic | ||
| 109 | #define iowrite64 iowrite64_hi_lo | ||
| 110 | #endif | ||
| 111 | |||
| 112 | #ifndef ioread64be | ||
| 113 | #define ioread64be_is_nonatomic | ||
| 114 | #define ioread64be ioread64be_hi_lo | ||
| 115 | #endif | ||
| 116 | |||
| 117 | #ifndef iowrite64be | ||
| 118 | #define iowrite64be_is_nonatomic | ||
| 119 | #define iowrite64be iowrite64be_hi_lo | ||
| 120 | #endif | ||
| 121 | |||
| 58 | #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ | 122 | #endif /* _LINUX_IO_64_NONATOMIC_HI_LO_H_ */ |
diff --git a/include/linux/io-64-nonatomic-lo-hi.h b/include/linux/io-64-nonatomic-lo-hi.h index d042e7bb5adb..faaa842dbdb9 100644 --- a/include/linux/io-64-nonatomic-lo-hi.h +++ b/include/linux/io-64-nonatomic-lo-hi.h | |||
| @@ -55,4 +55,68 @@ static inline void lo_hi_writeq_relaxed(__u64 val, volatile void __iomem *addr) | |||
| 55 | #define writeq_relaxed lo_hi_writeq_relaxed | 55 | #define writeq_relaxed lo_hi_writeq_relaxed |
| 56 | #endif | 56 | #endif |
| 57 | 57 | ||
| 58 | #ifndef ioread64_lo_hi | ||
| 59 | #define ioread64_lo_hi ioread64_lo_hi | ||
| 60 | static inline u64 ioread64_lo_hi(void __iomem *addr) | ||
| 61 | { | ||
| 62 | u32 low, high; | ||
| 63 | |||
| 64 | low = ioread32(addr); | ||
| 65 | high = ioread32(addr + sizeof(u32)); | ||
| 66 | |||
| 67 | return low + ((u64)high << 32); | ||
| 68 | } | ||
| 69 | #endif | ||
| 70 | |||
| 71 | #ifndef iowrite64_lo_hi | ||
| 72 | #define iowrite64_lo_hi iowrite64_lo_hi | ||
| 73 | static inline void iowrite64_lo_hi(u64 val, void __iomem *addr) | ||
| 74 | { | ||
| 75 | iowrite32(val, addr); | ||
| 76 | iowrite32(val >> 32, addr + sizeof(u32)); | ||
| 77 | } | ||
| 78 | #endif | ||
| 79 | |||
| 80 | #ifndef ioread64be_lo_hi | ||
| 81 | #define ioread64be_lo_hi ioread64be_lo_hi | ||
| 82 | static inline u64 ioread64be_lo_hi(void __iomem *addr) | ||
| 83 | { | ||
| 84 | u32 low, high; | ||
| 85 | |||
| 86 | low = ioread32be(addr + sizeof(u32)); | ||
| 87 | high = ioread32be(addr); | ||
| 88 | |||
| 89 | return low + ((u64)high << 32); | ||
| 90 | } | ||
| 91 | #endif | ||
| 92 | |||
| 93 | #ifndef iowrite64be_lo_hi | ||
| 94 | #define iowrite64be_lo_hi iowrite64be_lo_hi | ||
| 95 | static inline void iowrite64be_lo_hi(u64 val, void __iomem *addr) | ||
| 96 | { | ||
| 97 | iowrite32be(val, addr + sizeof(u32)); | ||
| 98 | iowrite32be(val >> 32, addr); | ||
| 99 | } | ||
| 100 | #endif | ||
| 101 | |||
| 102 | #ifndef ioread64 | ||
| 103 | #define ioread64_is_nonatomic | ||
| 104 | #define ioread64 ioread64_lo_hi | ||
| 105 | #endif | ||
| 106 | |||
| 107 | #ifndef iowrite64 | ||
| 108 | #define iowrite64_is_nonatomic | ||
| 109 | #define iowrite64 iowrite64_lo_hi | ||
| 110 | #endif | ||
| 111 | |||
| 112 | #ifndef ioread64be | ||
| 113 | #define ioread64be_is_nonatomic | ||
| 114 | #define ioread64be ioread64be_lo_hi | ||
| 115 | #endif | ||
| 116 | |||
| 117 | #ifndef iowrite64be | ||
| 118 | #define iowrite64be_is_nonatomic | ||
| 119 | #define iowrite64be iowrite64be_lo_hi | ||
| 120 | #endif | ||
| 121 | |||
| 58 | #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ | 122 | #endif /* _LINUX_IO_64_NONATOMIC_LO_HI_H_ */ |
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h new file mode 100644 index 000000000000..47d5ae559329 --- /dev/null +++ b/include/linux/io-pgtable.h | |||
| @@ -0,0 +1,213 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef __IO_PGTABLE_H | ||
| 3 | #define __IO_PGTABLE_H | ||
| 4 | #include <linux/bitops.h> | ||
| 5 | |||
| 6 | /* | ||
| 7 | * Public API for use by IOMMU drivers | ||
| 8 | */ | ||
| 9 | enum io_pgtable_fmt { | ||
| 10 | ARM_32_LPAE_S1, | ||
| 11 | ARM_32_LPAE_S2, | ||
| 12 | ARM_64_LPAE_S1, | ||
| 13 | ARM_64_LPAE_S2, | ||
| 14 | ARM_V7S, | ||
| 15 | IO_PGTABLE_NUM_FMTS, | ||
| 16 | }; | ||
| 17 | |||
| 18 | /** | ||
| 19 | * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. | ||
| 20 | * | ||
| 21 | * @tlb_flush_all: Synchronously invalidate the entire TLB context. | ||
| 22 | * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. | ||
| 23 | * @tlb_sync: Ensure any queued TLB invalidation has taken effect, and | ||
| 24 | * any corresponding page table updates are visible to the | ||
| 25 | * IOMMU. | ||
| 26 | * | ||
| 27 | * Note that these can all be called in atomic context and must therefore | ||
| 28 | * not block. | ||
| 29 | */ | ||
| 30 | struct iommu_gather_ops { | ||
| 31 | void (*tlb_flush_all)(void *cookie); | ||
| 32 | void (*tlb_add_flush)(unsigned long iova, size_t size, size_t granule, | ||
| 33 | bool leaf, void *cookie); | ||
| 34 | void (*tlb_sync)(void *cookie); | ||
| 35 | }; | ||
| 36 | |||
| 37 | /** | ||
| 38 | * struct io_pgtable_cfg - Configuration data for a set of page tables. | ||
| 39 | * | ||
| 40 | * @quirks: A bitmap of hardware quirks that require some special | ||
| 41 | * action by the low-level page table allocator. | ||
| 42 | * @pgsize_bitmap: A bitmap of page sizes supported by this set of page | ||
| 43 | * tables. | ||
| 44 | * @ias: Input address (iova) size, in bits. | ||
| 45 | * @oas: Output address (paddr) size, in bits. | ||
| 46 | * @tlb: TLB management callbacks for this set of tables. | ||
| 47 | * @iommu_dev: The device representing the DMA configuration for the | ||
| 48 | * page table walker. | ||
| 49 | */ | ||
| 50 | struct io_pgtable_cfg { | ||
| 51 | /* | ||
| 52 | * IO_PGTABLE_QUIRK_ARM_NS: (ARM formats) Set NS and NSTABLE bits in | ||
| 53 | * stage 1 PTEs, for hardware which insists on validating them | ||
| 54 | * even in non-secure state where they should normally be ignored. | ||
| 55 | * | ||
| 56 | * IO_PGTABLE_QUIRK_NO_PERMS: Ignore the IOMMU_READ, IOMMU_WRITE and | ||
| 57 | * IOMMU_NOEXEC flags and map everything with full access, for | ||
| 58 | * hardware which does not implement the permissions of a given | ||
| 59 | * format, and/or requires some format-specific default value. | ||
| 60 | * | ||
| 61 | * IO_PGTABLE_QUIRK_TLBI_ON_MAP: If the format forbids caching invalid | ||
| 62 | * (unmapped) entries but the hardware might do so anyway, perform | ||
| 63 | * TLB maintenance when mapping as well as when unmapping. | ||
| 64 | * | ||
| 65 | * IO_PGTABLE_QUIRK_ARM_MTK_4GB: (ARM v7s format) Set bit 9 in all | ||
| 66 | * PTEs, for Mediatek IOMMUs which treat it as a 33rd address bit | ||
| 67 | * when the SoC is in "4GB mode" and they can only access the high | ||
| 68 | * remap of DRAM (0x1_00000000 to 0x1_ffffffff). | ||
| 69 | * | ||
| 70 | * IO_PGTABLE_QUIRK_NO_DMA: Guarantees that the tables will only ever | ||
| 71 | * be accessed by a fully cache-coherent IOMMU or CPU (e.g. for a | ||
| 72 | * software-emulated IOMMU), such that pagetable updates need not | ||
| 73 | * be treated as explicit DMA data. | ||
| 74 | * | ||
| 75 | * IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs | ||
| 76 | * on unmap, for DMA domains using the flush queue mechanism for | ||
| 77 | * delayed invalidation. | ||
| 78 | */ | ||
| 79 | #define IO_PGTABLE_QUIRK_ARM_NS BIT(0) | ||
| 80 | #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) | ||
| 81 | #define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2) | ||
| 82 | #define IO_PGTABLE_QUIRK_ARM_MTK_4GB BIT(3) | ||
| 83 | #define IO_PGTABLE_QUIRK_NO_DMA BIT(4) | ||
| 84 | #define IO_PGTABLE_QUIRK_NON_STRICT BIT(5) | ||
| 85 | unsigned long quirks; | ||
| 86 | unsigned long pgsize_bitmap; | ||
| 87 | unsigned int ias; | ||
| 88 | unsigned int oas; | ||
| 89 | const struct iommu_gather_ops *tlb; | ||
| 90 | struct device *iommu_dev; | ||
| 91 | |||
| 92 | /* Low-level data specific to the table format */ | ||
| 93 | union { | ||
| 94 | struct { | ||
| 95 | u64 ttbr[2]; | ||
| 96 | u64 tcr; | ||
| 97 | u64 mair[2]; | ||
| 98 | } arm_lpae_s1_cfg; | ||
| 99 | |||
| 100 | struct { | ||
| 101 | u64 vttbr; | ||
| 102 | u64 vtcr; | ||
| 103 | } arm_lpae_s2_cfg; | ||
| 104 | |||
| 105 | struct { | ||
| 106 | u32 ttbr[2]; | ||
| 107 | u32 tcr; | ||
| 108 | u32 nmrr; | ||
| 109 | u32 prrr; | ||
| 110 | } arm_v7s_cfg; | ||
| 111 | }; | ||
| 112 | }; | ||
| 113 | |||
| 114 | /** | ||
| 115 | * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. | ||
| 116 | * | ||
| 117 | * @map: Map a physically contiguous memory region. | ||
| 118 | * @unmap: Unmap a physically contiguous memory region. | ||
| 119 | * @iova_to_phys: Translate iova to physical address. | ||
| 120 | * | ||
| 121 | * These functions map directly onto the iommu_ops member functions with | ||
| 122 | * the same names. | ||
| 123 | */ | ||
| 124 | struct io_pgtable_ops { | ||
| 125 | int (*map)(struct io_pgtable_ops *ops, unsigned long iova, | ||
| 126 | phys_addr_t paddr, size_t size, int prot); | ||
| 127 | size_t (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, | ||
| 128 | size_t size); | ||
| 129 | phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, | ||
| 130 | unsigned long iova); | ||
| 131 | }; | ||
| 132 | |||
| 133 | /** | ||
| 134 | * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU. | ||
| 135 | * | ||
| 136 | * @fmt: The page table format. | ||
| 137 | * @cfg: The page table configuration. This will be modified to represent | ||
| 138 | * the configuration actually provided by the allocator (e.g. the | ||
| 139 | * pgsize_bitmap may be restricted). | ||
| 140 | * @cookie: An opaque token provided by the IOMMU driver and passed back to | ||
| 141 | * the callback routines in cfg->tlb. | ||
| 142 | */ | ||
| 143 | struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, | ||
| 144 | struct io_pgtable_cfg *cfg, | ||
| 145 | void *cookie); | ||
| 146 | |||
| 147 | /** | ||
| 148 | * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller | ||
| 149 | * *must* ensure that the page table is no longer | ||
| 150 | * live, but the TLB can be dirty. | ||
| 151 | * | ||
| 152 | * @ops: The ops returned from alloc_io_pgtable_ops. | ||
| 153 | */ | ||
| 154 | void free_io_pgtable_ops(struct io_pgtable_ops *ops); | ||
| 155 | |||
| 156 | |||
| 157 | /* | ||
| 158 | * Internal structures for page table allocator implementations. | ||
| 159 | */ | ||
| 160 | |||
| 161 | /** | ||
| 162 | * struct io_pgtable - Internal structure describing a set of page tables. | ||
| 163 | * | ||
| 164 | * @fmt: The page table format. | ||
| 165 | * @cookie: An opaque token provided by the IOMMU driver and passed back to | ||
| 166 | * any callback routines. | ||
| 167 | * @cfg: A copy of the page table configuration. | ||
| 168 | * @ops: The page table operations in use for this set of page tables. | ||
| 169 | */ | ||
| 170 | struct io_pgtable { | ||
| 171 | enum io_pgtable_fmt fmt; | ||
| 172 | void *cookie; | ||
| 173 | struct io_pgtable_cfg cfg; | ||
| 174 | struct io_pgtable_ops ops; | ||
| 175 | }; | ||
| 176 | |||
| 177 | #define io_pgtable_ops_to_pgtable(x) container_of((x), struct io_pgtable, ops) | ||
| 178 | |||
| 179 | static inline void io_pgtable_tlb_flush_all(struct io_pgtable *iop) | ||
| 180 | { | ||
| 181 | iop->cfg.tlb->tlb_flush_all(iop->cookie); | ||
| 182 | } | ||
| 183 | |||
| 184 | static inline void io_pgtable_tlb_add_flush(struct io_pgtable *iop, | ||
| 185 | unsigned long iova, size_t size, size_t granule, bool leaf) | ||
| 186 | { | ||
| 187 | iop->cfg.tlb->tlb_add_flush(iova, size, granule, leaf, iop->cookie); | ||
| 188 | } | ||
| 189 | |||
| 190 | static inline void io_pgtable_tlb_sync(struct io_pgtable *iop) | ||
| 191 | { | ||
| 192 | iop->cfg.tlb->tlb_sync(iop->cookie); | ||
| 193 | } | ||
| 194 | |||
| 195 | /** | ||
| 196 | * struct io_pgtable_init_fns - Alloc/free a set of page tables for a | ||
| 197 | * particular format. | ||
| 198 | * | ||
| 199 | * @alloc: Allocate a set of page tables described by cfg. | ||
| 200 | * @free: Free the page tables associated with iop. | ||
| 201 | */ | ||
| 202 | struct io_pgtable_init_fns { | ||
| 203 | struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie); | ||
| 204 | void (*free)(struct io_pgtable *iop); | ||
| 205 | }; | ||
| 206 | |||
| 207 | extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; | ||
| 208 | extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; | ||
| 209 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; | ||
| 210 | extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; | ||
| 211 | extern struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns; | ||
| 212 | |||
| 213 | #endif /* __IO_PGTABLE_H */ | ||
diff --git a/include/linux/iomap.h b/include/linux/iomap.h index 9a4258154b25..0fefb5455bda 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h | |||
| @@ -162,6 +162,7 @@ typedef int (iomap_dio_end_io_t)(struct kiocb *iocb, ssize_t ret, | |||
| 162 | unsigned flags); | 162 | unsigned flags); |
| 163 | ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, | 163 | ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter, |
| 164 | const struct iomap_ops *ops, iomap_dio_end_io_t end_io); | 164 | const struct iomap_ops *ops, iomap_dio_end_io_t end_io); |
| 165 | int iomap_dio_iopoll(struct kiocb *kiocb, bool spin); | ||
| 165 | 166 | ||
| 166 | #ifdef CONFIG_SWAP | 167 | #ifdef CONFIG_SWAP |
| 167 | struct file; | 168 | struct file; |
diff --git a/include/linux/iommu.h b/include/linux/iommu.h index e90da6b6f3d1..ffbbc7e39cee 100644 --- a/include/linux/iommu.h +++ b/include/linux/iommu.h | |||
| @@ -167,8 +167,9 @@ struct iommu_resv_region { | |||
| 167 | * @detach_dev: detach device from an iommu domain | 167 | * @detach_dev: detach device from an iommu domain |
| 168 | * @map: map a physically contiguous memory region to an iommu domain | 168 | * @map: map a physically contiguous memory region to an iommu domain |
| 169 | * @unmap: unmap a physically contiguous memory region from an iommu domain | 169 | * @unmap: unmap a physically contiguous memory region from an iommu domain |
| 170 | * @flush_tlb_all: Synchronously flush all hardware TLBs for this domain | 170 | * @flush_iotlb_all: Synchronously flush all hardware TLBs for this domain |
| 171 | * @iotlb_range_add: Add a given iova range to the flush queue for this domain | 171 | * @iotlb_range_add: Add a given iova range to the flush queue for this domain |
| 172 | * @iotlb_sync_map: Sync mappings created recently using @map to the hardware | ||
| 172 | * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush | 173 | * @iotlb_sync: Flush all queued ranges from the hardware TLBs and empty flush |
| 173 | * queue | 174 | * queue |
| 174 | * @iova_to_phys: translate iova to physical address | 175 | * @iova_to_phys: translate iova to physical address |
| @@ -183,6 +184,8 @@ struct iommu_resv_region { | |||
| 183 | * @domain_window_enable: Configure and enable a particular window for a domain | 184 | * @domain_window_enable: Configure and enable a particular window for a domain |
| 184 | * @domain_window_disable: Disable a particular window for a domain | 185 | * @domain_window_disable: Disable a particular window for a domain |
| 185 | * @of_xlate: add OF master IDs to iommu grouping | 186 | * @of_xlate: add OF master IDs to iommu grouping |
| 187 | * @is_attach_deferred: Check if domain attach should be deferred from iommu | ||
| 188 | * driver init to device driver init (default no) | ||
| 186 | * @pgsize_bitmap: bitmap of all possible supported page sizes | 189 | * @pgsize_bitmap: bitmap of all possible supported page sizes |
| 187 | */ | 190 | */ |
| 188 | struct iommu_ops { | 191 | struct iommu_ops { |
| @@ -201,6 +204,7 @@ struct iommu_ops { | |||
| 201 | void (*flush_iotlb_all)(struct iommu_domain *domain); | 204 | void (*flush_iotlb_all)(struct iommu_domain *domain); |
| 202 | void (*iotlb_range_add)(struct iommu_domain *domain, | 205 | void (*iotlb_range_add)(struct iommu_domain *domain, |
| 203 | unsigned long iova, size_t size); | 206 | unsigned long iova, size_t size); |
| 207 | void (*iotlb_sync_map)(struct iommu_domain *domain); | ||
| 204 | void (*iotlb_sync)(struct iommu_domain *domain); | 208 | void (*iotlb_sync)(struct iommu_domain *domain); |
| 205 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); | 209 | phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); |
| 206 | int (*add_device)(struct device *dev); | 210 | int (*add_device)(struct device *dev); |
diff --git a/include/linux/ip.h b/include/linux/ip.h index 492bc6513533..482b7b7c9f30 100644 --- a/include/linux/ip.h +++ b/include/linux/ip.h | |||
| @@ -34,4 +34,9 @@ static inline struct iphdr *ipip_hdr(const struct sk_buff *skb) | |||
| 34 | { | 34 | { |
| 35 | return (struct iphdr *)skb_transport_header(skb); | 35 | return (struct iphdr *)skb_transport_header(skb); |
| 36 | } | 36 | } |
| 37 | |||
| 38 | static inline unsigned int ip_transport_len(const struct sk_buff *skb) | ||
| 39 | { | ||
| 40 | return ntohs(ip_hdr(skb)->tot_len) - skb_network_header_len(skb); | ||
| 41 | } | ||
| 37 | #endif /* _LINUX_IP_H */ | 42 | #endif /* _LINUX_IP_H */ |
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 8c4e2ab696c3..4dc66157d872 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
| @@ -31,6 +31,14 @@ struct device; | |||
| 31 | struct ipmi_smi; | 31 | struct ipmi_smi; |
| 32 | 32 | ||
| 33 | /* | 33 | /* |
| 34 | * Flags for set_check_watch() below. Tells if the SMI should be | ||
| 35 | * waiting for watchdog timeouts, commands and/or messages. | ||
| 36 | */ | ||
| 37 | #define IPMI_WATCH_MASK_CHECK_MESSAGES (1 << 0) | ||
| 38 | #define IPMI_WATCH_MASK_CHECK_WATCHDOG (1 << 1) | ||
| 39 | #define IPMI_WATCH_MASK_CHECK_COMMANDS (1 << 2) | ||
| 40 | |||
| 41 | /* | ||
| 34 | * Messages to/from the lower layer. The smi interface will take one | 42 | * Messages to/from the lower layer. The smi interface will take one |
| 35 | * of these to send. After the send has occurred and a response has | 43 | * of these to send. After the send has occurred and a response has |
| 36 | * been received, it will report this same data structure back up to | 44 | * been received, it will report this same data structure back up to |
| @@ -55,8 +63,10 @@ struct ipmi_smi_msg { | |||
| 55 | int rsp_size; | 63 | int rsp_size; |
| 56 | unsigned char rsp[IPMI_MAX_MSG_LENGTH]; | 64 | unsigned char rsp[IPMI_MAX_MSG_LENGTH]; |
| 57 | 65 | ||
| 58 | /* Will be called when the system is done with the message | 66 | /* |
| 59 | (presumably to free it). */ | 67 | * Will be called when the system is done with the message |
| 68 | * (presumably to free it). | ||
| 69 | */ | ||
| 60 | void (*done)(struct ipmi_smi_msg *msg); | 70 | void (*done)(struct ipmi_smi_msg *msg); |
| 61 | }; | 71 | }; |
| 62 | 72 | ||
| @@ -105,12 +115,15 @@ struct ipmi_smi_handlers { | |||
| 105 | 115 | ||
| 106 | /* | 116 | /* |
| 107 | * Called by the upper layer when some user requires that the | 117 | * Called by the upper layer when some user requires that the |
| 108 | * interface watch for events, received messages, watchdog | 118 | * interface watch for received messages and watchdog |
| 109 | * pretimeouts, or not. Used by the SMI to know if it should | 119 | * pretimeouts (basically do a "Get Flags", or not. Used by |
| 110 | * watch for these. This may be NULL if the SMI does not | 120 | * the SMI to know if it should watch for these. This may be |
| 111 | * implement it. | 121 | * NULL if the SMI does not implement it. watch_mask is from |
| 122 | * IPMI_WATCH_MASK_xxx above. The interface should run slower | ||
| 123 | * timeouts for just watchdog checking or faster timeouts when | ||
| 124 | * waiting for the message queue. | ||
| 112 | */ | 125 | */ |
| 113 | void (*set_need_watch)(void *send_info, bool enable); | 126 | void (*set_need_watch)(void *send_info, unsigned int watch_mask); |
| 114 | 127 | ||
| 115 | /* | 128 | /* |
| 116 | * Called when flushing all pending messages. | 129 | * Called when flushing all pending messages. |
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 495e834c1367..ea7c7906591e 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h | |||
| @@ -104,6 +104,12 @@ static inline struct ipv6hdr *ipipv6_hdr(const struct sk_buff *skb) | |||
| 104 | return (struct ipv6hdr *)skb_transport_header(skb); | 104 | return (struct ipv6hdr *)skb_transport_header(skb); |
| 105 | } | 105 | } |
| 106 | 106 | ||
| 107 | static inline unsigned int ipv6_transport_len(const struct sk_buff *skb) | ||
| 108 | { | ||
| 109 | return ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr) - | ||
| 110 | skb_network_header_len(skb); | ||
| 111 | } | ||
| 112 | |||
| 107 | /* | 113 | /* |
| 108 | This structure contains results of exthdrs parsing | 114 | This structure contains results of exthdrs parsing |
| 109 | as offsets from skb->nh. | 115 | as offsets from skb->nh. |
| @@ -275,7 +281,8 @@ struct ipv6_pinfo { | |||
| 275 | dontfrag:1, | 281 | dontfrag:1, |
| 276 | autoflowlabel:1, | 282 | autoflowlabel:1, |
| 277 | autoflowlabel_set:1, | 283 | autoflowlabel_set:1, |
| 278 | mc_all:1; | 284 | mc_all:1, |
| 285 | rtalert_isolate:1; | ||
| 279 | __u8 min_hopcount; | 286 | __u8 min_hopcount; |
| 280 | __u8 tclass; | 287 | __u8 tclass; |
| 281 | __be32 rcv_flowinfo; | 288 | __be32 rcv_flowinfo; |
diff --git a/include/linux/irq.h b/include/linux/irq.h index def2b2aac8b1..7ae8de5ad0f2 100644 --- a/include/linux/irq.h +++ b/include/linux/irq.h | |||
| @@ -195,7 +195,7 @@ struct irq_data { | |||
| 195 | * IRQD_LEVEL - Interrupt is level triggered | 195 | * IRQD_LEVEL - Interrupt is level triggered |
| 196 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup | 196 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup |
| 197 | * from suspend | 197 | * from suspend |
| 198 | * IRDQ_MOVE_PCNTXT - Interrupt can be moved in process | 198 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process |
| 199 | * context | 199 | * context |
| 200 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt | 200 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt |
| 201 | * IRQD_IRQ_MASKED - Masked state of the interrupt | 201 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
| @@ -442,6 +442,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | |||
| 442 | * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine | 442 | * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine |
| 443 | * @ipi_send_single: send a single IPI to destination cpus | 443 | * @ipi_send_single: send a single IPI to destination cpus |
| 444 | * @ipi_send_mask: send an IPI to destination cpus in cpumask | 444 | * @ipi_send_mask: send an IPI to destination cpus in cpumask |
| 445 | * @irq_nmi_setup: function called from core code before enabling an NMI | ||
| 446 | * @irq_nmi_teardown: function called from core code after disabling an NMI | ||
| 445 | * @flags: chip specific flags | 447 | * @flags: chip specific flags |
| 446 | */ | 448 | */ |
| 447 | struct irq_chip { | 449 | struct irq_chip { |
| @@ -490,6 +492,9 @@ struct irq_chip { | |||
| 490 | void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); | 492 | void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); |
| 491 | void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); | 493 | void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); |
| 492 | 494 | ||
| 495 | int (*irq_nmi_setup)(struct irq_data *data); | ||
| 496 | void (*irq_nmi_teardown)(struct irq_data *data); | ||
| 497 | |||
| 493 | unsigned long flags; | 498 | unsigned long flags; |
| 494 | }; | 499 | }; |
| 495 | 500 | ||
| @@ -505,6 +510,7 @@ struct irq_chip { | |||
| 505 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask | 510 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask |
| 506 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode | 511 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode |
| 507 | * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs | 512 | * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs |
| 513 | * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips | ||
| 508 | */ | 514 | */ |
| 509 | enum { | 515 | enum { |
| 510 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), | 516 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
| @@ -515,6 +521,7 @@ enum { | |||
| 515 | IRQCHIP_ONESHOT_SAFE = (1 << 5), | 521 | IRQCHIP_ONESHOT_SAFE = (1 << 5), |
| 516 | IRQCHIP_EOI_THREADED = (1 << 6), | 522 | IRQCHIP_EOI_THREADED = (1 << 6), |
| 517 | IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), | 523 | IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), |
| 524 | IRQCHIP_SUPPORTS_NMI = (1 << 8), | ||
| 518 | }; | 525 | }; |
| 519 | 526 | ||
| 520 | #include <linux/irqdesc.h> | 527 | #include <linux/irqdesc.h> |
| @@ -594,6 +601,9 @@ extern void handle_percpu_devid_irq(struct irq_desc *desc); | |||
| 594 | extern void handle_bad_irq(struct irq_desc *desc); | 601 | extern void handle_bad_irq(struct irq_desc *desc); |
| 595 | extern void handle_nested_irq(unsigned int irq); | 602 | extern void handle_nested_irq(unsigned int irq); |
| 596 | 603 | ||
| 604 | extern void handle_fasteoi_nmi(struct irq_desc *desc); | ||
| 605 | extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); | ||
| 606 | |||
| 597 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); | 607 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); |
| 598 | extern int irq_chip_pm_get(struct irq_data *data); | 608 | extern int irq_chip_pm_get(struct irq_data *data); |
| 599 | extern int irq_chip_pm_put(struct irq_data *data); | 609 | extern int irq_chip_pm_put(struct irq_data *data); |
| @@ -605,6 +615,7 @@ extern void irq_chip_disable_parent(struct irq_data *data); | |||
| 605 | extern void irq_chip_ack_parent(struct irq_data *data); | 615 | extern void irq_chip_ack_parent(struct irq_data *data); |
| 606 | extern int irq_chip_retrigger_hierarchy(struct irq_data *data); | 616 | extern int irq_chip_retrigger_hierarchy(struct irq_data *data); |
| 607 | extern void irq_chip_mask_parent(struct irq_data *data); | 617 | extern void irq_chip_mask_parent(struct irq_data *data); |
| 618 | extern void irq_chip_mask_ack_parent(struct irq_data *data); | ||
| 608 | extern void irq_chip_unmask_parent(struct irq_data *data); | 619 | extern void irq_chip_unmask_parent(struct irq_data *data); |
| 609 | extern void irq_chip_eoi_parent(struct irq_data *data); | 620 | extern void irq_chip_eoi_parent(struct irq_data *data); |
| 610 | extern int irq_chip_set_affinity_parent(struct irq_data *data, | 621 | extern int irq_chip_set_affinity_parent(struct irq_data *data, |
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h index 071b4cbdf010..c848a7cc502e 100644 --- a/include/linux/irqchip/arm-gic-v3.h +++ b/include/linux/irqchip/arm-gic-v3.h | |||
| @@ -319,7 +319,7 @@ | |||
| 319 | #define GITS_TYPER_PLPIS (1UL << 0) | 319 | #define GITS_TYPER_PLPIS (1UL << 0) |
| 320 | #define GITS_TYPER_VLPIS (1UL << 1) | 320 | #define GITS_TYPER_VLPIS (1UL << 1) |
| 321 | #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 | 321 | #define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4 |
| 322 | #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1) | 322 | #define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1) |
| 323 | #define GITS_TYPER_IDBITS_SHIFT 8 | 323 | #define GITS_TYPER_IDBITS_SHIFT 8 |
| 324 | #define GITS_TYPER_DEVBITS_SHIFT 13 | 324 | #define GITS_TYPER_DEVBITS_SHIFT 13 |
| 325 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) | 325 | #define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) |
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h index 626179077bb0..0f049b384ccd 100644 --- a/include/linux/irqchip/arm-gic.h +++ b/include/linux/irqchip/arm-gic.h | |||
| @@ -158,8 +158,7 @@ int gic_of_init_child(struct device *dev, struct gic_chip_data **gic, int irq); | |||
| 158 | * Legacy platforms not converted to DT yet must use this to init | 158 | * Legacy platforms not converted to DT yet must use this to init |
| 159 | * their GIC | 159 | * their GIC |
| 160 | */ | 160 | */ |
| 161 | void gic_init(unsigned int nr, int start, | 161 | void gic_init(void __iomem *dist , void __iomem *cpu); |
| 162 | void __iomem *dist , void __iomem *cpu); | ||
| 163 | 162 | ||
| 164 | int gicv2m_init(struct fwnode_handle *parent_handle, | 163 | int gicv2m_init(struct fwnode_handle *parent_handle, |
| 165 | struct irq_domain *parent); | 164 | struct irq_domain *parent); |
diff --git a/include/linux/irqchip/irq-davinci-aintc.h b/include/linux/irqchip/irq-davinci-aintc.h new file mode 100644 index 000000000000..ea4e087fac98 --- /dev/null +++ b/include/linux/irqchip/irq-davinci-aintc.h | |||
| @@ -0,0 +1,27 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2019 Texas Instruments | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef _LINUX_IRQ_DAVINCI_AINTC_ | ||
| 7 | #define _LINUX_IRQ_DAVINCI_AINTC_ | ||
| 8 | |||
| 9 | #include <linux/ioport.h> | ||
| 10 | |||
| 11 | /** | ||
| 12 | * struct davinci_aintc_config - configuration data for davinci-aintc driver. | ||
| 13 | * | ||
| 14 | * @reg: register range to map | ||
| 15 | * @num_irqs: number of HW interrupts supported by the controller | ||
| 16 | * @prios: an array of size num_irqs containing priority settings for | ||
| 17 | * each interrupt | ||
| 18 | */ | ||
| 19 | struct davinci_aintc_config { | ||
| 20 | struct resource reg; | ||
| 21 | unsigned int num_irqs; | ||
| 22 | u8 *prios; | ||
| 23 | }; | ||
| 24 | |||
| 25 | void davinci_aintc_init(const struct davinci_aintc_config *config); | ||
| 26 | |||
| 27 | #endif /* _LINUX_IRQ_DAVINCI_AINTC_ */ | ||
diff --git a/include/linux/irqchip/irq-davinci-cp-intc.h b/include/linux/irqchip/irq-davinci-cp-intc.h new file mode 100644 index 000000000000..8d71ed5b5a61 --- /dev/null +++ b/include/linux/irqchip/irq-davinci-cp-intc.h | |||
| @@ -0,0 +1,25 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) 2019 Texas Instruments | ||
| 4 | */ | ||
| 5 | |||
| 6 | #ifndef _LINUX_IRQ_DAVINCI_CP_INTC_ | ||
| 7 | #define _LINUX_IRQ_DAVINCI_CP_INTC_ | ||
| 8 | |||
| 9 | #include <linux/ioport.h> | ||
| 10 | |||
| 11 | /** | ||
| 12 | * struct davinci_cp_intc_config - configuration data for davinci-cp-intc | ||
| 13 | * driver. | ||
| 14 | * | ||
| 15 | * @reg: register range to map | ||
| 16 | * @num_irqs: number of HW interrupts supported by the controller | ||
| 17 | */ | ||
| 18 | struct davinci_cp_intc_config { | ||
| 19 | struct resource reg; | ||
| 20 | unsigned int num_irqs; | ||
| 21 | }; | ||
| 22 | |||
| 23 | int davinci_cp_intc_init(const struct davinci_cp_intc_config *config); | ||
| 24 | |||
| 25 | #endif /* _LINUX_IRQ_DAVINCI_CP_INTC_ */ | ||
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h index dd1e40ddac7d..d6e2ab538ef2 100644 --- a/include/linux/irqdesc.h +++ b/include/linux/irqdesc.h | |||
| @@ -28,6 +28,7 @@ struct pt_regs; | |||
| 28 | * @core_internal_state__do_not_mess_with_it: core internal status information | 28 | * @core_internal_state__do_not_mess_with_it: core internal status information |
| 29 | * @depth: disable-depth, for nested irq_disable() calls | 29 | * @depth: disable-depth, for nested irq_disable() calls |
| 30 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers | 30 | * @wake_depth: enable depth, for multiple irq_set_irq_wake() callers |
| 31 | * @tot_count: stats field for non-percpu irqs | ||
| 31 | * @irq_count: stats field to detect stalled irqs | 32 | * @irq_count: stats field to detect stalled irqs |
| 32 | * @last_unhandled: aging timer for unhandled count | 33 | * @last_unhandled: aging timer for unhandled count |
| 33 | * @irqs_unhandled: stats field for spurious unhandled interrupts | 34 | * @irqs_unhandled: stats field for spurious unhandled interrupts |
| @@ -65,6 +66,7 @@ struct irq_desc { | |||
| 65 | unsigned int core_internal_state__do_not_mess_with_it; | 66 | unsigned int core_internal_state__do_not_mess_with_it; |
| 66 | unsigned int depth; /* nested irq disables */ | 67 | unsigned int depth; /* nested irq disables */ |
| 67 | unsigned int wake_depth; /* nested wake enables */ | 68 | unsigned int wake_depth; /* nested wake enables */ |
| 69 | unsigned int tot_count; | ||
| 68 | unsigned int irq_count; /* For detecting broken IRQs */ | 70 | unsigned int irq_count; /* For detecting broken IRQs */ |
| 69 | unsigned long last_unhandled; /* Aging timer for unhandled count */ | 71 | unsigned long last_unhandled; /* Aging timer for unhandled count */ |
| 70 | unsigned int irqs_unhandled; | 72 | unsigned int irqs_unhandled; |
| @@ -171,6 +173,11 @@ static inline int handle_domain_irq(struct irq_domain *domain, | |||
| 171 | { | 173 | { |
| 172 | return __handle_domain_irq(domain, hwirq, true, regs); | 174 | return __handle_domain_irq(domain, hwirq, true, regs); |
| 173 | } | 175 | } |
| 176 | |||
| 177 | #ifdef CONFIG_IRQ_DOMAIN | ||
| 178 | int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq, | ||
| 179 | struct pt_regs *regs); | ||
| 180 | #endif | ||
| 174 | #endif | 181 | #endif |
| 175 | 182 | ||
| 176 | /* Test to see if a driver has successfully requested an irq */ | 183 | /* Test to see if a driver has successfully requested an irq */ |
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h index 35965f41d7be..61706b430907 100644 --- a/include/linux/irqdomain.h +++ b/include/linux/irqdomain.h | |||
| @@ -265,6 +265,7 @@ extern struct irq_domain *irq_find_matching_fwspec(struct irq_fwspec *fwspec, | |||
| 265 | enum irq_domain_bus_token bus_token); | 265 | enum irq_domain_bus_token bus_token); |
| 266 | extern bool irq_domain_check_msi_remap(void); | 266 | extern bool irq_domain_check_msi_remap(void); |
| 267 | extern void irq_set_default_host(struct irq_domain *host); | 267 | extern void irq_set_default_host(struct irq_domain *host); |
| 268 | extern struct irq_domain *irq_get_default_host(void); | ||
| 268 | extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, | 269 | extern int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, |
| 269 | irq_hw_number_t hwirq, int node, | 270 | irq_hw_number_t hwirq, int node, |
| 270 | const struct irq_affinity_desc *affinity); | 271 | const struct irq_affinity_desc *affinity); |
| @@ -419,6 +420,11 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, | |||
| 419 | const u32 *intspec, unsigned int intsize, | 420 | const u32 *intspec, unsigned int intsize, |
| 420 | irq_hw_number_t *out_hwirq, unsigned int *out_type); | 421 | irq_hw_number_t *out_hwirq, unsigned int *out_type); |
| 421 | 422 | ||
| 423 | int irq_domain_translate_twocell(struct irq_domain *d, | ||
| 424 | struct irq_fwspec *fwspec, | ||
| 425 | unsigned long *out_hwirq, | ||
| 426 | unsigned int *out_type); | ||
| 427 | |||
| 422 | /* IPI functions */ | 428 | /* IPI functions */ |
| 423 | int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); | 429 | int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest); |
| 424 | int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); | 430 | int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest); |
diff --git a/include/linux/kasan-checks.h b/include/linux/kasan-checks.h index d314150658a4..a61dc075e2ce 100644 --- a/include/linux/kasan-checks.h +++ b/include/linux/kasan-checks.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #ifndef _LINUX_KASAN_CHECKS_H | 2 | #ifndef _LINUX_KASAN_CHECKS_H |
| 3 | #define _LINUX_KASAN_CHECKS_H | 3 | #define _LINUX_KASAN_CHECKS_H |
| 4 | 4 | ||
| 5 | #ifdef CONFIG_KASAN | 5 | #if defined(__SANITIZE_ADDRESS__) || defined(__KASAN_INTERNAL) |
| 6 | void kasan_check_read(const volatile void *p, unsigned int size); | 6 | void kasan_check_read(const volatile void *p, unsigned int size); |
| 7 | void kasan_check_write(const volatile void *p, unsigned int size); | 7 | void kasan_check_write(const volatile void *p, unsigned int size); |
| 8 | #else | 8 | #else |
diff --git a/include/linux/kcore.h b/include/linux/kcore.h index 8c3f8c14eeaa..da676cdbd727 100644 --- a/include/linux/kcore.h +++ b/include/linux/kcore.h | |||
| @@ -38,22 +38,13 @@ struct vmcoredd_node { | |||
| 38 | 38 | ||
| 39 | #ifdef CONFIG_PROC_KCORE | 39 | #ifdef CONFIG_PROC_KCORE |
| 40 | void __init kclist_add(struct kcore_list *, void *, size_t, int type); | 40 | void __init kclist_add(struct kcore_list *, void *, size_t, int type); |
| 41 | static inline | 41 | |
| 42 | void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) | 42 | extern int __init register_mem_pfn_is_ram(int (*fn)(unsigned long pfn)); |
| 43 | { | ||
| 44 | m->vaddr = (unsigned long)vaddr; | ||
| 45 | kclist_add(m, addr, sz, KCORE_REMAP); | ||
| 46 | } | ||
| 47 | #else | 43 | #else |
| 48 | static inline | 44 | static inline |
| 49 | void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) | 45 | void kclist_add(struct kcore_list *new, void *addr, size_t size, int type) |
| 50 | { | 46 | { |
| 51 | } | 47 | } |
| 52 | |||
| 53 | static inline | ||
| 54 | void kclist_add_remap(struct kcore_list *m, void *addr, void *vaddr, size_t sz) | ||
| 55 | { | ||
| 56 | } | ||
| 57 | #endif | 48 | #endif |
| 58 | 49 | ||
| 59 | #endif /* _LINUX_KCORE_H */ | 50 | #endif /* _LINUX_KCORE_H */ |
diff --git a/include/linux/kern_levels.h b/include/linux/kern_levels.h index d237fe854ad9..bf2389c26ae3 100644 --- a/include/linux/kern_levels.h +++ b/include/linux/kern_levels.h | |||
| @@ -14,7 +14,7 @@ | |||
| 14 | #define KERN_INFO KERN_SOH "6" /* informational */ | 14 | #define KERN_INFO KERN_SOH "6" /* informational */ |
| 15 | #define KERN_DEBUG KERN_SOH "7" /* debug-level messages */ | 15 | #define KERN_DEBUG KERN_SOH "7" /* debug-level messages */ |
| 16 | 16 | ||
| 17 | #define KERN_DEFAULT KERN_SOH "d" /* the default kernel loglevel */ | 17 | #define KERN_DEFAULT "" /* the default kernel loglevel */ |
| 18 | 18 | ||
| 19 | /* | 19 | /* |
| 20 | * Annotation for a "continued" line of log printout (only done after a | 20 | * Annotation for a "continued" line of log printout (only done after a |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index 8f0e68e250a7..2d14e21c16c0 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -4,6 +4,7 @@ | |||
| 4 | 4 | ||
| 5 | 5 | ||
| 6 | #include <stdarg.h> | 6 | #include <stdarg.h> |
| 7 | #include <linux/limits.h> | ||
| 7 | #include <linux/linkage.h> | 8 | #include <linux/linkage.h> |
| 8 | #include <linux/stddef.h> | 9 | #include <linux/stddef.h> |
| 9 | #include <linux/types.h> | 10 | #include <linux/types.h> |
| @@ -14,36 +15,9 @@ | |||
| 14 | #include <linux/printk.h> | 15 | #include <linux/printk.h> |
| 15 | #include <linux/build_bug.h> | 16 | #include <linux/build_bug.h> |
| 16 | #include <asm/byteorder.h> | 17 | #include <asm/byteorder.h> |
| 18 | #include <asm/div64.h> | ||
| 17 | #include <uapi/linux/kernel.h> | 19 | #include <uapi/linux/kernel.h> |
| 18 | 20 | ||
| 19 | #define USHRT_MAX ((u16)(~0U)) | ||
| 20 | #define SHRT_MAX ((s16)(USHRT_MAX>>1)) | ||
| 21 | #define SHRT_MIN ((s16)(-SHRT_MAX - 1)) | ||
| 22 | #define INT_MAX ((int)(~0U>>1)) | ||
| 23 | #define INT_MIN (-INT_MAX - 1) | ||
| 24 | #define UINT_MAX (~0U) | ||
| 25 | #define LONG_MAX ((long)(~0UL>>1)) | ||
| 26 | #define LONG_MIN (-LONG_MAX - 1) | ||
| 27 | #define ULONG_MAX (~0UL) | ||
| 28 | #define LLONG_MAX ((long long)(~0ULL>>1)) | ||
| 29 | #define LLONG_MIN (-LLONG_MAX - 1) | ||
| 30 | #define ULLONG_MAX (~0ULL) | ||
| 31 | #define SIZE_MAX (~(size_t)0) | ||
| 32 | #define PHYS_ADDR_MAX (~(phys_addr_t)0) | ||
| 33 | |||
| 34 | #define U8_MAX ((u8)~0U) | ||
| 35 | #define S8_MAX ((s8)(U8_MAX>>1)) | ||
| 36 | #define S8_MIN ((s8)(-S8_MAX - 1)) | ||
| 37 | #define U16_MAX ((u16)~0U) | ||
| 38 | #define S16_MAX ((s16)(U16_MAX>>1)) | ||
| 39 | #define S16_MIN ((s16)(-S16_MAX - 1)) | ||
| 40 | #define U32_MAX ((u32)~0U) | ||
| 41 | #define S32_MAX ((s32)(U32_MAX>>1)) | ||
| 42 | #define S32_MIN ((s32)(-S32_MAX - 1)) | ||
| 43 | #define U64_MAX ((u64)~0ULL) | ||
| 44 | #define S64_MAX ((s64)(U64_MAX>>1)) | ||
| 45 | #define S64_MIN ((s64)(-S64_MAX - 1)) | ||
| 46 | |||
| 47 | #define STACK_MAGIC 0xdeadbeef | 21 | #define STACK_MAGIC 0xdeadbeef |
| 48 | 22 | ||
| 49 | /** | 23 | /** |
| @@ -73,8 +47,8 @@ | |||
| 73 | 47 | ||
| 74 | #define u64_to_user_ptr(x) ( \ | 48 | #define u64_to_user_ptr(x) ( \ |
| 75 | { \ | 49 | { \ |
| 76 | typecheck(u64, x); \ | 50 | typecheck(u64, (x)); \ |
| 77 | (void __user *)(uintptr_t)x; \ | 51 | (void __user *)(uintptr_t)(x); \ |
| 78 | } \ | 52 | } \ |
| 79 | ) | 53 | ) |
| 80 | 54 | ||
| @@ -133,12 +107,10 @@ | |||
| 133 | * | 107 | * |
| 134 | * Rounds @x up to next multiple of @y. If @y will always be a power | 108 | * Rounds @x up to next multiple of @y. If @y will always be a power |
| 135 | * of 2, consider using the faster round_up(). | 109 | * of 2, consider using the faster round_up(). |
| 136 | * | ||
| 137 | * The `const' here prevents gcc-3.3 from calling __divdi3 | ||
| 138 | */ | 110 | */ |
| 139 | #define roundup(x, y) ( \ | 111 | #define roundup(x, y) ( \ |
| 140 | { \ | 112 | { \ |
| 141 | const typeof(y) __y = y; \ | 113 | typeof(y) __y = y; \ |
| 142 | (((x) + (__y - 1)) / __y) * __y; \ | 114 | (((x) + (__y - 1)) / __y) * __y; \ |
| 143 | } \ | 115 | } \ |
| 144 | ) | 116 | ) |
| @@ -204,7 +176,6 @@ | |||
| 204 | #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) | 176 | #define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; }) |
| 205 | 177 | ||
| 206 | #ifdef CONFIG_LBDAF | 178 | #ifdef CONFIG_LBDAF |
| 207 | # include <asm/div64.h> | ||
| 208 | # define sector_div(a, b) do_div(a, b) | 179 | # define sector_div(a, b) do_div(a, b) |
| 209 | #else | 180 | #else |
| 210 | # define sector_div(n, b)( \ | 181 | # define sector_div(n, b)( \ |
| @@ -245,8 +216,10 @@ extern int _cond_resched(void); | |||
| 245 | #endif | 216 | #endif |
| 246 | 217 | ||
| 247 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP | 218 | #ifdef CONFIG_DEBUG_ATOMIC_SLEEP |
| 248 | void ___might_sleep(const char *file, int line, int preempt_offset); | 219 | extern void ___might_sleep(const char *file, int line, int preempt_offset); |
| 249 | void __might_sleep(const char *file, int line, int preempt_offset); | 220 | extern void __might_sleep(const char *file, int line, int preempt_offset); |
| 221 | extern void __cant_sleep(const char *file, int line, int preempt_offset); | ||
| 222 | |||
| 250 | /** | 223 | /** |
| 251 | * might_sleep - annotation for functions that can sleep | 224 | * might_sleep - annotation for functions that can sleep |
| 252 | * | 225 | * |
| @@ -259,6 +232,13 @@ extern int _cond_resched(void); | |||
| 259 | */ | 232 | */ |
| 260 | # define might_sleep() \ | 233 | # define might_sleep() \ |
| 261 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) | 234 | do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) |
| 235 | /** | ||
| 236 | * cant_sleep - annotation for functions that cannot sleep | ||
| 237 | * | ||
| 238 | * this macro will print a stack trace if it is executed with preemption enabled | ||
| 239 | */ | ||
| 240 | # define cant_sleep() \ | ||
| 241 | do { __cant_sleep(__FILE__, __LINE__, 0); } while (0) | ||
| 262 | # define sched_annotate_sleep() (current->task_state_change = 0) | 242 | # define sched_annotate_sleep() (current->task_state_change = 0) |
| 263 | #else | 243 | #else |
| 264 | static inline void ___might_sleep(const char *file, int line, | 244 | static inline void ___might_sleep(const char *file, int line, |
| @@ -266,6 +246,7 @@ extern int _cond_resched(void); | |||
| 266 | static inline void __might_sleep(const char *file, int line, | 246 | static inline void __might_sleep(const char *file, int line, |
| 267 | int preempt_offset) { } | 247 | int preempt_offset) { } |
| 268 | # define might_sleep() do { might_resched(); } while (0) | 248 | # define might_sleep() do { might_resched(); } while (0) |
| 249 | # define cant_sleep() do { } while (0) | ||
| 269 | # define sched_annotate_sleep() do { } while (0) | 250 | # define sched_annotate_sleep() do { } while (0) |
| 270 | #endif | 251 | #endif |
| 271 | 252 | ||
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h index 5b36b1287a5a..c8893f663470 100644 --- a/include/linux/kernfs.h +++ b/include/linux/kernfs.h | |||
| @@ -25,7 +25,10 @@ struct seq_file; | |||
| 25 | struct vm_area_struct; | 25 | struct vm_area_struct; |
| 26 | struct super_block; | 26 | struct super_block; |
| 27 | struct file_system_type; | 27 | struct file_system_type; |
| 28 | struct poll_table_struct; | ||
| 29 | struct fs_context; | ||
| 28 | 30 | ||
| 31 | struct kernfs_fs_context; | ||
| 29 | struct kernfs_open_node; | 32 | struct kernfs_open_node; |
| 30 | struct kernfs_iattrs; | 33 | struct kernfs_iattrs; |
| 31 | 34 | ||
| @@ -167,7 +170,6 @@ struct kernfs_node { | |||
| 167 | * kernfs_node parameter. | 170 | * kernfs_node parameter. |
| 168 | */ | 171 | */ |
| 169 | struct kernfs_syscall_ops { | 172 | struct kernfs_syscall_ops { |
| 170 | int (*remount_fs)(struct kernfs_root *root, int *flags, char *data); | ||
| 171 | int (*show_options)(struct seq_file *sf, struct kernfs_root *root); | 173 | int (*show_options)(struct seq_file *sf, struct kernfs_root *root); |
| 172 | 174 | ||
| 173 | int (*mkdir)(struct kernfs_node *parent, const char *name, | 175 | int (*mkdir)(struct kernfs_node *parent, const char *name, |
| @@ -261,6 +263,9 @@ struct kernfs_ops { | |||
| 261 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, | 263 | ssize_t (*write)(struct kernfs_open_file *of, char *buf, size_t bytes, |
| 262 | loff_t off); | 264 | loff_t off); |
| 263 | 265 | ||
| 266 | __poll_t (*poll)(struct kernfs_open_file *of, | ||
| 267 | struct poll_table_struct *pt); | ||
| 268 | |||
| 264 | int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); | 269 | int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma); |
| 265 | 270 | ||
| 266 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 271 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| @@ -268,6 +273,18 @@ struct kernfs_ops { | |||
| 268 | #endif | 273 | #endif |
| 269 | }; | 274 | }; |
| 270 | 275 | ||
| 276 | /* | ||
| 277 | * The kernfs superblock creation/mount parameter context. | ||
| 278 | */ | ||
| 279 | struct kernfs_fs_context { | ||
| 280 | struct kernfs_root *root; /* Root of the hierarchy being mounted */ | ||
| 281 | void *ns_tag; /* Namespace tag of the mount (or NULL) */ | ||
| 282 | unsigned long magic; /* File system specific magic number */ | ||
| 283 | |||
| 284 | /* The following are set/used by kernfs_mount() */ | ||
| 285 | bool new_sb_created; /* Set to T if we allocated a new sb */ | ||
| 286 | }; | ||
| 287 | |||
| 271 | #ifdef CONFIG_KERNFS | 288 | #ifdef CONFIG_KERNFS |
| 272 | 289 | ||
| 273 | static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) | 290 | static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn) |
| @@ -350,14 +367,14 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name, | |||
| 350 | int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, | 367 | int kernfs_rename_ns(struct kernfs_node *kn, struct kernfs_node *new_parent, |
| 351 | const char *new_name, const void *new_ns); | 368 | const char *new_name, const void *new_ns); |
| 352 | int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); | 369 | int kernfs_setattr(struct kernfs_node *kn, const struct iattr *iattr); |
| 370 | __poll_t kernfs_generic_poll(struct kernfs_open_file *of, | ||
| 371 | struct poll_table_struct *pt); | ||
| 353 | void kernfs_notify(struct kernfs_node *kn); | 372 | void kernfs_notify(struct kernfs_node *kn); |
| 354 | 373 | ||
| 355 | const void *kernfs_super_ns(struct super_block *sb); | 374 | const void *kernfs_super_ns(struct super_block *sb); |
| 356 | struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags, | 375 | int kernfs_get_tree(struct fs_context *fc); |
| 357 | struct kernfs_root *root, unsigned long magic, | 376 | void kernfs_free_fs_context(struct fs_context *fc); |
| 358 | bool *new_sb_created, const void *ns); | ||
| 359 | void kernfs_kill_sb(struct super_block *sb); | 377 | void kernfs_kill_sb(struct super_block *sb); |
| 360 | struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns); | ||
| 361 | 378 | ||
| 362 | void kernfs_init(void); | 379 | void kernfs_init(void); |
| 363 | 380 | ||
| @@ -459,11 +476,10 @@ static inline void kernfs_notify(struct kernfs_node *kn) { } | |||
| 459 | static inline const void *kernfs_super_ns(struct super_block *sb) | 476 | static inline const void *kernfs_super_ns(struct super_block *sb) |
| 460 | { return NULL; } | 477 | { return NULL; } |
| 461 | 478 | ||
| 462 | static inline struct dentry * | 479 | static inline int kernfs_get_tree(struct fs_context *fc) |
| 463 | kernfs_mount_ns(struct file_system_type *fs_type, int flags, | 480 | { return -ENOSYS; } |
| 464 | struct kernfs_root *root, unsigned long magic, | 481 | |
| 465 | bool *new_sb_created, const void *ns) | 482 | static inline void kernfs_free_fs_context(struct fs_context *fc) { } |
| 466 | { return ERR_PTR(-ENOSYS); } | ||
| 467 | 483 | ||
| 468 | static inline void kernfs_kill_sb(struct super_block *sb) { } | 484 | static inline void kernfs_kill_sb(struct super_block *sb) { } |
| 469 | 485 | ||
| @@ -546,13 +562,4 @@ static inline int kernfs_rename(struct kernfs_node *kn, | |||
| 546 | return kernfs_rename_ns(kn, new_parent, new_name, NULL); | 562 | return kernfs_rename_ns(kn, new_parent, new_name, NULL); |
| 547 | } | 563 | } |
| 548 | 564 | ||
| 549 | static inline struct dentry * | ||
| 550 | kernfs_mount(struct file_system_type *fs_type, int flags, | ||
| 551 | struct kernfs_root *root, unsigned long magic, | ||
| 552 | bool *new_sb_created) | ||
| 553 | { | ||
| 554 | return kernfs_mount_ns(fs_type, flags, root, | ||
| 555 | magic, new_sb_created, NULL); | ||
| 556 | } | ||
| 557 | |||
| 558 | #endif /* __LINUX_KERNFS_H */ | 565 | #endif /* __LINUX_KERNFS_H */ |
diff --git a/include/linux/key-type.h b/include/linux/key-type.h index bc9af551fc83..e49d1de0614e 100644 --- a/include/linux/key-type.h +++ b/include/linux/key-type.h | |||
| @@ -21,15 +21,6 @@ struct kernel_pkey_query; | |||
| 21 | struct kernel_pkey_params; | 21 | struct kernel_pkey_params; |
| 22 | 22 | ||
| 23 | /* | 23 | /* |
| 24 | * key under-construction record | ||
| 25 | * - passed to the request_key actor if supplied | ||
| 26 | */ | ||
| 27 | struct key_construction { | ||
| 28 | struct key *key; /* key being constructed */ | ||
| 29 | struct key *authkey;/* authorisation for key being constructed */ | ||
| 30 | }; | ||
| 31 | |||
| 32 | /* | ||
| 33 | * Pre-parsed payload, used by key add, update and instantiate. | 24 | * Pre-parsed payload, used by key add, update and instantiate. |
| 34 | * | 25 | * |
| 35 | * This struct will be cleared and data and datalen will be set with the data | 26 | * This struct will be cleared and data and datalen will be set with the data |
| @@ -50,8 +41,7 @@ struct key_preparsed_payload { | |||
| 50 | time64_t expiry; /* Expiry time of key */ | 41 | time64_t expiry; /* Expiry time of key */ |
| 51 | } __randomize_layout; | 42 | } __randomize_layout; |
| 52 | 43 | ||
| 53 | typedef int (*request_key_actor_t)(struct key_construction *key, | 44 | typedef int (*request_key_actor_t)(struct key *auth_key, void *aux); |
| 54 | const char *op, void *aux); | ||
| 55 | 45 | ||
| 56 | /* | 46 | /* |
| 57 | * Preparsed matching criterion. | 47 | * Preparsed matching criterion. |
| @@ -181,20 +171,20 @@ extern int key_instantiate_and_link(struct key *key, | |||
| 181 | const void *data, | 171 | const void *data, |
| 182 | size_t datalen, | 172 | size_t datalen, |
| 183 | struct key *keyring, | 173 | struct key *keyring, |
| 184 | struct key *instkey); | 174 | struct key *authkey); |
| 185 | extern int key_reject_and_link(struct key *key, | 175 | extern int key_reject_and_link(struct key *key, |
| 186 | unsigned timeout, | 176 | unsigned timeout, |
| 187 | unsigned error, | 177 | unsigned error, |
| 188 | struct key *keyring, | 178 | struct key *keyring, |
| 189 | struct key *instkey); | 179 | struct key *authkey); |
| 190 | extern void complete_request_key(struct key_construction *cons, int error); | 180 | extern void complete_request_key(struct key *authkey, int error); |
| 191 | 181 | ||
| 192 | static inline int key_negate_and_link(struct key *key, | 182 | static inline int key_negate_and_link(struct key *key, |
| 193 | unsigned timeout, | 183 | unsigned timeout, |
| 194 | struct key *keyring, | 184 | struct key *keyring, |
| 195 | struct key *instkey) | 185 | struct key *authkey) |
| 196 | { | 186 | { |
| 197 | return key_reject_and_link(key, timeout, ENOKEY, keyring, instkey); | 187 | return key_reject_and_link(key, timeout, ENOKEY, keyring, authkey); |
| 198 | } | 188 | } |
| 199 | 189 | ||
| 200 | extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); | 190 | extern int generic_key_instantiate(struct key *key, struct key_preparsed_payload *prep); |
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index e07e91daaacc..9a897256e481 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h | |||
| @@ -173,6 +173,7 @@ struct kretprobe_instance { | |||
| 173 | struct kretprobe *rp; | 173 | struct kretprobe *rp; |
| 174 | kprobe_opcode_t *ret_addr; | 174 | kprobe_opcode_t *ret_addr; |
| 175 | struct task_struct *task; | 175 | struct task_struct *task; |
| 176 | void *fp; | ||
| 176 | char data[0]; | 177 | char data[0]; |
| 177 | }; | 178 | }; |
| 178 | 179 | ||
| @@ -442,6 +443,11 @@ static inline int enable_kprobe(struct kprobe *kp) | |||
| 442 | { | 443 | { |
| 443 | return -ENOSYS; | 444 | return -ENOSYS; |
| 444 | } | 445 | } |
| 446 | |||
| 447 | static inline bool within_kprobe_blacklist(unsigned long addr) | ||
| 448 | { | ||
| 449 | return true; | ||
| 450 | } | ||
| 445 | #endif /* CONFIG_KPROBES */ | 451 | #endif /* CONFIG_KPROBES */ |
| 446 | static inline int disable_kretprobe(struct kretprobe *rp) | 452 | static inline int disable_kretprobe(struct kretprobe *rp) |
| 447 | { | 453 | { |
diff --git a/include/linux/ksm.h b/include/linux/ksm.h index 161e8164abcf..e48b1e453ff5 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h | |||
| @@ -53,6 +53,8 @@ struct page *ksm_might_need_to_copy(struct page *page, | |||
| 53 | 53 | ||
| 54 | void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); | 54 | void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); |
| 55 | void ksm_migrate_page(struct page *newpage, struct page *oldpage); | 55 | void ksm_migrate_page(struct page *newpage, struct page *oldpage); |
| 56 | bool reuse_ksm_page(struct page *page, | ||
| 57 | struct vm_area_struct *vma, unsigned long address); | ||
| 56 | 58 | ||
| 57 | #else /* !CONFIG_KSM */ | 59 | #else /* !CONFIG_KSM */ |
| 58 | 60 | ||
| @@ -86,6 +88,11 @@ static inline void rmap_walk_ksm(struct page *page, | |||
| 86 | static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) | 88 | static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) |
| 87 | { | 89 | { |
| 88 | } | 90 | } |
| 91 | static inline bool reuse_ksm_page(struct page *page, | ||
| 92 | struct vm_area_struct *vma, unsigned long address) | ||
| 93 | { | ||
| 94 | return false; | ||
| 95 | } | ||
| 89 | #endif /* CONFIG_MMU */ | 96 | #endif /* CONFIG_MMU */ |
| 90 | #endif /* !CONFIG_KSM */ | 97 | #endif /* !CONFIG_KSM */ |
| 91 | 98 | ||
diff --git a/include/linux/kthread.h b/include/linux/kthread.h index c1961761311d..2c89e60bc752 100644 --- a/include/linux/kthread.h +++ b/include/linux/kthread.h | |||
| @@ -56,6 +56,7 @@ void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask); | |||
| 56 | int kthread_stop(struct task_struct *k); | 56 | int kthread_stop(struct task_struct *k); |
| 57 | bool kthread_should_stop(void); | 57 | bool kthread_should_stop(void); |
| 58 | bool kthread_should_park(void); | 58 | bool kthread_should_park(void); |
| 59 | bool __kthread_should_park(struct task_struct *k); | ||
| 59 | bool kthread_freezable_should_stop(bool *was_frozen); | 60 | bool kthread_freezable_should_stop(bool *was_frozen); |
| 60 | void *kthread_data(struct task_struct *k); | 61 | void *kthread_data(struct task_struct *k); |
| 61 | void *kthread_probe_data(struct task_struct *k); | 62 | void *kthread_probe_data(struct task_struct *k); |
| @@ -85,7 +86,7 @@ enum { | |||
| 85 | 86 | ||
| 86 | struct kthread_worker { | 87 | struct kthread_worker { |
| 87 | unsigned int flags; | 88 | unsigned int flags; |
| 88 | spinlock_t lock; | 89 | raw_spinlock_t lock; |
| 89 | struct list_head work_list; | 90 | struct list_head work_list; |
| 90 | struct list_head delayed_work_list; | 91 | struct list_head delayed_work_list; |
| 91 | struct task_struct *task; | 92 | struct task_struct *task; |
| @@ -106,7 +107,7 @@ struct kthread_delayed_work { | |||
| 106 | }; | 107 | }; |
| 107 | 108 | ||
| 108 | #define KTHREAD_WORKER_INIT(worker) { \ | 109 | #define KTHREAD_WORKER_INIT(worker) { \ |
| 109 | .lock = __SPIN_LOCK_UNLOCKED((worker).lock), \ | 110 | .lock = __RAW_SPIN_LOCK_UNLOCKED((worker).lock), \ |
| 110 | .work_list = LIST_HEAD_INIT((worker).work_list), \ | 111 | .work_list = LIST_HEAD_INIT((worker).work_list), \ |
| 111 | .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ | 112 | .delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\ |
| 112 | } | 113 | } |
| @@ -164,9 +165,8 @@ extern void __kthread_init_worker(struct kthread_worker *worker, | |||
| 164 | #define kthread_init_delayed_work(dwork, fn) \ | 165 | #define kthread_init_delayed_work(dwork, fn) \ |
| 165 | do { \ | 166 | do { \ |
| 166 | kthread_init_work(&(dwork)->work, (fn)); \ | 167 | kthread_init_work(&(dwork)->work, (fn)); \ |
| 167 | __init_timer(&(dwork)->timer, \ | 168 | timer_setup(&(dwork)->timer, \ |
| 168 | kthread_delayed_work_timer_fn, \ | 169 | kthread_delayed_work_timer_fn, 0); \ |
| 169 | TIMER_IRQSAFE); \ | ||
| 170 | } while (0) | 170 | } while (0) |
| 171 | 171 | ||
| 172 | int kthread_worker_fn(void *worker_ptr); | 172 | int kthread_worker_fn(void *worker_ptr); |
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index c38cc5eb7e73..640a03642766 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h | |||
| @@ -28,6 +28,7 @@ | |||
| 28 | #include <linux/irqbypass.h> | 28 | #include <linux/irqbypass.h> |
| 29 | #include <linux/swait.h> | 29 | #include <linux/swait.h> |
| 30 | #include <linux/refcount.h> | 30 | #include <linux/refcount.h> |
| 31 | #include <linux/nospec.h> | ||
| 31 | #include <asm/signal.h> | 32 | #include <asm/signal.h> |
| 32 | 33 | ||
| 33 | #include <linux/kvm.h> | 34 | #include <linux/kvm.h> |
| @@ -48,6 +49,27 @@ | |||
| 48 | */ | 49 | */ |
| 49 | #define KVM_MEMSLOT_INVALID (1UL << 16) | 50 | #define KVM_MEMSLOT_INVALID (1UL << 16) |
| 50 | 51 | ||
| 52 | /* | ||
| 53 | * Bit 63 of the memslot generation number is an "update in-progress flag", | ||
| 54 | * e.g. is temporarily set for the duration of install_new_memslots(). | ||
| 55 | * This flag effectively creates a unique generation number that is used to | ||
| 56 | * mark cached memslot data, e.g. MMIO accesses, as potentially being stale, | ||
| 57 | * i.e. may (or may not) have come from the previous memslots generation. | ||
| 58 | * | ||
| 59 | * This is necessary because the actual memslots update is not atomic with | ||
| 60 | * respect to the generation number update. Updating the generation number | ||
| 61 | * first would allow a vCPU to cache a spte from the old memslots using the | ||
| 62 | * new generation number, and updating the generation number after switching | ||
| 63 | * to the new memslots would allow cache hits using the old generation number | ||
| 64 | * to reference the defunct memslots. | ||
| 65 | * | ||
| 66 | * This mechanism is used to prevent getting hits in KVM's caches while a | ||
| 67 | * memslot update is in-progress, and to prevent cache hits *after* updating | ||
| 68 | * the actual generation number against accesses that were inserted into the | ||
| 69 | * cache *before* the memslots were updated. | ||
| 70 | */ | ||
| 71 | #define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS BIT_ULL(63) | ||
| 72 | |||
| 51 | /* Two fragments for cross MMIO pages. */ | 73 | /* Two fragments for cross MMIO pages. */ |
| 52 | #define KVM_MAX_MMIO_FRAGMENTS 2 | 74 | #define KVM_MAX_MMIO_FRAGMENTS 2 |
| 53 | 75 | ||
| @@ -492,10 +514,10 @@ static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx) | |||
| 492 | 514 | ||
| 493 | static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) | 515 | static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i) |
| 494 | { | 516 | { |
| 495 | /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case | 517 | int num_vcpus = atomic_read(&kvm->online_vcpus); |
| 496 | * the caller has read kvm->online_vcpus before (as is the case | 518 | i = array_index_nospec(i, num_vcpus); |
| 497 | * for kvm_for_each_vcpu, for example). | 519 | |
| 498 | */ | 520 | /* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */ |
| 499 | smp_rmb(); | 521 | smp_rmb(); |
| 500 | return kvm->vcpus[i]; | 522 | return kvm->vcpus[i]; |
| 501 | } | 523 | } |
| @@ -579,6 +601,7 @@ void kvm_put_kvm(struct kvm *kvm); | |||
| 579 | 601 | ||
| 580 | static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) | 602 | static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id) |
| 581 | { | 603 | { |
| 604 | as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM); | ||
| 582 | return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, | 605 | return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu, |
| 583 | lockdep_is_held(&kvm->slots_lock) || | 606 | lockdep_is_held(&kvm->slots_lock) || |
| 584 | !refcount_read(&kvm->users_count)); | 607 | !refcount_read(&kvm->users_count)); |
| @@ -634,7 +657,7 @@ void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, | |||
| 634 | struct kvm_memory_slot *dont); | 657 | struct kvm_memory_slot *dont); |
| 635 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, | 658 | int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
| 636 | unsigned long npages); | 659 | unsigned long npages); |
| 637 | void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots); | 660 | void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen); |
| 638 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | 661 | int kvm_arch_prepare_memory_region(struct kvm *kvm, |
| 639 | struct kvm_memory_slot *memslot, | 662 | struct kvm_memory_slot *memslot, |
| 640 | const struct kvm_userspace_memory_region *mem, | 663 | const struct kvm_userspace_memory_region *mem, |
| @@ -1182,6 +1205,7 @@ extern bool kvm_rebooting; | |||
| 1182 | 1205 | ||
| 1183 | extern unsigned int halt_poll_ns; | 1206 | extern unsigned int halt_poll_ns; |
| 1184 | extern unsigned int halt_poll_ns_grow; | 1207 | extern unsigned int halt_poll_ns_grow; |
| 1208 | extern unsigned int halt_poll_ns_grow_start; | ||
| 1185 | extern unsigned int halt_poll_ns_shrink; | 1209 | extern unsigned int halt_poll_ns_shrink; |
| 1186 | 1210 | ||
| 1187 | struct kvm_device { | 1211 | struct kvm_device { |
diff --git a/include/linux/leds.h b/include/linux/leds.h index 5263f87e1d2c..78204650fe2a 100644 --- a/include/linux/leds.h +++ b/include/linux/leds.h | |||
| @@ -219,6 +219,19 @@ extern int led_set_brightness_sync(struct led_classdev *led_cdev, | |||
| 219 | extern int led_update_brightness(struct led_classdev *led_cdev); | 219 | extern int led_update_brightness(struct led_classdev *led_cdev); |
| 220 | 220 | ||
| 221 | /** | 221 | /** |
| 222 | * led_get_default_pattern - return default pattern | ||
| 223 | * | ||
| 224 | * @led_cdev: the LED to get default pattern for | ||
| 225 | * @size: pointer for storing the number of elements in returned array, | ||
| 226 | * modified only if return != NULL | ||
| 227 | * | ||
| 228 | * Return: Allocated array of integers with default pattern from device tree | ||
| 229 | * or NULL. Caller is responsible for kfree(). | ||
| 230 | */ | ||
| 231 | extern u32 *led_get_default_pattern(struct led_classdev *led_cdev, | ||
| 232 | unsigned int *size); | ||
| 233 | |||
| 234 | /** | ||
| 222 | * led_sysfs_disable - disable LED sysfs interface | 235 | * led_sysfs_disable - disable LED sysfs interface |
| 223 | * @led_cdev: the LED to set | 236 | * @led_cdev: the LED to set |
| 224 | * | 237 | * |
diff --git a/include/linux/libata.h b/include/linux/libata.h index 68133842e6d7..c9419c05a90a 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h | |||
| @@ -1122,10 +1122,11 @@ extern int ata_host_activate(struct ata_host *host, int irq, | |||
| 1122 | extern void ata_host_detach(struct ata_host *host); | 1122 | extern void ata_host_detach(struct ata_host *host); |
| 1123 | extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *); | 1123 | extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_operations *); |
| 1124 | extern int ata_scsi_detect(struct scsi_host_template *sht); | 1124 | extern int ata_scsi_detect(struct scsi_host_template *sht); |
| 1125 | extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); | 1125 | extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd, |
| 1126 | void __user *arg); | ||
| 1126 | extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); | 1127 | extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd); |
| 1127 | extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, | 1128 | extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev, |
| 1128 | int cmd, void __user *arg); | 1129 | unsigned int cmd, void __user *arg); |
| 1129 | extern void ata_sas_port_destroy(struct ata_port *); | 1130 | extern void ata_sas_port_destroy(struct ata_port *); |
| 1130 | extern struct ata_port *ata_sas_port_alloc(struct ata_host *, | 1131 | extern struct ata_port *ata_sas_port_alloc(struct ata_host *, |
| 1131 | struct ata_port_info *, struct Scsi_Host *); | 1132 | struct ata_port_info *, struct Scsi_Host *); |
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h index 5440f11b0907..feb342d026f2 100644 --- a/include/linux/libnvdimm.h +++ b/include/linux/libnvdimm.h | |||
| @@ -42,6 +42,8 @@ enum { | |||
| 42 | NDD_SECURITY_OVERWRITE = 3, | 42 | NDD_SECURITY_OVERWRITE = 3, |
| 43 | /* tracking whether or not there is a pending device reference */ | 43 | /* tracking whether or not there is a pending device reference */ |
| 44 | NDD_WORK_PENDING = 4, | 44 | NDD_WORK_PENDING = 4, |
| 45 | /* ignore / filter NSLABEL_FLAG_LOCAL for this DIMM, i.e. no aliasing */ | ||
| 46 | NDD_NOBLK = 5, | ||
| 45 | 47 | ||
| 46 | /* need to set a limit somewhere, but yes, this is likely overkill */ | 48 | /* need to set a limit somewhere, but yes, this is likely overkill */ |
| 47 | ND_IOCTL_MAX_BUFLEN = SZ_4M, | 49 | ND_IOCTL_MAX_BUFLEN = SZ_4M, |
| @@ -128,6 +130,7 @@ struct nd_region_desc { | |||
| 128 | void *provider_data; | 130 | void *provider_data; |
| 129 | int num_lanes; | 131 | int num_lanes; |
| 130 | int numa_node; | 132 | int numa_node; |
| 133 | int target_node; | ||
| 131 | unsigned long flags; | 134 | unsigned long flags; |
| 132 | struct device_node *of_node; | 135 | struct device_node *of_node; |
| 133 | }; | 136 | }; |
| @@ -160,6 +163,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc( | |||
| 160 | } | 163 | } |
| 161 | 164 | ||
| 162 | enum nvdimm_security_state { | 165 | enum nvdimm_security_state { |
| 166 | NVDIMM_SECURITY_ERROR = -1, | ||
| 163 | NVDIMM_SECURITY_DISABLED, | 167 | NVDIMM_SECURITY_DISABLED, |
| 164 | NVDIMM_SECURITY_UNLOCKED, | 168 | NVDIMM_SECURITY_UNLOCKED, |
| 165 | NVDIMM_SECURITY_LOCKED, | 169 | NVDIMM_SECURITY_LOCKED, |
| @@ -234,7 +238,6 @@ static inline struct nvdimm *nvdimm_create(struct nvdimm_bus *nvdimm_bus, | |||
| 234 | cmd_mask, num_flush, flush_wpq, NULL, NULL); | 238 | cmd_mask, num_flush, flush_wpq, NULL, NULL); |
| 235 | } | 239 | } |
| 236 | 240 | ||
| 237 | int nvdimm_security_setup_events(struct nvdimm *nvdimm); | ||
| 238 | const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); | 241 | const struct nd_cmd_desc *nd_cmd_dimm_desc(int cmd); |
| 239 | const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); | 242 | const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd); |
| 240 | u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, | 243 | u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd, |
diff --git a/include/linux/limits.h b/include/linux/limits.h new file mode 100644 index 000000000000..76afcd24ff8c --- /dev/null +++ b/include/linux/limits.h | |||
| @@ -0,0 +1,36 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef _LINUX_LIMITS_H | ||
| 3 | #define _LINUX_LIMITS_H | ||
| 4 | |||
| 5 | #include <uapi/linux/limits.h> | ||
| 6 | #include <linux/types.h> | ||
| 7 | |||
| 8 | #define USHRT_MAX ((unsigned short)~0U) | ||
| 9 | #define SHRT_MAX ((short)(USHRT_MAX >> 1)) | ||
| 10 | #define SHRT_MIN ((short)(-SHRT_MAX - 1)) | ||
| 11 | #define INT_MAX ((int)(~0U >> 1)) | ||
| 12 | #define INT_MIN (-INT_MAX - 1) | ||
| 13 | #define UINT_MAX (~0U) | ||
| 14 | #define LONG_MAX ((long)(~0UL >> 1)) | ||
| 15 | #define LONG_MIN (-LONG_MAX - 1) | ||
| 16 | #define ULONG_MAX (~0UL) | ||
| 17 | #define LLONG_MAX ((long long)(~0ULL >> 1)) | ||
| 18 | #define LLONG_MIN (-LLONG_MAX - 1) | ||
| 19 | #define ULLONG_MAX (~0ULL) | ||
| 20 | #define SIZE_MAX (~(size_t)0) | ||
| 21 | #define PHYS_ADDR_MAX (~(phys_addr_t)0) | ||
| 22 | |||
| 23 | #define U8_MAX ((u8)~0U) | ||
| 24 | #define S8_MAX ((s8)(U8_MAX >> 1)) | ||
| 25 | #define S8_MIN ((s8)(-S8_MAX - 1)) | ||
| 26 | #define U16_MAX ((u16)~0U) | ||
| 27 | #define S16_MAX ((s16)(U16_MAX >> 1)) | ||
| 28 | #define S16_MIN ((s16)(-S16_MAX - 1)) | ||
| 29 | #define U32_MAX ((u32)~0U) | ||
| 30 | #define S32_MAX ((s32)(U32_MAX >> 1)) | ||
| 31 | #define S32_MIN ((s32)(-S32_MAX - 1)) | ||
| 32 | #define U64_MAX ((u64)~0ULL) | ||
| 33 | #define S64_MAX ((s64)(U64_MAX >> 1)) | ||
| 34 | #define S64_MIN ((s64)(-S64_MAX - 1)) | ||
| 35 | |||
| 36 | #endif /* _LINUX_LIMITS_H */ | ||
diff --git a/include/linux/list.h b/include/linux/list.h index edb7628e46ed..58aa3adf94e6 100644 --- a/include/linux/list.h +++ b/include/linux/list.h | |||
| @@ -207,6 +207,17 @@ static inline void list_bulk_move_tail(struct list_head *head, | |||
| 207 | } | 207 | } |
| 208 | 208 | ||
| 209 | /** | 209 | /** |
| 210 | * list_is_first -- tests whether @list is the first entry in list @head | ||
| 211 | * @list: the entry to test | ||
| 212 | * @head: the head of the list | ||
| 213 | */ | ||
| 214 | static inline int list_is_first(const struct list_head *list, | ||
| 215 | const struct list_head *head) | ||
| 216 | { | ||
| 217 | return list->prev == head; | ||
| 218 | } | ||
| 219 | |||
| 220 | /** | ||
| 210 | * list_is_last - tests whether @list is the last entry in list @head | 221 | * list_is_last - tests whether @list is the last entry in list @head |
| 211 | * @list: the entry to test | 222 | * @list: the entry to test |
| 212 | * @head: the head of the list | 223 | * @head: the head of the list |
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index aec44b1d9582..53551f470722 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h | |||
| @@ -24,6 +24,7 @@ | |||
| 24 | #include <linux/module.h> | 24 | #include <linux/module.h> |
| 25 | #include <linux/ftrace.h> | 25 | #include <linux/ftrace.h> |
| 26 | #include <linux/completion.h> | 26 | #include <linux/completion.h> |
| 27 | #include <linux/list.h> | ||
| 27 | 28 | ||
| 28 | #if IS_ENABLED(CONFIG_LIVEPATCH) | 29 | #if IS_ENABLED(CONFIG_LIVEPATCH) |
| 29 | 30 | ||
| @@ -40,11 +41,14 @@ | |||
| 40 | * @new_func: pointer to the patched function code | 41 | * @new_func: pointer to the patched function code |
| 41 | * @old_sympos: a hint indicating which symbol position the old function | 42 | * @old_sympos: a hint indicating which symbol position the old function |
| 42 | * can be found (optional) | 43 | * can be found (optional) |
| 43 | * @old_addr: the address of the function being patched | 44 | * @old_func: pointer to the function being patched |
| 44 | * @kobj: kobject for sysfs resources | 45 | * @kobj: kobject for sysfs resources |
| 46 | * @node: list node for klp_object func_list | ||
| 45 | * @stack_node: list node for klp_ops func_stack list | 47 | * @stack_node: list node for klp_ops func_stack list |
| 46 | * @old_size: size of the old function | 48 | * @old_size: size of the old function |
| 47 | * @new_size: size of the new function | 49 | * @new_size: size of the new function |
| 50 | * @kobj_added: @kobj has been added and needs freeing | ||
| 51 | * @nop: temporary patch to use the original code again; dyn. allocated | ||
| 48 | * @patched: the func has been added to the klp_ops list | 52 | * @patched: the func has been added to the klp_ops list |
| 49 | * @transition: the func is currently being applied or reverted | 53 | * @transition: the func is currently being applied or reverted |
| 50 | * | 54 | * |
| @@ -77,10 +81,13 @@ struct klp_func { | |||
| 77 | unsigned long old_sympos; | 81 | unsigned long old_sympos; |
| 78 | 82 | ||
| 79 | /* internal */ | 83 | /* internal */ |
| 80 | unsigned long old_addr; | 84 | void *old_func; |
| 81 | struct kobject kobj; | 85 | struct kobject kobj; |
| 86 | struct list_head node; | ||
| 82 | struct list_head stack_node; | 87 | struct list_head stack_node; |
| 83 | unsigned long old_size, new_size; | 88 | unsigned long old_size, new_size; |
| 89 | bool kobj_added; | ||
| 90 | bool nop; | ||
| 84 | bool patched; | 91 | bool patched; |
| 85 | bool transition; | 92 | bool transition; |
| 86 | }; | 93 | }; |
| @@ -115,8 +122,12 @@ struct klp_callbacks { | |||
| 115 | * @funcs: function entries for functions to be patched in the object | 122 | * @funcs: function entries for functions to be patched in the object |
| 116 | * @callbacks: functions to be executed pre/post (un)patching | 123 | * @callbacks: functions to be executed pre/post (un)patching |
| 117 | * @kobj: kobject for sysfs resources | 124 | * @kobj: kobject for sysfs resources |
| 125 | * @func_list: dynamic list of the function entries | ||
| 126 | * @node: list node for klp_patch obj_list | ||
| 118 | * @mod: kernel module associated with the patched object | 127 | * @mod: kernel module associated with the patched object |
| 119 | * (NULL for vmlinux) | 128 | * (NULL for vmlinux) |
| 129 | * @kobj_added: @kobj has been added and needs freeing | ||
| 130 | * @dynamic: temporary object for nop functions; dynamically allocated | ||
| 120 | * @patched: the object's funcs have been added to the klp_ops list | 131 | * @patched: the object's funcs have been added to the klp_ops list |
| 121 | */ | 132 | */ |
| 122 | struct klp_object { | 133 | struct klp_object { |
| @@ -127,7 +138,11 @@ struct klp_object { | |||
| 127 | 138 | ||
| 128 | /* internal */ | 139 | /* internal */ |
| 129 | struct kobject kobj; | 140 | struct kobject kobj; |
| 141 | struct list_head func_list; | ||
| 142 | struct list_head node; | ||
| 130 | struct module *mod; | 143 | struct module *mod; |
| 144 | bool kobj_added; | ||
| 145 | bool dynamic; | ||
| 131 | bool patched; | 146 | bool patched; |
| 132 | }; | 147 | }; |
| 133 | 148 | ||
| @@ -135,35 +150,54 @@ struct klp_object { | |||
| 135 | * struct klp_patch - patch structure for live patching | 150 | * struct klp_patch - patch structure for live patching |
| 136 | * @mod: reference to the live patch module | 151 | * @mod: reference to the live patch module |
| 137 | * @objs: object entries for kernel objects to be patched | 152 | * @objs: object entries for kernel objects to be patched |
| 138 | * @list: list node for global list of registered patches | 153 | * @replace: replace all actively used patches |
| 154 | * @list: list node for global list of actively used patches | ||
| 139 | * @kobj: kobject for sysfs resources | 155 | * @kobj: kobject for sysfs resources |
| 156 | * @obj_list: dynamic list of the object entries | ||
| 157 | * @kobj_added: @kobj has been added and needs freeing | ||
| 140 | * @enabled: the patch is enabled (but operation may be incomplete) | 158 | * @enabled: the patch is enabled (but operation may be incomplete) |
| 159 | * @forced: was involved in a forced transition | ||
| 160 | * @free_work: patch cleanup from workqueue-context | ||
| 141 | * @finish: for waiting till it is safe to remove the patch module | 161 | * @finish: for waiting till it is safe to remove the patch module |
| 142 | */ | 162 | */ |
| 143 | struct klp_patch { | 163 | struct klp_patch { |
| 144 | /* external */ | 164 | /* external */ |
| 145 | struct module *mod; | 165 | struct module *mod; |
| 146 | struct klp_object *objs; | 166 | struct klp_object *objs; |
| 167 | bool replace; | ||
| 147 | 168 | ||
| 148 | /* internal */ | 169 | /* internal */ |
| 149 | struct list_head list; | 170 | struct list_head list; |
| 150 | struct kobject kobj; | 171 | struct kobject kobj; |
| 172 | struct list_head obj_list; | ||
| 173 | bool kobj_added; | ||
| 151 | bool enabled; | 174 | bool enabled; |
| 175 | bool forced; | ||
| 176 | struct work_struct free_work; | ||
| 152 | struct completion finish; | 177 | struct completion finish; |
| 153 | }; | 178 | }; |
| 154 | 179 | ||
| 155 | #define klp_for_each_object(patch, obj) \ | 180 | #define klp_for_each_object_static(patch, obj) \ |
| 156 | for (obj = patch->objs; obj->funcs || obj->name; obj++) | 181 | for (obj = patch->objs; obj->funcs || obj->name; obj++) |
| 157 | 182 | ||
| 158 | #define klp_for_each_func(obj, func) \ | 183 | #define klp_for_each_object_safe(patch, obj, tmp_obj) \ |
| 184 | list_for_each_entry_safe(obj, tmp_obj, &patch->obj_list, node) | ||
| 185 | |||
| 186 | #define klp_for_each_object(patch, obj) \ | ||
| 187 | list_for_each_entry(obj, &patch->obj_list, node) | ||
| 188 | |||
| 189 | #define klp_for_each_func_static(obj, func) \ | ||
| 159 | for (func = obj->funcs; \ | 190 | for (func = obj->funcs; \ |
| 160 | func->old_name || func->new_func || func->old_sympos; \ | 191 | func->old_name || func->new_func || func->old_sympos; \ |
| 161 | func++) | 192 | func++) |
| 162 | 193 | ||
| 163 | int klp_register_patch(struct klp_patch *); | 194 | #define klp_for_each_func_safe(obj, func, tmp_func) \ |
| 164 | int klp_unregister_patch(struct klp_patch *); | 195 | list_for_each_entry_safe(func, tmp_func, &obj->func_list, node) |
| 196 | |||
| 197 | #define klp_for_each_func(obj, func) \ | ||
| 198 | list_for_each_entry(func, &obj->func_list, node) | ||
| 199 | |||
| 165 | int klp_enable_patch(struct klp_patch *); | 200 | int klp_enable_patch(struct klp_patch *); |
| 166 | int klp_disable_patch(struct klp_patch *); | ||
| 167 | 201 | ||
| 168 | void arch_klp_init_object_loaded(struct klp_patch *patch, | 202 | void arch_klp_init_object_loaded(struct klp_patch *patch, |
| 169 | struct klp_object *obj); | 203 | struct klp_object *obj); |
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h index c5335df2372f..79c3873d58ac 100644 --- a/include/linux/lockdep.h +++ b/include/linux/lockdep.h | |||
| @@ -46,16 +46,22 @@ extern int lock_stat; | |||
| 46 | #define NR_LOCKDEP_CACHING_CLASSES 2 | 46 | #define NR_LOCKDEP_CACHING_CLASSES 2 |
| 47 | 47 | ||
| 48 | /* | 48 | /* |
| 49 | * Lock-classes are keyed via unique addresses, by embedding the | 49 | * A lockdep key is associated with each lock object. For static locks we use |
| 50 | * lockclass-key into the kernel (or module) .data section. (For | 50 | * the lock address itself as the key. Dynamically allocated lock objects can |
| 51 | * static locks we use the lock address itself as the key.) | 51 | * have a statically or dynamically allocated key. Dynamically allocated lock |
| 52 | * keys must be registered before being used and must be unregistered before | ||
| 53 | * the key memory is freed. | ||
| 52 | */ | 54 | */ |
| 53 | struct lockdep_subclass_key { | 55 | struct lockdep_subclass_key { |
| 54 | char __one_byte; | 56 | char __one_byte; |
| 55 | } __attribute__ ((__packed__)); | 57 | } __attribute__ ((__packed__)); |
| 56 | 58 | ||
| 59 | /* hash_entry is used to keep track of dynamically allocated keys. */ | ||
| 57 | struct lock_class_key { | 60 | struct lock_class_key { |
| 58 | struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; | 61 | union { |
| 62 | struct hlist_node hash_entry; | ||
| 63 | struct lockdep_subclass_key subkeys[MAX_LOCKDEP_SUBCLASSES]; | ||
| 64 | }; | ||
| 59 | }; | 65 | }; |
| 60 | 66 | ||
| 61 | extern struct lock_class_key __lockdep_no_validate__; | 67 | extern struct lock_class_key __lockdep_no_validate__; |
| @@ -63,7 +69,8 @@ extern struct lock_class_key __lockdep_no_validate__; | |||
| 63 | #define LOCKSTAT_POINTS 4 | 69 | #define LOCKSTAT_POINTS 4 |
| 64 | 70 | ||
| 65 | /* | 71 | /* |
| 66 | * The lock-class itself: | 72 | * The lock-class itself. The order of the structure members matters. |
| 73 | * reinit_class() zeroes the key member and all subsequent members. | ||
| 67 | */ | 74 | */ |
| 68 | struct lock_class { | 75 | struct lock_class { |
| 69 | /* | 76 | /* |
| @@ -72,10 +79,19 @@ struct lock_class { | |||
| 72 | struct hlist_node hash_entry; | 79 | struct hlist_node hash_entry; |
| 73 | 80 | ||
| 74 | /* | 81 | /* |
| 75 | * global list of all lock-classes: | 82 | * Entry in all_lock_classes when in use. Entry in free_lock_classes |
| 83 | * when not in use. Instances that are being freed are on one of the | ||
| 84 | * zapped_classes lists. | ||
| 76 | */ | 85 | */ |
| 77 | struct list_head lock_entry; | 86 | struct list_head lock_entry; |
| 78 | 87 | ||
| 88 | /* | ||
| 89 | * These fields represent a directed graph of lock dependencies, | ||
| 90 | * to every node we attach a list of "forward" and a list of | ||
| 91 | * "backward" graph nodes. | ||
| 92 | */ | ||
| 93 | struct list_head locks_after, locks_before; | ||
| 94 | |||
| 79 | struct lockdep_subclass_key *key; | 95 | struct lockdep_subclass_key *key; |
| 80 | unsigned int subclass; | 96 | unsigned int subclass; |
| 81 | unsigned int dep_gen_id; | 97 | unsigned int dep_gen_id; |
| @@ -87,13 +103,6 @@ struct lock_class { | |||
| 87 | struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; | 103 | struct stack_trace usage_traces[XXX_LOCK_USAGE_STATES]; |
| 88 | 104 | ||
| 89 | /* | 105 | /* |
| 90 | * These fields represent a directed graph of lock dependencies, | ||
| 91 | * to every node we attach a list of "forward" and a list of | ||
| 92 | * "backward" graph nodes. | ||
| 93 | */ | ||
| 94 | struct list_head locks_after, locks_before; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * Generation counter, when doing certain classes of graph walking, | 106 | * Generation counter, when doing certain classes of graph walking, |
| 98 | * to ensure that we check one node only once: | 107 | * to ensure that we check one node only once: |
| 99 | */ | 108 | */ |
| @@ -104,7 +113,7 @@ struct lock_class { | |||
| 104 | unsigned long contention_point[LOCKSTAT_POINTS]; | 113 | unsigned long contention_point[LOCKSTAT_POINTS]; |
| 105 | unsigned long contending_point[LOCKSTAT_POINTS]; | 114 | unsigned long contending_point[LOCKSTAT_POINTS]; |
| 106 | #endif | 115 | #endif |
| 107 | }; | 116 | } __no_randomize_layout; |
| 108 | 117 | ||
| 109 | #ifdef CONFIG_LOCK_STAT | 118 | #ifdef CONFIG_LOCK_STAT |
| 110 | struct lock_time { | 119 | struct lock_time { |
| @@ -178,6 +187,7 @@ static inline void lockdep_copy_map(struct lockdep_map *to, | |||
| 178 | struct lock_list { | 187 | struct lock_list { |
| 179 | struct list_head entry; | 188 | struct list_head entry; |
| 180 | struct lock_class *class; | 189 | struct lock_class *class; |
| 190 | struct lock_class *links_to; | ||
| 181 | struct stack_trace trace; | 191 | struct stack_trace trace; |
| 182 | int distance; | 192 | int distance; |
| 183 | 193 | ||
| @@ -264,10 +274,14 @@ extern void lockdep_reset(void); | |||
| 264 | extern void lockdep_reset_lock(struct lockdep_map *lock); | 274 | extern void lockdep_reset_lock(struct lockdep_map *lock); |
| 265 | extern void lockdep_free_key_range(void *start, unsigned long size); | 275 | extern void lockdep_free_key_range(void *start, unsigned long size); |
| 266 | extern asmlinkage void lockdep_sys_exit(void); | 276 | extern asmlinkage void lockdep_sys_exit(void); |
| 277 | extern void lockdep_set_selftest_task(struct task_struct *task); | ||
| 267 | 278 | ||
| 268 | extern void lockdep_off(void); | 279 | extern void lockdep_off(void); |
| 269 | extern void lockdep_on(void); | 280 | extern void lockdep_on(void); |
| 270 | 281 | ||
| 282 | extern void lockdep_register_key(struct lock_class_key *key); | ||
| 283 | extern void lockdep_unregister_key(struct lock_class_key *key); | ||
| 284 | |||
| 271 | /* | 285 | /* |
| 272 | * These methods are used by specific locking variants (spinlocks, | 286 | * These methods are used by specific locking variants (spinlocks, |
| 273 | * rwlocks, mutexes and rwsems) to pass init/acquire/release events | 287 | * rwlocks, mutexes and rwsems) to pass init/acquire/release events |
| @@ -394,6 +408,10 @@ static inline void lockdep_on(void) | |||
| 394 | { | 408 | { |
| 395 | } | 409 | } |
| 396 | 410 | ||
| 411 | static inline void lockdep_set_selftest_task(struct task_struct *task) | ||
| 412 | { | ||
| 413 | } | ||
| 414 | |||
| 397 | # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) | 415 | # define lock_acquire(l, s, t, r, c, n, i) do { } while (0) |
| 398 | # define lock_release(l, n, i) do { } while (0) | 416 | # define lock_release(l, n, i) do { } while (0) |
| 399 | # define lock_downgrade(l, i) do { } while (0) | 417 | # define lock_downgrade(l, i) do { } while (0) |
| @@ -425,6 +443,14 @@ static inline void lockdep_on(void) | |||
| 425 | */ | 443 | */ |
| 426 | struct lock_class_key { }; | 444 | struct lock_class_key { }; |
| 427 | 445 | ||
| 446 | static inline void lockdep_register_key(struct lock_class_key *key) | ||
| 447 | { | ||
| 448 | } | ||
| 449 | |||
| 450 | static inline void lockdep_unregister_key(struct lock_class_key *key) | ||
| 451 | { | ||
| 452 | } | ||
| 453 | |||
| 428 | /* | 454 | /* |
| 429 | * The lockdep_map takes no space if lockdep is disabled: | 455 | * The lockdep_map takes no space if lockdep is disabled: |
| 430 | */ | 456 | */ |
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 9a0bdf91e646..a9b8ff578b6b 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h | |||
| @@ -76,6 +76,22 @@ | |||
| 76 | * changes on the process such as clearing out non-inheritable signal | 76 | * changes on the process such as clearing out non-inheritable signal |
| 77 | * state. This is called immediately after commit_creds(). | 77 | * state. This is called immediately after commit_creds(). |
| 78 | * | 78 | * |
| 79 | * Security hooks for mount using fs_context. | ||
| 80 | * [See also Documentation/filesystems/mounting.txt] | ||
| 81 | * | ||
| 82 | * @fs_context_dup: | ||
| 83 | * Allocate and attach a security structure to sc->security. This pointer | ||
| 84 | * is initialised to NULL by the caller. | ||
| 85 | * @fc indicates the new filesystem context. | ||
| 86 | * @src_fc indicates the original filesystem context. | ||
| 87 | * @fs_context_parse_param: | ||
| 88 | * Userspace provided a parameter to configure a superblock. The LSM may | ||
| 89 | * reject it with an error and may use it for itself, in which case it | ||
| 90 | * should return 0; otherwise it should return -ENOPARAM to pass it on to | ||
| 91 | * the filesystem. | ||
| 92 | * @fc indicates the filesystem context. | ||
| 93 | * @param The parameter | ||
| 94 | * | ||
| 79 | * Security hooks for filesystem operations. | 95 | * Security hooks for filesystem operations. |
| 80 | * | 96 | * |
| 81 | * @sb_alloc_security: | 97 | * @sb_alloc_security: |
| @@ -1270,7 +1286,7 @@ | |||
| 1270 | * @cred contains the credentials to use. | 1286 | * @cred contains the credentials to use. |
| 1271 | * @ns contains the user namespace we want the capability in | 1287 | * @ns contains the user namespace we want the capability in |
| 1272 | * @cap contains the capability <include/linux/capability.h>. | 1288 | * @cap contains the capability <include/linux/capability.h>. |
| 1273 | * @audit contains whether to write an audit message or not | 1289 | * @opts contains options for the capable check <include/linux/security.h> |
| 1274 | * Return 0 if the capability is granted for @tsk. | 1290 | * Return 0 if the capability is granted for @tsk. |
| 1275 | * @syslog: | 1291 | * @syslog: |
| 1276 | * Check permission before accessing the kernel message ring or changing | 1292 | * Check permission before accessing the kernel message ring or changing |
| @@ -1344,7 +1360,6 @@ | |||
| 1344 | * @field contains the field which relates to current LSM. | 1360 | * @field contains the field which relates to current LSM. |
| 1345 | * @op contains the operator that will be used for matching. | 1361 | * @op contains the operator that will be used for matching. |
| 1346 | * @rule points to the audit rule that will be checked against. | 1362 | * @rule points to the audit rule that will be checked against. |
| 1347 | * @actx points to the audit context associated with the check. | ||
| 1348 | * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. | 1363 | * Return 1 if secid matches the rule, 0 if it does not, -ERRNO on failure. |
| 1349 | * | 1364 | * |
| 1350 | * @audit_rule_free: | 1365 | * @audit_rule_free: |
| @@ -1446,8 +1461,10 @@ union security_list_options { | |||
| 1446 | const kernel_cap_t *effective, | 1461 | const kernel_cap_t *effective, |
| 1447 | const kernel_cap_t *inheritable, | 1462 | const kernel_cap_t *inheritable, |
| 1448 | const kernel_cap_t *permitted); | 1463 | const kernel_cap_t *permitted); |
| 1449 | int (*capable)(const struct cred *cred, struct user_namespace *ns, | 1464 | int (*capable)(const struct cred *cred, |
| 1450 | int cap, int audit); | 1465 | struct user_namespace *ns, |
| 1466 | int cap, | ||
| 1467 | unsigned int opts); | ||
| 1451 | int (*quotactl)(int cmds, int type, int id, struct super_block *sb); | 1468 | int (*quotactl)(int cmds, int type, int id, struct super_block *sb); |
| 1452 | int (*quota_on)(struct dentry *dentry); | 1469 | int (*quota_on)(struct dentry *dentry); |
| 1453 | int (*syslog)(int type); | 1470 | int (*syslog)(int type); |
| @@ -1459,6 +1476,9 @@ union security_list_options { | |||
| 1459 | void (*bprm_committing_creds)(struct linux_binprm *bprm); | 1476 | void (*bprm_committing_creds)(struct linux_binprm *bprm); |
| 1460 | void (*bprm_committed_creds)(struct linux_binprm *bprm); | 1477 | void (*bprm_committed_creds)(struct linux_binprm *bprm); |
| 1461 | 1478 | ||
| 1479 | int (*fs_context_dup)(struct fs_context *fc, struct fs_context *src_sc); | ||
| 1480 | int (*fs_context_parse_param)(struct fs_context *fc, struct fs_parameter *param); | ||
| 1481 | |||
| 1462 | int (*sb_alloc_security)(struct super_block *sb); | 1482 | int (*sb_alloc_security)(struct super_block *sb); |
| 1463 | void (*sb_free_security)(struct super_block *sb); | 1483 | void (*sb_free_security)(struct super_block *sb); |
| 1464 | void (*sb_free_mnt_opts)(void *mnt_opts); | 1484 | void (*sb_free_mnt_opts)(void *mnt_opts); |
| @@ -1764,8 +1784,7 @@ union security_list_options { | |||
| 1764 | int (*audit_rule_init)(u32 field, u32 op, char *rulestr, | 1784 | int (*audit_rule_init)(u32 field, u32 op, char *rulestr, |
| 1765 | void **lsmrule); | 1785 | void **lsmrule); |
| 1766 | int (*audit_rule_known)(struct audit_krule *krule); | 1786 | int (*audit_rule_known)(struct audit_krule *krule); |
| 1767 | int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule, | 1787 | int (*audit_rule_match)(u32 secid, u32 field, u32 op, void *lsmrule); |
| 1768 | struct audit_context *actx); | ||
| 1769 | void (*audit_rule_free)(void *lsmrule); | 1788 | void (*audit_rule_free)(void *lsmrule); |
| 1770 | #endif /* CONFIG_AUDIT */ | 1789 | #endif /* CONFIG_AUDIT */ |
| 1771 | 1790 | ||
| @@ -1800,6 +1819,8 @@ struct security_hook_heads { | |||
| 1800 | struct hlist_head bprm_check_security; | 1819 | struct hlist_head bprm_check_security; |
| 1801 | struct hlist_head bprm_committing_creds; | 1820 | struct hlist_head bprm_committing_creds; |
| 1802 | struct hlist_head bprm_committed_creds; | 1821 | struct hlist_head bprm_committed_creds; |
| 1822 | struct hlist_head fs_context_dup; | ||
| 1823 | struct hlist_head fs_context_parse_param; | ||
| 1803 | struct hlist_head sb_alloc_security; | 1824 | struct hlist_head sb_alloc_security; |
| 1804 | struct hlist_head sb_free_security; | 1825 | struct hlist_head sb_free_security; |
| 1805 | struct hlist_head sb_free_mnt_opts; | 1826 | struct hlist_head sb_free_mnt_opts; |
| @@ -2028,6 +2049,18 @@ struct security_hook_list { | |||
| 2028 | } __randomize_layout; | 2049 | } __randomize_layout; |
| 2029 | 2050 | ||
| 2030 | /* | 2051 | /* |
| 2052 | * Security blob size or offset data. | ||
| 2053 | */ | ||
| 2054 | struct lsm_blob_sizes { | ||
| 2055 | int lbs_cred; | ||
| 2056 | int lbs_file; | ||
| 2057 | int lbs_inode; | ||
| 2058 | int lbs_ipc; | ||
| 2059 | int lbs_msg_msg; | ||
| 2060 | int lbs_task; | ||
| 2061 | }; | ||
| 2062 | |||
| 2063 | /* | ||
| 2031 | * Initializing a security_hook_list structure takes | 2064 | * Initializing a security_hook_list structure takes |
| 2032 | * up a lot of space in a source file. This macro takes | 2065 | * up a lot of space in a source file. This macro takes |
| 2033 | * care of the common case and reduces the amount of | 2066 | * care of the common case and reduces the amount of |
| @@ -2042,9 +2075,21 @@ extern char *lsm_names; | |||
| 2042 | extern void security_add_hooks(struct security_hook_list *hooks, int count, | 2075 | extern void security_add_hooks(struct security_hook_list *hooks, int count, |
| 2043 | char *lsm); | 2076 | char *lsm); |
| 2044 | 2077 | ||
| 2078 | #define LSM_FLAG_LEGACY_MAJOR BIT(0) | ||
| 2079 | #define LSM_FLAG_EXCLUSIVE BIT(1) | ||
| 2080 | |||
| 2081 | enum lsm_order { | ||
| 2082 | LSM_ORDER_FIRST = -1, /* This is only for capabilities. */ | ||
| 2083 | LSM_ORDER_MUTABLE = 0, | ||
| 2084 | }; | ||
| 2085 | |||
| 2045 | struct lsm_info { | 2086 | struct lsm_info { |
| 2046 | const char *name; /* Required. */ | 2087 | const char *name; /* Required. */ |
| 2088 | enum lsm_order order; /* Optional: default is LSM_ORDER_MUTABLE */ | ||
| 2089 | unsigned long flags; /* Optional: flags describing LSM */ | ||
| 2090 | int *enabled; /* Optional: controlled by CONFIG_LSM */ | ||
| 2047 | int (*init)(void); /* Required. */ | 2091 | int (*init)(void); /* Required. */ |
| 2092 | struct lsm_blob_sizes *blobs; /* Optional: for blob sharing. */ | ||
| 2048 | }; | 2093 | }; |
| 2049 | 2094 | ||
| 2050 | extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; | 2095 | extern struct lsm_info __start_lsm_info[], __end_lsm_info[]; |
| @@ -2084,17 +2129,6 @@ static inline void security_delete_hooks(struct security_hook_list *hooks, | |||
| 2084 | #define __lsm_ro_after_init __ro_after_init | 2129 | #define __lsm_ro_after_init __ro_after_init |
| 2085 | #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ | 2130 | #endif /* CONFIG_SECURITY_WRITABLE_HOOKS */ |
| 2086 | 2131 | ||
| 2087 | extern int __init security_module_enable(const char *module); | 2132 | extern int lsm_inode_alloc(struct inode *inode); |
| 2088 | extern void __init capability_add_hooks(void); | ||
| 2089 | #ifdef CONFIG_SECURITY_YAMA | ||
| 2090 | extern void __init yama_add_hooks(void); | ||
| 2091 | #else | ||
| 2092 | static inline void __init yama_add_hooks(void) { } | ||
| 2093 | #endif | ||
| 2094 | #ifdef CONFIG_SECURITY_LOADPIN | ||
| 2095 | void __init loadpin_add_hooks(void); | ||
| 2096 | #else | ||
| 2097 | static inline void loadpin_add_hooks(void) { }; | ||
| 2098 | #endif | ||
| 2099 | 2133 | ||
| 2100 | #endif /* ! __LINUX_LSM_HOOKS_H */ | 2134 | #endif /* ! __LINUX_LSM_HOOKS_H */ |
diff --git a/include/linux/lzo.h b/include/linux/lzo.h index 2ae27cb89927..e95c7d1092b2 100644 --- a/include/linux/lzo.h +++ b/include/linux/lzo.h | |||
| @@ -18,12 +18,16 @@ | |||
| 18 | #define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short)) | 18 | #define LZO1X_1_MEM_COMPRESS (8192 * sizeof(unsigned short)) |
| 19 | #define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS | 19 | #define LZO1X_MEM_COMPRESS LZO1X_1_MEM_COMPRESS |
| 20 | 20 | ||
| 21 | #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3) | 21 | #define lzo1x_worst_compress(x) ((x) + ((x) / 16) + 64 + 3 + 2) |
| 22 | 22 | ||
| 23 | /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ | 23 | /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ |
| 24 | int lzo1x_1_compress(const unsigned char *src, size_t src_len, | 24 | int lzo1x_1_compress(const unsigned char *src, size_t src_len, |
| 25 | unsigned char *dst, size_t *dst_len, void *wrkmem); | 25 | unsigned char *dst, size_t *dst_len, void *wrkmem); |
| 26 | 26 | ||
| 27 | /* This requires 'wrkmem' of size LZO1X_1_MEM_COMPRESS */ | ||
| 28 | int lzorle1x_1_compress(const unsigned char *src, size_t src_len, | ||
| 29 | unsigned char *dst, size_t *dst_len, void *wrkmem); | ||
| 30 | |||
| 27 | /* safe decompression with overrun testing */ | 31 | /* safe decompression with overrun testing */ |
| 28 | int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, | 32 | int lzo1x_decompress_safe(const unsigned char *src, size_t src_len, |
| 29 | unsigned char *dst, size_t *dst_len); | 33 | unsigned char *dst, size_t *dst_len); |
diff --git a/include/linux/mailbox/zynqmp-ipi-message.h b/include/linux/mailbox/zynqmp-ipi-message.h new file mode 100644 index 000000000000..9542b41eacfd --- /dev/null +++ b/include/linux/mailbox/zynqmp-ipi-message.h | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | |||
| 3 | #ifndef _LINUX_ZYNQMP_IPI_MESSAGE_H_ | ||
| 4 | #define _LINUX_ZYNQMP_IPI_MESSAGE_H_ | ||
| 5 | |||
| 6 | /** | ||
| 7 | * struct zynqmp_ipi_message - ZynqMP IPI message structure | ||
| 8 | * @len: Length of message | ||
| 9 | * @data: message payload | ||
| 10 | * | ||
| 11 | * This is the structure for data used in mbox_send_message | ||
| 12 | * the maximum length of data buffer is fixed to 12 bytes. | ||
| 13 | * Client is supposed to be aware of this. | ||
| 14 | */ | ||
| 15 | struct zynqmp_ipi_message { | ||
| 16 | size_t len; | ||
| 17 | u8 data[0]; | ||
| 18 | }; | ||
| 19 | |||
| 20 | #endif /* _LINUX_ZYNQMP_IPI_MESSAGE_H_ */ | ||
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h index 1eb6f244588d..73d04743a2bb 100644 --- a/include/linux/marvell_phy.h +++ b/include/linux/marvell_phy.h | |||
| @@ -20,6 +20,8 @@ | |||
| 20 | #define MARVELL_PHY_ID_88E1540 0x01410eb0 | 20 | #define MARVELL_PHY_ID_88E1540 0x01410eb0 |
| 21 | #define MARVELL_PHY_ID_88E1545 0x01410ea0 | 21 | #define MARVELL_PHY_ID_88E1545 0x01410ea0 |
| 22 | #define MARVELL_PHY_ID_88E3016 0x01410e60 | 22 | #define MARVELL_PHY_ID_88E3016 0x01410e60 |
| 23 | #define MARVELL_PHY_ID_88X3310 0x002b09a0 | ||
| 24 | #define MARVELL_PHY_ID_88E2110 0x002b09b0 | ||
| 23 | 25 | ||
| 24 | /* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do | 26 | /* The MV88e6390 Ethernet switch contains embedded PHYs. These PHYs do |
| 25 | * not have a model ID. So the switch driver traps reads to the ID2 | 27 | * not have a model ID. So the switch driver traps reads to the ID2 |
diff --git a/include/linux/math64.h b/include/linux/math64.h index bb2c84afb80c..65bef21cdddb 100644 --- a/include/linux/math64.h +++ b/include/linux/math64.h | |||
| @@ -284,4 +284,17 @@ static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor) | |||
| 284 | #define DIV64_U64_ROUND_UP(ll, d) \ | 284 | #define DIV64_U64_ROUND_UP(ll, d) \ |
| 285 | ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) | 285 | ({ u64 _tmp = (d); div64_u64((ll) + _tmp - 1, _tmp); }) |
| 286 | 286 | ||
| 287 | /** | ||
| 288 | * DIV64_U64_ROUND_CLOSEST - unsigned 64bit divide with 64bit divisor rounded to nearest integer | ||
| 289 | * @dividend: unsigned 64bit dividend | ||
| 290 | * @divisor: unsigned 64bit divisor | ||
| 291 | * | ||
| 292 | * Divide unsigned 64bit dividend by unsigned 64bit divisor | ||
| 293 | * and round to closest integer. | ||
| 294 | * | ||
| 295 | * Return: dividend / divisor rounded to nearest integer | ||
| 296 | */ | ||
| 297 | #define DIV64_U64_ROUND_CLOSEST(dividend, divisor) \ | ||
| 298 | ({ u64 _tmp = (divisor); div64_u64((dividend) + _tmp / 2, _tmp); }) | ||
| 299 | |||
| 287 | #endif /* _LINUX_MATH64_H */ | 300 | #endif /* _LINUX_MATH64_H */ |
diff --git a/include/linux/mdev.h b/include/linux/mdev.h index b6e048e1045f..d7aee90e5da5 100644 --- a/include/linux/mdev.h +++ b/include/linux/mdev.h | |||
| @@ -120,7 +120,7 @@ struct mdev_driver { | |||
| 120 | 120 | ||
| 121 | extern void *mdev_get_drvdata(struct mdev_device *mdev); | 121 | extern void *mdev_get_drvdata(struct mdev_device *mdev); |
| 122 | extern void mdev_set_drvdata(struct mdev_device *mdev, void *data); | 122 | extern void mdev_set_drvdata(struct mdev_device *mdev, void *data); |
| 123 | extern uuid_le mdev_uuid(struct mdev_device *mdev); | 123 | extern const guid_t *mdev_uuid(struct mdev_device *mdev); |
| 124 | 124 | ||
| 125 | extern struct bus_type mdev_bus_type; | 125 | extern struct bus_type mdev_bus_type; |
| 126 | 126 | ||
diff --git a/include/linux/mdio.h b/include/linux/mdio.h index bfa7114167d7..3e99ae3ed87f 100644 --- a/include/linux/mdio.h +++ b/include/linux/mdio.h | |||
| @@ -261,6 +261,50 @@ static inline u16 ethtool_adv_to_mmd_eee_adv_t(u32 adv) | |||
| 261 | return reg; | 261 | return reg; |
| 262 | } | 262 | } |
| 263 | 263 | ||
| 264 | /** | ||
| 265 | * linkmode_adv_to_mii_10gbt_adv_t | ||
| 266 | * @advertising: the linkmode advertisement settings | ||
| 267 | * | ||
| 268 | * A small helper function that translates linkmode advertisement | ||
| 269 | * settings to phy autonegotiation advertisements for the C45 | ||
| 270 | * 10GBASE-T AN CONTROL (7.32) register. | ||
| 271 | */ | ||
| 272 | static inline u32 linkmode_adv_to_mii_10gbt_adv_t(unsigned long *advertising) | ||
| 273 | { | ||
| 274 | u32 result = 0; | ||
| 275 | |||
| 276 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, | ||
| 277 | advertising)) | ||
| 278 | result |= MDIO_AN_10GBT_CTRL_ADV2_5G; | ||
| 279 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, | ||
| 280 | advertising)) | ||
| 281 | result |= MDIO_AN_10GBT_CTRL_ADV5G; | ||
| 282 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, | ||
| 283 | advertising)) | ||
| 284 | result |= MDIO_AN_10GBT_CTRL_ADV10G; | ||
| 285 | |||
| 286 | return result; | ||
| 287 | } | ||
| 288 | |||
| 289 | /** | ||
| 290 | * mii_10gbt_stat_mod_linkmode_lpa_t | ||
| 291 | * @advertising: target the linkmode advertisement settings | ||
| 292 | * @adv: value of the C45 10GBASE-T AN STATUS register | ||
| 293 | * | ||
| 294 | * A small helper function that translates C45 10GBASE-T AN STATUS register bits | ||
| 295 | * to linkmode advertisement settings. Other bits in advertising aren't changed. | ||
| 296 | */ | ||
| 297 | static inline void mii_10gbt_stat_mod_linkmode_lpa_t(unsigned long *advertising, | ||
| 298 | u32 lpa) | ||
| 299 | { | ||
| 300 | linkmode_mod_bit(ETHTOOL_LINK_MODE_2500baseT_Full_BIT, | ||
| 301 | advertising, lpa & MDIO_AN_10GBT_STAT_LP2_5G); | ||
| 302 | linkmode_mod_bit(ETHTOOL_LINK_MODE_5000baseT_Full_BIT, | ||
| 303 | advertising, lpa & MDIO_AN_10GBT_STAT_LP5G); | ||
| 304 | linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT, | ||
| 305 | advertising, lpa & MDIO_AN_10GBT_STAT_LP10G); | ||
| 306 | } | ||
| 307 | |||
| 264 | int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); | 308 | int __mdiobus_read(struct mii_bus *bus, int addr, u32 regnum); |
| 265 | int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); | 309 | int __mdiobus_write(struct mii_bus *bus, int addr, u32 regnum, u16 val); |
| 266 | 310 | ||
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h index 7fde40e17c8b..03b6ba2a63f8 100644 --- a/include/linux/mei_cl_bus.h +++ b/include/linux/mei_cl_bus.h | |||
| @@ -55,6 +55,8 @@ struct mei_cl_device { | |||
| 55 | void *priv_data; | 55 | void *priv_data; |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | #define to_mei_cl_device(d) container_of(d, struct mei_cl_device, dev) | ||
| 59 | |||
| 58 | struct mei_cl_driver { | 60 | struct mei_cl_driver { |
| 59 | struct device_driver driver; | 61 | struct device_driver driver; |
| 60 | const char *name; | 62 | const char *name; |
diff --git a/include/linux/memblock.h b/include/linux/memblock.h index 64c41cf45590..294d5d80e150 100644 --- a/include/linux/memblock.h +++ b/include/linux/memblock.h | |||
| @@ -29,9 +29,6 @@ extern unsigned long max_pfn; | |||
| 29 | */ | 29 | */ |
| 30 | extern unsigned long long max_possible_pfn; | 30 | extern unsigned long long max_possible_pfn; |
| 31 | 31 | ||
| 32 | #define INIT_MEMBLOCK_REGIONS 128 | ||
| 33 | #define INIT_PHYSMEM_REGIONS 4 | ||
| 34 | |||
| 35 | /** | 32 | /** |
| 36 | * enum memblock_flags - definition of memory region attributes | 33 | * enum memblock_flags - definition of memory region attributes |
| 37 | * @MEMBLOCK_NONE: no special request | 34 | * @MEMBLOCK_NONE: no special request |
| @@ -111,9 +108,6 @@ void memblock_discard(void); | |||
| 111 | #define memblock_dbg(fmt, ...) \ | 108 | #define memblock_dbg(fmt, ...) \ |
| 112 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) | 109 | if (memblock_debug) printk(KERN_INFO pr_fmt(fmt), ##__VA_ARGS__) |
| 113 | 110 | ||
| 114 | phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align, | ||
| 115 | phys_addr_t start, phys_addr_t end, | ||
| 116 | int nid, enum memblock_flags flags); | ||
| 117 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, | 111 | phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end, |
| 118 | phys_addr_t size, phys_addr_t align); | 112 | phys_addr_t size, phys_addr_t align); |
| 119 | void memblock_allow_resize(void); | 113 | void memblock_allow_resize(void); |
| @@ -130,7 +124,6 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size); | |||
| 130 | int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); | 124 | int memblock_mark_mirror(phys_addr_t base, phys_addr_t size); |
| 131 | int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); | 125 | int memblock_mark_nomap(phys_addr_t base, phys_addr_t size); |
| 132 | int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); | 126 | int memblock_clear_nomap(phys_addr_t base, phys_addr_t size); |
| 133 | enum memblock_flags choose_memblock_flags(void); | ||
| 134 | 127 | ||
| 135 | unsigned long memblock_free_all(void); | 128 | unsigned long memblock_free_all(void); |
| 136 | void reset_node_managed_pages(pg_data_t *pgdat); | 129 | void reset_node_managed_pages(pg_data_t *pgdat); |
| @@ -280,18 +273,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn, | |||
| 280 | for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ | 273 | for_each_mem_range_rev(i, &memblock.memory, &memblock.reserved, \ |
| 281 | nid, flags, p_start, p_end, p_nid) | 274 | nid, flags, p_start, p_end, p_nid) |
| 282 | 275 | ||
| 283 | static inline void memblock_set_region_flags(struct memblock_region *r, | ||
| 284 | enum memblock_flags flags) | ||
| 285 | { | ||
| 286 | r->flags |= flags; | ||
| 287 | } | ||
| 288 | |||
| 289 | static inline void memblock_clear_region_flags(struct memblock_region *r, | ||
| 290 | enum memblock_flags flags) | ||
| 291 | { | ||
| 292 | r->flags &= ~flags; | ||
| 293 | } | ||
| 294 | |||
| 295 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP | 276 | #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP |
| 296 | int memblock_set_node(phys_addr_t base, phys_addr_t size, | 277 | int memblock_set_node(phys_addr_t base, phys_addr_t size, |
| 297 | struct memblock_type *type, int nid); | 278 | struct memblock_type *type, int nid); |
| @@ -328,17 +309,20 @@ static inline int memblock_get_region_node(const struct memblock_region *r) | |||
| 328 | #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL | 309 | #define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL |
| 329 | #endif | 310 | #endif |
| 330 | 311 | ||
| 331 | phys_addr_t memblock_phys_alloc_nid(phys_addr_t size, phys_addr_t align, int nid); | 312 | phys_addr_t memblock_phys_alloc_range(phys_addr_t size, phys_addr_t align, |
| 313 | phys_addr_t start, phys_addr_t end); | ||
| 332 | phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); | 314 | phys_addr_t memblock_phys_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid); |
| 333 | 315 | ||
| 334 | phys_addr_t memblock_phys_alloc(phys_addr_t size, phys_addr_t align); | 316 | static inline phys_addr_t memblock_phys_alloc(phys_addr_t size, |
| 317 | phys_addr_t align) | ||
| 318 | { | ||
| 319 | return memblock_phys_alloc_range(size, align, 0, | ||
| 320 | MEMBLOCK_ALLOC_ACCESSIBLE); | ||
| 321 | } | ||
| 335 | 322 | ||
| 336 | void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, | 323 | void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align, |
| 337 | phys_addr_t min_addr, phys_addr_t max_addr, | 324 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 338 | int nid); | 325 | int nid); |
| 339 | void *memblock_alloc_try_nid_nopanic(phys_addr_t size, phys_addr_t align, | ||
| 340 | phys_addr_t min_addr, phys_addr_t max_addr, | ||
| 341 | int nid); | ||
| 342 | void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, | 326 | void *memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, |
| 343 | phys_addr_t min_addr, phys_addr_t max_addr, | 327 | phys_addr_t min_addr, phys_addr_t max_addr, |
| 344 | int nid); | 328 | int nid); |
| @@ -365,36 +349,12 @@ static inline void * __init memblock_alloc_from(phys_addr_t size, | |||
| 365 | MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); | 349 | MEMBLOCK_ALLOC_ACCESSIBLE, NUMA_NO_NODE); |
| 366 | } | 350 | } |
| 367 | 351 | ||
| 368 | static inline void * __init memblock_alloc_nopanic(phys_addr_t size, | ||
| 369 | phys_addr_t align) | ||
| 370 | { | ||
| 371 | return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT, | ||
| 372 | MEMBLOCK_ALLOC_ACCESSIBLE, | ||
| 373 | NUMA_NO_NODE); | ||
| 374 | } | ||
| 375 | |||
| 376 | static inline void * __init memblock_alloc_low(phys_addr_t size, | 352 | static inline void * __init memblock_alloc_low(phys_addr_t size, |
| 377 | phys_addr_t align) | 353 | phys_addr_t align) |
| 378 | { | 354 | { |
| 379 | return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, | 355 | return memblock_alloc_try_nid(size, align, MEMBLOCK_LOW_LIMIT, |
| 380 | ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); | 356 | ARCH_LOW_ADDRESS_LIMIT, NUMA_NO_NODE); |
| 381 | } | 357 | } |
| 382 | static inline void * __init memblock_alloc_low_nopanic(phys_addr_t size, | ||
| 383 | phys_addr_t align) | ||
| 384 | { | ||
| 385 | return memblock_alloc_try_nid_nopanic(size, align, MEMBLOCK_LOW_LIMIT, | ||
| 386 | ARCH_LOW_ADDRESS_LIMIT, | ||
| 387 | NUMA_NO_NODE); | ||
| 388 | } | ||
| 389 | |||
| 390 | static inline void * __init memblock_alloc_from_nopanic(phys_addr_t size, | ||
| 391 | phys_addr_t align, | ||
| 392 | phys_addr_t min_addr) | ||
| 393 | { | ||
| 394 | return memblock_alloc_try_nid_nopanic(size, align, min_addr, | ||
| 395 | MEMBLOCK_ALLOC_ACCESSIBLE, | ||
| 396 | NUMA_NO_NODE); | ||
| 397 | } | ||
| 398 | 358 | ||
| 399 | static inline void * __init memblock_alloc_node(phys_addr_t size, | 359 | static inline void * __init memblock_alloc_node(phys_addr_t size, |
| 400 | phys_addr_t align, int nid) | 360 | phys_addr_t align, int nid) |
| @@ -403,14 +363,6 @@ static inline void * __init memblock_alloc_node(phys_addr_t size, | |||
| 403 | MEMBLOCK_ALLOC_ACCESSIBLE, nid); | 363 | MEMBLOCK_ALLOC_ACCESSIBLE, nid); |
| 404 | } | 364 | } |
| 405 | 365 | ||
| 406 | static inline void * __init memblock_alloc_node_nopanic(phys_addr_t size, | ||
| 407 | int nid) | ||
| 408 | { | ||
| 409 | return memblock_alloc_try_nid_nopanic(size, SMP_CACHE_BYTES, | ||
| 410 | MEMBLOCK_LOW_LIMIT, | ||
| 411 | MEMBLOCK_ALLOC_ACCESSIBLE, nid); | ||
| 412 | } | ||
| 413 | |||
| 414 | static inline void __init memblock_free_early(phys_addr_t base, | 366 | static inline void __init memblock_free_early(phys_addr_t base, |
| 415 | phys_addr_t size) | 367 | phys_addr_t size) |
| 416 | { | 368 | { |
| @@ -446,16 +398,6 @@ static inline bool memblock_bottom_up(void) | |||
| 446 | return memblock.bottom_up; | 398 | return memblock.bottom_up; |
| 447 | } | 399 | } |
| 448 | 400 | ||
| 449 | phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align, | ||
| 450 | phys_addr_t start, phys_addr_t end, | ||
| 451 | enum memblock_flags flags); | ||
| 452 | phys_addr_t memblock_alloc_base_nid(phys_addr_t size, | ||
| 453 | phys_addr_t align, phys_addr_t max_addr, | ||
| 454 | int nid, enum memblock_flags flags); | ||
| 455 | phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align, | ||
| 456 | phys_addr_t max_addr); | ||
| 457 | phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align, | ||
| 458 | phys_addr_t max_addr); | ||
| 459 | phys_addr_t memblock_phys_mem_size(void); | 401 | phys_addr_t memblock_phys_mem_size(void); |
| 460 | phys_addr_t memblock_reserved_size(void); | 402 | phys_addr_t memblock_reserved_size(void); |
| 461 | phys_addr_t memblock_mem_size(unsigned long limit_pfn); | 403 | phys_addr_t memblock_mem_size(unsigned long limit_pfn); |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 83ae11cbd12c..dbb6118370c1 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
| @@ -429,6 +429,11 @@ static inline unsigned short mem_cgroup_id(struct mem_cgroup *memcg) | |||
| 429 | } | 429 | } |
| 430 | struct mem_cgroup *mem_cgroup_from_id(unsigned short id); | 430 | struct mem_cgroup *mem_cgroup_from_id(unsigned short id); |
| 431 | 431 | ||
| 432 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) | ||
| 433 | { | ||
| 434 | return mem_cgroup_from_css(seq_css(m)); | ||
| 435 | } | ||
| 436 | |||
| 432 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) | 437 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
| 433 | { | 438 | { |
| 434 | struct mem_cgroup_per_node *mz; | 439 | struct mem_cgroup_per_node *mz; |
| @@ -561,7 +566,10 @@ struct mem_cgroup *lock_page_memcg(struct page *page); | |||
| 561 | void __unlock_page_memcg(struct mem_cgroup *memcg); | 566 | void __unlock_page_memcg(struct mem_cgroup *memcg); |
| 562 | void unlock_page_memcg(struct page *page); | 567 | void unlock_page_memcg(struct page *page); |
| 563 | 568 | ||
| 564 | /* idx can be of type enum memcg_stat_item or node_stat_item */ | 569 | /* |
| 570 | * idx can be of type enum memcg_stat_item or node_stat_item. | ||
| 571 | * Keep in sync with memcg_exact_page_state(). | ||
| 572 | */ | ||
| 565 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, | 573 | static inline unsigned long memcg_page_state(struct mem_cgroup *memcg, |
| 566 | int idx) | 574 | int idx) |
| 567 | { | 575 | { |
| @@ -937,6 +945,11 @@ static inline struct mem_cgroup *mem_cgroup_from_id(unsigned short id) | |||
| 937 | return NULL; | 945 | return NULL; |
| 938 | } | 946 | } |
| 939 | 947 | ||
| 948 | static inline struct mem_cgroup *mem_cgroup_from_seq(struct seq_file *m) | ||
| 949 | { | ||
| 950 | return NULL; | ||
| 951 | } | ||
| 952 | |||
| 940 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) | 953 | static inline struct mem_cgroup *lruvec_memcg(struct lruvec *lruvec) |
| 941 | { | 954 | { |
| 942 | return NULL; | 955 | return NULL; |
| @@ -1273,12 +1286,12 @@ static inline bool mem_cgroup_under_socket_pressure(struct mem_cgroup *memcg) | |||
| 1273 | 1286 | ||
| 1274 | struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); | 1287 | struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep); |
| 1275 | void memcg_kmem_put_cache(struct kmem_cache *cachep); | 1288 | void memcg_kmem_put_cache(struct kmem_cache *cachep); |
| 1276 | int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, | ||
| 1277 | struct mem_cgroup *memcg); | ||
| 1278 | 1289 | ||
| 1279 | #ifdef CONFIG_MEMCG_KMEM | 1290 | #ifdef CONFIG_MEMCG_KMEM |
| 1280 | int memcg_kmem_charge(struct page *page, gfp_t gfp, int order); | 1291 | int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order); |
| 1281 | void memcg_kmem_uncharge(struct page *page, int order); | 1292 | void __memcg_kmem_uncharge(struct page *page, int order); |
| 1293 | int __memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order, | ||
| 1294 | struct mem_cgroup *memcg); | ||
| 1282 | 1295 | ||
| 1283 | extern struct static_key_false memcg_kmem_enabled_key; | 1296 | extern struct static_key_false memcg_kmem_enabled_key; |
| 1284 | extern struct workqueue_struct *memcg_kmem_cache_wq; | 1297 | extern struct workqueue_struct *memcg_kmem_cache_wq; |
| @@ -1300,6 +1313,26 @@ static inline bool memcg_kmem_enabled(void) | |||
| 1300 | return static_branch_unlikely(&memcg_kmem_enabled_key); | 1313 | return static_branch_unlikely(&memcg_kmem_enabled_key); |
| 1301 | } | 1314 | } |
| 1302 | 1315 | ||
| 1316 | static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order) | ||
| 1317 | { | ||
| 1318 | if (memcg_kmem_enabled()) | ||
| 1319 | return __memcg_kmem_charge(page, gfp, order); | ||
| 1320 | return 0; | ||
| 1321 | } | ||
| 1322 | |||
| 1323 | static inline void memcg_kmem_uncharge(struct page *page, int order) | ||
| 1324 | { | ||
| 1325 | if (memcg_kmem_enabled()) | ||
| 1326 | __memcg_kmem_uncharge(page, order); | ||
| 1327 | } | ||
| 1328 | |||
| 1329 | static inline int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, | ||
| 1330 | int order, struct mem_cgroup *memcg) | ||
| 1331 | { | ||
| 1332 | if (memcg_kmem_enabled()) | ||
| 1333 | return __memcg_kmem_charge_memcg(page, gfp, order, memcg); | ||
| 1334 | return 0; | ||
| 1335 | } | ||
| 1303 | /* | 1336 | /* |
| 1304 | * helper for accessing a memcg's index. It will be used as an index in the | 1337 | * helper for accessing a memcg's index. It will be used as an index in the |
| 1305 | * child cache array in kmem_cache, and also to derive its name. This function | 1338 | * child cache array in kmem_cache, and also to derive its name. This function |
| @@ -1325,6 +1358,15 @@ static inline void memcg_kmem_uncharge(struct page *page, int order) | |||
| 1325 | { | 1358 | { |
| 1326 | } | 1359 | } |
| 1327 | 1360 | ||
| 1361 | static inline int __memcg_kmem_charge(struct page *page, gfp_t gfp, int order) | ||
| 1362 | { | ||
| 1363 | return 0; | ||
| 1364 | } | ||
| 1365 | |||
| 1366 | static inline void __memcg_kmem_uncharge(struct page *page, int order) | ||
| 1367 | { | ||
| 1368 | } | ||
| 1369 | |||
| 1328 | #define for_each_memcg_cache_index(_idx) \ | 1370 | #define for_each_memcg_cache_index(_idx) \ |
| 1329 | for (; NULL; ) | 1371 | for (; NULL; ) |
| 1330 | 1372 | ||
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h index 07da5c6c5ba0..8ade08c50d26 100644 --- a/include/linux/memory_hotplug.h +++ b/include/linux/memory_hotplug.h | |||
| @@ -21,14 +21,16 @@ struct vmem_altmap; | |||
| 21 | * walkers which rely on the fully initialized page->flags and others | 21 | * walkers which rely on the fully initialized page->flags and others |
| 22 | * should use this rather than pfn_valid && pfn_to_page | 22 | * should use this rather than pfn_valid && pfn_to_page |
| 23 | */ | 23 | */ |
| 24 | #define pfn_to_online_page(pfn) \ | 24 | #define pfn_to_online_page(pfn) \ |
| 25 | ({ \ | 25 | ({ \ |
| 26 | struct page *___page = NULL; \ | 26 | struct page *___page = NULL; \ |
| 27 | unsigned long ___nr = pfn_to_section_nr(pfn); \ | 27 | unsigned long ___pfn = pfn; \ |
| 28 | \ | 28 | unsigned long ___nr = pfn_to_section_nr(___pfn); \ |
| 29 | if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr))\ | 29 | \ |
| 30 | ___page = pfn_to_page(pfn); \ | 30 | if (___nr < NR_MEM_SECTIONS && online_section_nr(___nr) && \ |
| 31 | ___page; \ | 31 | pfn_valid_within(___pfn)) \ |
| 32 | ___page = pfn_to_page(___pfn); \ | ||
| 33 | ___page; \ | ||
| 32 | }) | 34 | }) |
| 33 | 35 | ||
| 34 | /* | 36 | /* |
| @@ -87,7 +89,7 @@ extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn, | |||
| 87 | unsigned long *valid_start, unsigned long *valid_end); | 89 | unsigned long *valid_start, unsigned long *valid_end); |
| 88 | extern void __offline_isolated_pages(unsigned long, unsigned long); | 90 | extern void __offline_isolated_pages(unsigned long, unsigned long); |
| 89 | 91 | ||
| 90 | typedef void (*online_page_callback_t)(struct page *page); | 92 | typedef void (*online_page_callback_t)(struct page *page, unsigned int order); |
| 91 | 93 | ||
| 92 | extern int set_online_page_callback(online_page_callback_t callback); | 94 | extern int set_online_page_callback(online_page_callback_t callback); |
| 93 | extern int restore_online_page_callback(online_page_callback_t callback); | 95 | extern int restore_online_page_callback(online_page_callback_t callback); |
| @@ -98,6 +100,8 @@ extern void __online_page_free(struct page *page); | |||
| 98 | 100 | ||
| 99 | extern int try_online_node(int nid); | 101 | extern int try_online_node(int nid); |
| 100 | 102 | ||
| 103 | extern u64 max_mem_size; | ||
| 104 | |||
| 101 | extern bool memhp_auto_online; | 105 | extern bool memhp_auto_online; |
| 102 | /* If movable_node boot option specified */ | 106 | /* If movable_node boot option specified */ |
| 103 | extern bool movable_node_enabled; | 107 | extern bool movable_node_enabled; |
diff --git a/include/linux/mfd/bcm2835-pm.h b/include/linux/mfd/bcm2835-pm.h new file mode 100644 index 000000000000..ed37dc40e82a --- /dev/null +++ b/include/linux/mfd/bcm2835-pm.h | |||
| @@ -0,0 +1,14 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 2 | |||
| 3 | #ifndef BCM2835_MFD_PM_H | ||
| 4 | #define BCM2835_MFD_PM_H | ||
| 5 | |||
| 6 | #include <linux/regmap.h> | ||
| 7 | |||
| 8 | struct bcm2835_pm { | ||
| 9 | struct device *dev; | ||
| 10 | void __iomem *base; | ||
| 11 | void __iomem *asb; | ||
| 12 | }; | ||
| 13 | |||
| 14 | #endif /* BCM2835_MFD_PM_H */ | ||
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h index de8b588c8776..8f2a8918bfa3 100644 --- a/include/linux/mfd/cros_ec.h +++ b/include/linux/mfd/cros_ec.h | |||
| @@ -282,16 +282,6 @@ int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev, | |||
| 282 | struct cros_ec_command *msg); | 282 | struct cros_ec_command *msg); |
| 283 | 283 | ||
| 284 | /** | 284 | /** |
| 285 | * cros_ec_remove() - Remove a ChromeOS EC. | ||
| 286 | * @ec_dev: Device to register. | ||
| 287 | * | ||
| 288 | * Call this to deregister a ChromeOS EC, then clean up any private data. | ||
| 289 | * | ||
| 290 | * Return: 0 on success or negative error code. | ||
| 291 | */ | ||
| 292 | int cros_ec_remove(struct cros_ec_device *ec_dev); | ||
| 293 | |||
| 294 | /** | ||
| 295 | * cros_ec_register() - Register a new ChromeOS EC, using the provided info. | 285 | * cros_ec_register() - Register a new ChromeOS EC, using the provided info. |
| 296 | * @ec_dev: Device to register. | 286 | * @ec_dev: Device to register. |
| 297 | * | 287 | * |
| @@ -335,15 +325,4 @@ int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event); | |||
| 335 | */ | 325 | */ |
| 336 | u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); | 326 | u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev); |
| 337 | 327 | ||
| 338 | /* sysfs stuff */ | ||
| 339 | extern struct attribute_group cros_ec_attr_group; | ||
| 340 | extern struct attribute_group cros_ec_lightbar_attr_group; | ||
| 341 | extern struct attribute_group cros_ec_vbc_attr_group; | ||
| 342 | |||
| 343 | /* debugfs stuff */ | ||
| 344 | int cros_ec_debugfs_init(struct cros_ec_dev *ec); | ||
| 345 | void cros_ec_debugfs_remove(struct cros_ec_dev *ec); | ||
| 346 | void cros_ec_debugfs_suspend(struct cros_ec_dev *ec); | ||
| 347 | void cros_ec_debugfs_resume(struct cros_ec_dev *ec); | ||
| 348 | |||
| 349 | #endif /* __LINUX_MFD_CROS_EC_H */ | 328 | #endif /* __LINUX_MFD_CROS_EC_H */ |
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h index 9a9631f0559e..fc91082d4c35 100644 --- a/include/linux/mfd/cros_ec_commands.h +++ b/include/linux/mfd/cros_ec_commands.h | |||
| @@ -2791,6 +2791,100 @@ struct ec_response_battery_vendor_param { | |||
| 2791 | } __packed; | 2791 | } __packed; |
| 2792 | 2792 | ||
| 2793 | /*****************************************************************************/ | 2793 | /*****************************************************************************/ |
| 2794 | /* Commands for I2S recording on audio codec. */ | ||
| 2795 | |||
| 2796 | #define EC_CMD_CODEC_I2S 0x00BC | ||
| 2797 | |||
| 2798 | enum ec_codec_i2s_subcmd { | ||
| 2799 | EC_CODEC_SET_SAMPLE_DEPTH = 0x0, | ||
| 2800 | EC_CODEC_SET_GAIN = 0x1, | ||
| 2801 | EC_CODEC_GET_GAIN = 0x2, | ||
| 2802 | EC_CODEC_I2S_ENABLE = 0x3, | ||
| 2803 | EC_CODEC_I2S_SET_CONFIG = 0x4, | ||
| 2804 | EC_CODEC_I2S_SET_TDM_CONFIG = 0x5, | ||
| 2805 | EC_CODEC_I2S_SET_BCLK = 0x6, | ||
| 2806 | }; | ||
| 2807 | |||
| 2808 | enum ec_sample_depth_value { | ||
| 2809 | EC_CODEC_SAMPLE_DEPTH_16 = 0, | ||
| 2810 | EC_CODEC_SAMPLE_DEPTH_24 = 1, | ||
| 2811 | }; | ||
| 2812 | |||
| 2813 | enum ec_i2s_config { | ||
| 2814 | EC_DAI_FMT_I2S = 0, | ||
| 2815 | EC_DAI_FMT_RIGHT_J = 1, | ||
| 2816 | EC_DAI_FMT_LEFT_J = 2, | ||
| 2817 | EC_DAI_FMT_PCM_A = 3, | ||
| 2818 | EC_DAI_FMT_PCM_B = 4, | ||
| 2819 | EC_DAI_FMT_PCM_TDM = 5, | ||
| 2820 | }; | ||
| 2821 | |||
| 2822 | struct ec_param_codec_i2s { | ||
| 2823 | /* | ||
| 2824 | * enum ec_codec_i2s_subcmd | ||
| 2825 | */ | ||
| 2826 | uint8_t cmd; | ||
| 2827 | union { | ||
| 2828 | /* | ||
| 2829 | * EC_CODEC_SET_SAMPLE_DEPTH | ||
| 2830 | * Value should be one of ec_sample_depth_value. | ||
| 2831 | */ | ||
| 2832 | uint8_t depth; | ||
| 2833 | |||
| 2834 | /* | ||
| 2835 | * EC_CODEC_SET_GAIN | ||
| 2836 | * Value should be 0~43 for both channels. | ||
| 2837 | */ | ||
| 2838 | struct ec_param_codec_i2s_set_gain { | ||
| 2839 | uint8_t left; | ||
| 2840 | uint8_t right; | ||
| 2841 | } __packed gain; | ||
| 2842 | |||
| 2843 | /* | ||
| 2844 | * EC_CODEC_I2S_ENABLE | ||
| 2845 | * 1 to enable, 0 to disable. | ||
| 2846 | */ | ||
| 2847 | uint8_t i2s_enable; | ||
| 2848 | |||
| 2849 | /* | ||
| 2850 | * EC_CODEC_I2S_SET_COFNIG | ||
| 2851 | * Value should be one of ec_i2s_config. | ||
| 2852 | */ | ||
| 2853 | uint8_t i2s_config; | ||
| 2854 | |||
| 2855 | /* | ||
| 2856 | * EC_CODEC_I2S_SET_TDM_CONFIG | ||
| 2857 | * Value should be one of ec_i2s_config. | ||
| 2858 | */ | ||
| 2859 | struct ec_param_codec_i2s_tdm { | ||
| 2860 | /* | ||
| 2861 | * 0 to 496 | ||
| 2862 | */ | ||
| 2863 | int16_t ch0_delay; | ||
| 2864 | /* | ||
| 2865 | * -1 to 496 | ||
| 2866 | */ | ||
| 2867 | int16_t ch1_delay; | ||
| 2868 | uint8_t adjacent_to_ch0; | ||
| 2869 | uint8_t adjacent_to_ch1; | ||
| 2870 | } __packed tdm_param; | ||
| 2871 | |||
| 2872 | /* | ||
| 2873 | * EC_CODEC_I2S_SET_BCLK | ||
| 2874 | */ | ||
| 2875 | uint32_t bclk; | ||
| 2876 | }; | ||
| 2877 | } __packed; | ||
| 2878 | |||
| 2879 | /* | ||
| 2880 | * For subcommand EC_CODEC_GET_GAIN. | ||
| 2881 | */ | ||
| 2882 | struct ec_response_codec_gain { | ||
| 2883 | uint8_t left; | ||
| 2884 | uint8_t right; | ||
| 2885 | } __packed; | ||
| 2886 | |||
| 2887 | /*****************************************************************************/ | ||
| 2794 | /* System commands */ | 2888 | /* System commands */ |
| 2795 | 2889 | ||
| 2796 | /* | 2890 | /* |
diff --git a/include/linux/mfd/ingenic-tcu.h b/include/linux/mfd/ingenic-tcu.h index ab16ad283def..2083fa20821d 100644 --- a/include/linux/mfd/ingenic-tcu.h +++ b/include/linux/mfd/ingenic-tcu.h | |||
| @@ -41,7 +41,7 @@ | |||
| 41 | #define TCU_TCSR_PRESCALE_LSB 3 | 41 | #define TCU_TCSR_PRESCALE_LSB 3 |
| 42 | #define TCU_TCSR_PRESCALE_MASK 0x38 | 42 | #define TCU_TCSR_PRESCALE_MASK 0x38 |
| 43 | 43 | ||
| 44 | #define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown abruptly 1: gracefully */ | 44 | #define TCU_TCSR_PWM_SD BIT(9) /* 0: Shutdown gracefully 1: abruptly */ |
| 45 | #define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ | 45 | #define TCU_TCSR_PWM_INITL_HIGH BIT(8) /* Sets the initial output level */ |
| 46 | #define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ | 46 | #define TCU_TCSR_PWM_EN BIT(7) /* PWM pin output enable */ |
| 47 | 47 | ||
diff --git a/include/linux/mfd/intel_soc_pmic.h b/include/linux/mfd/intel_soc_pmic.h index ed1dfba5e5f9..bfecd6bd4990 100644 --- a/include/linux/mfd/intel_soc_pmic.h +++ b/include/linux/mfd/intel_soc_pmic.h | |||
| @@ -26,4 +26,7 @@ struct intel_soc_pmic { | |||
| 26 | struct device *dev; | 26 | struct device *dev; |
| 27 | }; | 27 | }; |
| 28 | 28 | ||
| 29 | int intel_soc_pmic_exec_mipi_pmic_seq_element(u16 i2c_address, u32 reg_address, | ||
| 30 | u32 value, u32 mask); | ||
| 31 | |||
| 29 | #endif /* __INTEL_SOC_PMIC_H__ */ | 32 | #endif /* __INTEL_SOC_PMIC_H__ */ |
diff --git a/include/linux/mfd/lochnagar.h b/include/linux/mfd/lochnagar.h new file mode 100644 index 000000000000..ff9e64cfc9fb --- /dev/null +++ b/include/linux/mfd/lochnagar.h | |||
| @@ -0,0 +1,55 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Lochnagar internals | ||
| 4 | * | ||
| 5 | * Copyright (c) 2013-2018 Cirrus Logic, Inc. and | ||
| 6 | * Cirrus Logic International Semiconductor Ltd. | ||
| 7 | * | ||
| 8 | * Author: Charles Keepax <ckeepax@opensource.cirrus.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #include <linux/device.h> | ||
| 12 | #include <linux/mutex.h> | ||
| 13 | #include <linux/regmap.h> | ||
| 14 | |||
| 15 | #ifndef CIRRUS_LOCHNAGAR_H | ||
| 16 | #define CIRRUS_LOCHNAGAR_H | ||
| 17 | |||
| 18 | enum lochnagar_type { | ||
| 19 | LOCHNAGAR1, | ||
| 20 | LOCHNAGAR2, | ||
| 21 | }; | ||
| 22 | |||
| 23 | /** | ||
| 24 | * struct lochnagar - Core data for the Lochnagar audio board driver. | ||
| 25 | * | ||
| 26 | * @type: The type of Lochnagar device connected. | ||
| 27 | * @dev: A pointer to the struct device for the main MFD. | ||
| 28 | * @regmap: The devices main register map. | ||
| 29 | * @analogue_config_lock: Lock used to protect updates in the analogue | ||
| 30 | * configuration as these must not be changed whilst the hardware is processing | ||
| 31 | * the last update. | ||
| 32 | */ | ||
| 33 | struct lochnagar { | ||
| 34 | enum lochnagar_type type; | ||
| 35 | struct device *dev; | ||
| 36 | struct regmap *regmap; | ||
| 37 | |||
| 38 | /* Lock to protect updates to the analogue configuration */ | ||
| 39 | struct mutex analogue_config_lock; | ||
| 40 | }; | ||
| 41 | |||
| 42 | /* Register Addresses */ | ||
| 43 | #define LOCHNAGAR_SOFTWARE_RESET 0x00 | ||
| 44 | #define LOCHNAGAR_FIRMWARE_ID1 0x01 | ||
| 45 | #define LOCHNAGAR_FIRMWARE_ID2 0x02 | ||
| 46 | |||
| 47 | /* (0x0000) Software Reset */ | ||
| 48 | #define LOCHNAGAR_DEVICE_ID_MASK 0xFFFC | ||
| 49 | #define LOCHNAGAR_DEVICE_ID_SHIFT 2 | ||
| 50 | #define LOCHNAGAR_REV_ID_MASK 0x0003 | ||
| 51 | #define LOCHNAGAR_REV_ID_SHIFT 0 | ||
| 52 | |||
| 53 | int lochnagar_update_config(struct lochnagar *lochnagar); | ||
| 54 | |||
| 55 | #endif | ||
diff --git a/include/linux/mfd/lochnagar1_regs.h b/include/linux/mfd/lochnagar1_regs.h new file mode 100644 index 000000000000..114b846245d9 --- /dev/null +++ b/include/linux/mfd/lochnagar1_regs.h | |||
| @@ -0,0 +1,157 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Lochnagar1 register definitions | ||
| 4 | * | ||
| 5 | * Copyright (c) 2017-2018 Cirrus Logic, Inc. and | ||
| 6 | * Cirrus Logic International Semiconductor Ltd. | ||
| 7 | * | ||
| 8 | * Author: Charles Keepax <ckeepax@opensource.cirrus.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef LOCHNAGAR1_REGISTERS_H | ||
| 12 | #define LOCHNAGAR1_REGISTERS_H | ||
| 13 | |||
| 14 | /* Register Addresses */ | ||
| 15 | #define LOCHNAGAR1_CDC_AIF1_SEL 0x0008 | ||
| 16 | #define LOCHNAGAR1_CDC_AIF2_SEL 0x0009 | ||
| 17 | #define LOCHNAGAR1_CDC_AIF3_SEL 0x000A | ||
| 18 | #define LOCHNAGAR1_CDC_MCLK1_SEL 0x000B | ||
| 19 | #define LOCHNAGAR1_CDC_MCLK2_SEL 0x000C | ||
| 20 | #define LOCHNAGAR1_CDC_AIF_CTRL1 0x000D | ||
| 21 | #define LOCHNAGAR1_CDC_AIF_CTRL2 0x000E | ||
| 22 | #define LOCHNAGAR1_EXT_AIF_CTRL 0x000F | ||
| 23 | #define LOCHNAGAR1_DSP_AIF1_SEL 0x0010 | ||
| 24 | #define LOCHNAGAR1_DSP_AIF2_SEL 0x0011 | ||
| 25 | #define LOCHNAGAR1_DSP_CLKIN_SEL 0x0012 | ||
| 26 | #define LOCHNAGAR1_DSP_AIF 0x0013 | ||
| 27 | #define LOCHNAGAR1_GF_AIF1 0x0014 | ||
| 28 | #define LOCHNAGAR1_GF_AIF2 0x0015 | ||
| 29 | #define LOCHNAGAR1_PSIA_AIF 0x0016 | ||
| 30 | #define LOCHNAGAR1_PSIA1_SEL 0x0017 | ||
| 31 | #define LOCHNAGAR1_PSIA2_SEL 0x0018 | ||
| 32 | #define LOCHNAGAR1_SPDIF_AIF_SEL 0x0019 | ||
| 33 | #define LOCHNAGAR1_GF_AIF3_SEL 0x001C | ||
| 34 | #define LOCHNAGAR1_GF_AIF4_SEL 0x001D | ||
| 35 | #define LOCHNAGAR1_GF_CLKOUT1_SEL 0x001E | ||
| 36 | #define LOCHNAGAR1_GF_AIF1_SEL 0x001F | ||
| 37 | #define LOCHNAGAR1_GF_AIF2_SEL 0x0020 | ||
| 38 | #define LOCHNAGAR1_GF_GPIO2 0x0026 | ||
| 39 | #define LOCHNAGAR1_GF_GPIO3 0x0027 | ||
| 40 | #define LOCHNAGAR1_GF_GPIO7 0x0028 | ||
| 41 | #define LOCHNAGAR1_RST 0x0029 | ||
| 42 | #define LOCHNAGAR1_LED1 0x002A | ||
| 43 | #define LOCHNAGAR1_LED2 0x002B | ||
| 44 | #define LOCHNAGAR1_I2C_CTRL 0x0046 | ||
| 45 | |||
| 46 | /* | ||
| 47 | * (0x0008 - 0x000C, 0x0010 - 0x0012, 0x0017 - 0x0020) | ||
| 48 | * CDC_AIF1_SEL - GF_AIF2_SEL | ||
| 49 | */ | ||
| 50 | #define LOCHNAGAR1_SRC_MASK 0xFF | ||
| 51 | #define LOCHNAGAR1_SRC_SHIFT 0 | ||
| 52 | |||
| 53 | /* (0x000D) CDC_AIF_CTRL1 */ | ||
| 54 | #define LOCHNAGAR1_CDC_AIF2_LRCLK_DIR_MASK 0x40 | ||
| 55 | #define LOCHNAGAR1_CDC_AIF2_LRCLK_DIR_SHIFT 6 | ||
| 56 | #define LOCHNAGAR1_CDC_AIF2_BCLK_DIR_MASK 0x20 | ||
| 57 | #define LOCHNAGAR1_CDC_AIF2_BCLK_DIR_SHIFT 5 | ||
| 58 | #define LOCHNAGAR1_CDC_AIF2_ENA_MASK 0x10 | ||
| 59 | #define LOCHNAGAR1_CDC_AIF2_ENA_SHIFT 4 | ||
| 60 | #define LOCHNAGAR1_CDC_AIF1_LRCLK_DIR_MASK 0x04 | ||
| 61 | #define LOCHNAGAR1_CDC_AIF1_LRCLK_DIR_SHIFT 2 | ||
| 62 | #define LOCHNAGAR1_CDC_AIF1_BCLK_DIR_MASK 0x02 | ||
| 63 | #define LOCHNAGAR1_CDC_AIF1_BCLK_DIR_SHIFT 1 | ||
| 64 | #define LOCHNAGAR1_CDC_AIF1_ENA_MASK 0x01 | ||
| 65 | #define LOCHNAGAR1_CDC_AIF1_ENA_SHIFT 0 | ||
| 66 | |||
| 67 | /* (0x000E) CDC_AIF_CTRL2 */ | ||
| 68 | #define LOCHNAGAR1_CDC_AIF3_LRCLK_DIR_MASK 0x40 | ||
| 69 | #define LOCHNAGAR1_CDC_AIF3_LRCLK_DIR_SHIFT 6 | ||
| 70 | #define LOCHNAGAR1_CDC_AIF3_BCLK_DIR_MASK 0x20 | ||
| 71 | #define LOCHNAGAR1_CDC_AIF3_BCLK_DIR_SHIFT 5 | ||
| 72 | #define LOCHNAGAR1_CDC_AIF3_ENA_MASK 0x10 | ||
| 73 | #define LOCHNAGAR1_CDC_AIF3_ENA_SHIFT 4 | ||
| 74 | #define LOCHNAGAR1_CDC_MCLK1_ENA_MASK 0x02 | ||
| 75 | #define LOCHNAGAR1_CDC_MCLK1_ENA_SHIFT 1 | ||
| 76 | #define LOCHNAGAR1_CDC_MCLK2_ENA_MASK 0x01 | ||
| 77 | #define LOCHNAGAR1_CDC_MCLK2_ENA_SHIFT 0 | ||
| 78 | |||
| 79 | /* (0x000F) EXT_AIF_CTRL */ | ||
| 80 | #define LOCHNAGAR1_SPDIF_AIF_LRCLK_DIR_MASK 0x20 | ||
| 81 | #define LOCHNAGAR1_SPDIF_AIF_LRCLK_DIR_SHIFT 5 | ||
| 82 | #define LOCHNAGAR1_SPDIF_AIF_BCLK_DIR_MASK 0x10 | ||
| 83 | #define LOCHNAGAR1_SPDIF_AIF_BCLK_DIR_SHIFT 4 | ||
| 84 | #define LOCHNAGAR1_SPDIF_AIF_ENA_MASK 0x08 | ||
| 85 | #define LOCHNAGAR1_SPDIF_AIF_ENA_SHIFT 3 | ||
| 86 | |||
| 87 | /* (0x0013) DSP_AIF */ | ||
| 88 | #define LOCHNAGAR1_DSP_AIF2_LRCLK_DIR_MASK 0x40 | ||
| 89 | #define LOCHNAGAR1_DSP_AIF2_LRCLK_DIR_SHIFT 6 | ||
| 90 | #define LOCHNAGAR1_DSP_AIF2_BCLK_DIR_MASK 0x20 | ||
| 91 | #define LOCHNAGAR1_DSP_AIF2_BCLK_DIR_SHIFT 5 | ||
| 92 | #define LOCHNAGAR1_DSP_AIF2_ENA_MASK 0x10 | ||
| 93 | #define LOCHNAGAR1_DSP_AIF2_ENA_SHIFT 4 | ||
| 94 | #define LOCHNAGAR1_DSP_CLKIN_ENA_MASK 0x08 | ||
| 95 | #define LOCHNAGAR1_DSP_CLKIN_ENA_SHIFT 3 | ||
| 96 | #define LOCHNAGAR1_DSP_AIF1_LRCLK_DIR_MASK 0x04 | ||
| 97 | #define LOCHNAGAR1_DSP_AIF1_LRCLK_DIR_SHIFT 2 | ||
| 98 | #define LOCHNAGAR1_DSP_AIF1_BCLK_DIR_MASK 0x02 | ||
| 99 | #define LOCHNAGAR1_DSP_AIF1_BCLK_DIR_SHIFT 1 | ||
| 100 | #define LOCHNAGAR1_DSP_AIF1_ENA_MASK 0x01 | ||
| 101 | #define LOCHNAGAR1_DSP_AIF1_ENA_SHIFT 0 | ||
| 102 | |||
| 103 | /* (0x0014) GF_AIF1 */ | ||
| 104 | #define LOCHNAGAR1_GF_CLKOUT1_ENA_MASK 0x40 | ||
| 105 | #define LOCHNAGAR1_GF_CLKOUT1_ENA_SHIFT 6 | ||
| 106 | #define LOCHNAGAR1_GF_AIF3_LRCLK_DIR_MASK 0x20 | ||
| 107 | #define LOCHNAGAR1_GF_AIF3_LRCLK_DIR_SHIFT 5 | ||
| 108 | #define LOCHNAGAR1_GF_AIF3_BCLK_DIR_MASK 0x10 | ||
| 109 | #define LOCHNAGAR1_GF_AIF3_BCLK_DIR_SHIFT 4 | ||
| 110 | #define LOCHNAGAR1_GF_AIF3_ENA_MASK 0x08 | ||
| 111 | #define LOCHNAGAR1_GF_AIF3_ENA_SHIFT 3 | ||
| 112 | #define LOCHNAGAR1_GF_AIF1_LRCLK_DIR_MASK 0x04 | ||
| 113 | #define LOCHNAGAR1_GF_AIF1_LRCLK_DIR_SHIFT 2 | ||
| 114 | #define LOCHNAGAR1_GF_AIF1_BCLK_DIR_MASK 0x02 | ||
| 115 | #define LOCHNAGAR1_GF_AIF1_BCLK_DIR_SHIFT 1 | ||
| 116 | #define LOCHNAGAR1_GF_AIF1_ENA_MASK 0x01 | ||
| 117 | #define LOCHNAGAR1_GF_AIF1_ENA_SHIFT 0 | ||
| 118 | |||
| 119 | /* (0x0015) GF_AIF2 */ | ||
| 120 | #define LOCHNAGAR1_GF_AIF4_LRCLK_DIR_MASK 0x20 | ||
| 121 | #define LOCHNAGAR1_GF_AIF4_LRCLK_DIR_SHIFT 5 | ||
| 122 | #define LOCHNAGAR1_GF_AIF4_BCLK_DIR_MASK 0x10 | ||
| 123 | #define LOCHNAGAR1_GF_AIF4_BCLK_DIR_SHIFT 4 | ||
| 124 | #define LOCHNAGAR1_GF_AIF4_ENA_MASK 0x08 | ||
| 125 | #define LOCHNAGAR1_GF_AIF4_ENA_SHIFT 3 | ||
| 126 | #define LOCHNAGAR1_GF_AIF2_LRCLK_DIR_MASK 0x04 | ||
| 127 | #define LOCHNAGAR1_GF_AIF2_LRCLK_DIR_SHIFT 2 | ||
| 128 | #define LOCHNAGAR1_GF_AIF2_BCLK_DIR_MASK 0x02 | ||
| 129 | #define LOCHNAGAR1_GF_AIF2_BCLK_DIR_SHIFT 1 | ||
| 130 | #define LOCHNAGAR1_GF_AIF2_ENA_MASK 0x01 | ||
| 131 | #define LOCHNAGAR1_GF_AIF2_ENA_SHIFT 0 | ||
| 132 | |||
| 133 | /* (0x0016) PSIA_AIF */ | ||
| 134 | #define LOCHNAGAR1_PSIA2_LRCLK_DIR_MASK 0x40 | ||
| 135 | #define LOCHNAGAR1_PSIA2_LRCLK_DIR_SHIFT 6 | ||
| 136 | #define LOCHNAGAR1_PSIA2_BCLK_DIR_MASK 0x20 | ||
| 137 | #define LOCHNAGAR1_PSIA2_BCLK_DIR_SHIFT 5 | ||
| 138 | #define LOCHNAGAR1_PSIA2_ENA_MASK 0x10 | ||
| 139 | #define LOCHNAGAR1_PSIA2_ENA_SHIFT 4 | ||
| 140 | #define LOCHNAGAR1_PSIA1_LRCLK_DIR_MASK 0x04 | ||
| 141 | #define LOCHNAGAR1_PSIA1_LRCLK_DIR_SHIFT 2 | ||
| 142 | #define LOCHNAGAR1_PSIA1_BCLK_DIR_MASK 0x02 | ||
| 143 | #define LOCHNAGAR1_PSIA1_BCLK_DIR_SHIFT 1 | ||
| 144 | #define LOCHNAGAR1_PSIA1_ENA_MASK 0x01 | ||
| 145 | #define LOCHNAGAR1_PSIA1_ENA_SHIFT 0 | ||
| 146 | |||
| 147 | /* (0x0029) RST */ | ||
| 148 | #define LOCHNAGAR1_DSP_RESET_MASK 0x02 | ||
| 149 | #define LOCHNAGAR1_DSP_RESET_SHIFT 1 | ||
| 150 | #define LOCHNAGAR1_CDC_RESET_MASK 0x01 | ||
| 151 | #define LOCHNAGAR1_CDC_RESET_SHIFT 0 | ||
| 152 | |||
| 153 | /* (0x0046) I2C_CTRL */ | ||
| 154 | #define LOCHNAGAR1_CDC_CIF_MODE_MASK 0x01 | ||
| 155 | #define LOCHNAGAR1_CDC_CIF_MODE_SHIFT 0 | ||
| 156 | |||
| 157 | #endif | ||
diff --git a/include/linux/mfd/lochnagar2_regs.h b/include/linux/mfd/lochnagar2_regs.h new file mode 100644 index 000000000000..419b25a332fd --- /dev/null +++ b/include/linux/mfd/lochnagar2_regs.h | |||
| @@ -0,0 +1,291 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Lochnagar2 register definitions | ||
| 4 | * | ||
| 5 | * Copyright (c) 2017-2018 Cirrus Logic, Inc. and | ||
| 6 | * Cirrus Logic International Semiconductor Ltd. | ||
| 7 | * | ||
| 8 | * Author: Charles Keepax <ckeepax@opensource.cirrus.com> | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef LOCHNAGAR2_REGISTERS_H | ||
| 12 | #define LOCHNAGAR2_REGISTERS_H | ||
| 13 | |||
| 14 | /* Register Addresses */ | ||
| 15 | #define LOCHNAGAR2_CDC_AIF1_CTRL 0x000D | ||
| 16 | #define LOCHNAGAR2_CDC_AIF2_CTRL 0x000E | ||
| 17 | #define LOCHNAGAR2_CDC_AIF3_CTRL 0x000F | ||
| 18 | #define LOCHNAGAR2_DSP_AIF1_CTRL 0x0010 | ||
| 19 | #define LOCHNAGAR2_DSP_AIF2_CTRL 0x0011 | ||
| 20 | #define LOCHNAGAR2_PSIA1_CTRL 0x0012 | ||
| 21 | #define LOCHNAGAR2_PSIA2_CTRL 0x0013 | ||
| 22 | #define LOCHNAGAR2_GF_AIF3_CTRL 0x0014 | ||
| 23 | #define LOCHNAGAR2_GF_AIF4_CTRL 0x0015 | ||
| 24 | #define LOCHNAGAR2_GF_AIF1_CTRL 0x0016 | ||
| 25 | #define LOCHNAGAR2_GF_AIF2_CTRL 0x0017 | ||
| 26 | #define LOCHNAGAR2_SPDIF_AIF_CTRL 0x0018 | ||
| 27 | #define LOCHNAGAR2_USB_AIF1_CTRL 0x0019 | ||
| 28 | #define LOCHNAGAR2_USB_AIF2_CTRL 0x001A | ||
| 29 | #define LOCHNAGAR2_ADAT_AIF_CTRL 0x001B | ||
| 30 | #define LOCHNAGAR2_CDC_MCLK1_CTRL 0x001E | ||
| 31 | #define LOCHNAGAR2_CDC_MCLK2_CTRL 0x001F | ||
| 32 | #define LOCHNAGAR2_DSP_CLKIN_CTRL 0x0020 | ||
| 33 | #define LOCHNAGAR2_PSIA1_MCLK_CTRL 0x0021 | ||
| 34 | #define LOCHNAGAR2_PSIA2_MCLK_CTRL 0x0022 | ||
| 35 | #define LOCHNAGAR2_SPDIF_MCLK_CTRL 0x0023 | ||
| 36 | #define LOCHNAGAR2_GF_CLKOUT1_CTRL 0x0024 | ||
| 37 | #define LOCHNAGAR2_GF_CLKOUT2_CTRL 0x0025 | ||
| 38 | #define LOCHNAGAR2_ADAT_MCLK_CTRL 0x0026 | ||
| 39 | #define LOCHNAGAR2_SOUNDCARD_MCLK_CTRL 0x0027 | ||
| 40 | #define LOCHNAGAR2_GPIO_FPGA_GPIO1 0x0031 | ||
| 41 | #define LOCHNAGAR2_GPIO_FPGA_GPIO2 0x0032 | ||
| 42 | #define LOCHNAGAR2_GPIO_FPGA_GPIO3 0x0033 | ||
| 43 | #define LOCHNAGAR2_GPIO_FPGA_GPIO4 0x0034 | ||
| 44 | #define LOCHNAGAR2_GPIO_FPGA_GPIO5 0x0035 | ||
| 45 | #define LOCHNAGAR2_GPIO_FPGA_GPIO6 0x0036 | ||
| 46 | #define LOCHNAGAR2_GPIO_CDC_GPIO1 0x0037 | ||
| 47 | #define LOCHNAGAR2_GPIO_CDC_GPIO2 0x0038 | ||
| 48 | #define LOCHNAGAR2_GPIO_CDC_GPIO3 0x0039 | ||
| 49 | #define LOCHNAGAR2_GPIO_CDC_GPIO4 0x003A | ||
| 50 | #define LOCHNAGAR2_GPIO_CDC_GPIO5 0x003B | ||
| 51 | #define LOCHNAGAR2_GPIO_CDC_GPIO6 0x003C | ||
| 52 | #define LOCHNAGAR2_GPIO_CDC_GPIO7 0x003D | ||
| 53 | #define LOCHNAGAR2_GPIO_CDC_GPIO8 0x003E | ||
| 54 | #define LOCHNAGAR2_GPIO_DSP_GPIO1 0x003F | ||
| 55 | #define LOCHNAGAR2_GPIO_DSP_GPIO2 0x0040 | ||
| 56 | #define LOCHNAGAR2_GPIO_DSP_GPIO3 0x0041 | ||
| 57 | #define LOCHNAGAR2_GPIO_DSP_GPIO4 0x0042 | ||
| 58 | #define LOCHNAGAR2_GPIO_DSP_GPIO5 0x0043 | ||
| 59 | #define LOCHNAGAR2_GPIO_DSP_GPIO6 0x0044 | ||
| 60 | #define LOCHNAGAR2_GPIO_GF_GPIO2 0x0045 | ||
| 61 | #define LOCHNAGAR2_GPIO_GF_GPIO3 0x0046 | ||
| 62 | #define LOCHNAGAR2_GPIO_GF_GPIO7 0x0047 | ||
| 63 | #define LOCHNAGAR2_GPIO_CDC_AIF1_BCLK 0x0048 | ||
| 64 | #define LOCHNAGAR2_GPIO_CDC_AIF1_RXDAT 0x0049 | ||
| 65 | #define LOCHNAGAR2_GPIO_CDC_AIF1_LRCLK 0x004A | ||
| 66 | #define LOCHNAGAR2_GPIO_CDC_AIF1_TXDAT 0x004B | ||
| 67 | #define LOCHNAGAR2_GPIO_CDC_AIF2_BCLK 0x004C | ||
| 68 | #define LOCHNAGAR2_GPIO_CDC_AIF2_RXDAT 0x004D | ||
| 69 | #define LOCHNAGAR2_GPIO_CDC_AIF2_LRCLK 0x004E | ||
| 70 | #define LOCHNAGAR2_GPIO_CDC_AIF2_TXDAT 0x004F | ||
| 71 | #define LOCHNAGAR2_GPIO_CDC_AIF3_BCLK 0x0050 | ||
| 72 | #define LOCHNAGAR2_GPIO_CDC_AIF3_RXDAT 0x0051 | ||
| 73 | #define LOCHNAGAR2_GPIO_CDC_AIF3_LRCLK 0x0052 | ||
| 74 | #define LOCHNAGAR2_GPIO_CDC_AIF3_TXDAT 0x0053 | ||
| 75 | #define LOCHNAGAR2_GPIO_DSP_AIF1_BCLK 0x0054 | ||
| 76 | #define LOCHNAGAR2_GPIO_DSP_AIF1_RXDAT 0x0055 | ||
| 77 | #define LOCHNAGAR2_GPIO_DSP_AIF1_LRCLK 0x0056 | ||
| 78 | #define LOCHNAGAR2_GPIO_DSP_AIF1_TXDAT 0x0057 | ||
| 79 | #define LOCHNAGAR2_GPIO_DSP_AIF2_BCLK 0x0058 | ||
| 80 | #define LOCHNAGAR2_GPIO_DSP_AIF2_RXDAT 0x0059 | ||
| 81 | #define LOCHNAGAR2_GPIO_DSP_AIF2_LRCLK 0x005A | ||
| 82 | #define LOCHNAGAR2_GPIO_DSP_AIF2_TXDAT 0x005B | ||
| 83 | #define LOCHNAGAR2_GPIO_PSIA1_BCLK 0x005C | ||
| 84 | #define LOCHNAGAR2_GPIO_PSIA1_RXDAT 0x005D | ||
| 85 | #define LOCHNAGAR2_GPIO_PSIA1_LRCLK 0x005E | ||
| 86 | #define LOCHNAGAR2_GPIO_PSIA1_TXDAT 0x005F | ||
| 87 | #define LOCHNAGAR2_GPIO_PSIA2_BCLK 0x0060 | ||
| 88 | #define LOCHNAGAR2_GPIO_PSIA2_RXDAT 0x0061 | ||
| 89 | #define LOCHNAGAR2_GPIO_PSIA2_LRCLK 0x0062 | ||
| 90 | #define LOCHNAGAR2_GPIO_PSIA2_TXDAT 0x0063 | ||
| 91 | #define LOCHNAGAR2_GPIO_GF_AIF3_BCLK 0x0064 | ||
| 92 | #define LOCHNAGAR2_GPIO_GF_AIF3_RXDAT 0x0065 | ||
| 93 | #define LOCHNAGAR2_GPIO_GF_AIF3_LRCLK 0x0066 | ||
| 94 | #define LOCHNAGAR2_GPIO_GF_AIF3_TXDAT 0x0067 | ||
| 95 | #define LOCHNAGAR2_GPIO_GF_AIF4_BCLK 0x0068 | ||
| 96 | #define LOCHNAGAR2_GPIO_GF_AIF4_RXDAT 0x0069 | ||
| 97 | #define LOCHNAGAR2_GPIO_GF_AIF4_LRCLK 0x006A | ||
| 98 | #define LOCHNAGAR2_GPIO_GF_AIF4_TXDAT 0x006B | ||
| 99 | #define LOCHNAGAR2_GPIO_GF_AIF1_BCLK 0x006C | ||
| 100 | #define LOCHNAGAR2_GPIO_GF_AIF1_RXDAT 0x006D | ||
| 101 | #define LOCHNAGAR2_GPIO_GF_AIF1_LRCLK 0x006E | ||
| 102 | #define LOCHNAGAR2_GPIO_GF_AIF1_TXDAT 0x006F | ||
| 103 | #define LOCHNAGAR2_GPIO_GF_AIF2_BCLK 0x0070 | ||
| 104 | #define LOCHNAGAR2_GPIO_GF_AIF2_RXDAT 0x0071 | ||
| 105 | #define LOCHNAGAR2_GPIO_GF_AIF2_LRCLK 0x0072 | ||
| 106 | #define LOCHNAGAR2_GPIO_GF_AIF2_TXDAT 0x0073 | ||
| 107 | #define LOCHNAGAR2_GPIO_DSP_UART1_RX 0x0074 | ||
| 108 | #define LOCHNAGAR2_GPIO_DSP_UART1_TX 0x0075 | ||
| 109 | #define LOCHNAGAR2_GPIO_DSP_UART2_RX 0x0076 | ||
| 110 | #define LOCHNAGAR2_GPIO_DSP_UART2_TX 0x0077 | ||
| 111 | #define LOCHNAGAR2_GPIO_GF_UART2_RX 0x0078 | ||
| 112 | #define LOCHNAGAR2_GPIO_GF_UART2_TX 0x0079 | ||
| 113 | #define LOCHNAGAR2_GPIO_USB_UART_RX 0x007A | ||
| 114 | #define LOCHNAGAR2_GPIO_CDC_PDMCLK1 0x007C | ||
| 115 | #define LOCHNAGAR2_GPIO_CDC_PDMDAT1 0x007D | ||
| 116 | #define LOCHNAGAR2_GPIO_CDC_PDMCLK2 0x007E | ||
| 117 | #define LOCHNAGAR2_GPIO_CDC_PDMDAT2 0x007F | ||
| 118 | #define LOCHNAGAR2_GPIO_CDC_DMICCLK1 0x0080 | ||
| 119 | #define LOCHNAGAR2_GPIO_CDC_DMICDAT1 0x0081 | ||
| 120 | #define LOCHNAGAR2_GPIO_CDC_DMICCLK2 0x0082 | ||
| 121 | #define LOCHNAGAR2_GPIO_CDC_DMICDAT2 0x0083 | ||
| 122 | #define LOCHNAGAR2_GPIO_CDC_DMICCLK3 0x0084 | ||
| 123 | #define LOCHNAGAR2_GPIO_CDC_DMICDAT3 0x0085 | ||
| 124 | #define LOCHNAGAR2_GPIO_CDC_DMICCLK4 0x0086 | ||
| 125 | #define LOCHNAGAR2_GPIO_CDC_DMICDAT4 0x0087 | ||
| 126 | #define LOCHNAGAR2_GPIO_DSP_DMICCLK1 0x0088 | ||
| 127 | #define LOCHNAGAR2_GPIO_DSP_DMICDAT1 0x0089 | ||
| 128 | #define LOCHNAGAR2_GPIO_DSP_DMICCLK2 0x008A | ||
| 129 | #define LOCHNAGAR2_GPIO_DSP_DMICDAT2 0x008B | ||
| 130 | #define LOCHNAGAR2_GPIO_I2C2_SCL 0x008C | ||
| 131 | #define LOCHNAGAR2_GPIO_I2C2_SDA 0x008D | ||
| 132 | #define LOCHNAGAR2_GPIO_I2C3_SCL 0x008E | ||
| 133 | #define LOCHNAGAR2_GPIO_I2C3_SDA 0x008F | ||
| 134 | #define LOCHNAGAR2_GPIO_I2C4_SCL 0x0090 | ||
| 135 | #define LOCHNAGAR2_GPIO_I2C4_SDA 0x0091 | ||
| 136 | #define LOCHNAGAR2_GPIO_DSP_STANDBY 0x0092 | ||
| 137 | #define LOCHNAGAR2_GPIO_CDC_MCLK1 0x0093 | ||
| 138 | #define LOCHNAGAR2_GPIO_CDC_MCLK2 0x0094 | ||
| 139 | #define LOCHNAGAR2_GPIO_DSP_CLKIN 0x0095 | ||
| 140 | #define LOCHNAGAR2_GPIO_PSIA1_MCLK 0x0096 | ||
| 141 | #define LOCHNAGAR2_GPIO_PSIA2_MCLK 0x0097 | ||
| 142 | #define LOCHNAGAR2_GPIO_GF_GPIO1 0x0098 | ||
| 143 | #define LOCHNAGAR2_GPIO_GF_GPIO5 0x0099 | ||
| 144 | #define LOCHNAGAR2_GPIO_DSP_GPIO20 0x009A | ||
| 145 | #define LOCHNAGAR2_GPIO_CHANNEL1 0x00B9 | ||
| 146 | #define LOCHNAGAR2_GPIO_CHANNEL2 0x00BA | ||
| 147 | #define LOCHNAGAR2_GPIO_CHANNEL3 0x00BB | ||
| 148 | #define LOCHNAGAR2_GPIO_CHANNEL4 0x00BC | ||
| 149 | #define LOCHNAGAR2_GPIO_CHANNEL5 0x00BD | ||
| 150 | #define LOCHNAGAR2_GPIO_CHANNEL6 0x00BE | ||
| 151 | #define LOCHNAGAR2_GPIO_CHANNEL7 0x00BF | ||
| 152 | #define LOCHNAGAR2_GPIO_CHANNEL8 0x00C0 | ||
| 153 | #define LOCHNAGAR2_GPIO_CHANNEL9 0x00C1 | ||
| 154 | #define LOCHNAGAR2_GPIO_CHANNEL10 0x00C2 | ||
| 155 | #define LOCHNAGAR2_GPIO_CHANNEL11 0x00C3 | ||
| 156 | #define LOCHNAGAR2_GPIO_CHANNEL12 0x00C4 | ||
| 157 | #define LOCHNAGAR2_GPIO_CHANNEL13 0x00C5 | ||
| 158 | #define LOCHNAGAR2_GPIO_CHANNEL14 0x00C6 | ||
| 159 | #define LOCHNAGAR2_GPIO_CHANNEL15 0x00C7 | ||
| 160 | #define LOCHNAGAR2_GPIO_CHANNEL16 0x00C8 | ||
| 161 | #define LOCHNAGAR2_MINICARD_RESETS 0x00DF | ||
| 162 | #define LOCHNAGAR2_ANALOGUE_PATH_CTRL1 0x00E3 | ||
| 163 | #define LOCHNAGAR2_ANALOGUE_PATH_CTRL2 0x00E4 | ||
| 164 | #define LOCHNAGAR2_COMMS_CTRL4 0x00F0 | ||
| 165 | #define LOCHNAGAR2_SPDIF_CTRL 0x00FE | ||
| 166 | #define LOCHNAGAR2_IMON_CTRL1 0x0108 | ||
| 167 | #define LOCHNAGAR2_IMON_CTRL2 0x0109 | ||
| 168 | #define LOCHNAGAR2_IMON_CTRL3 0x010A | ||
| 169 | #define LOCHNAGAR2_IMON_CTRL4 0x010B | ||
| 170 | #define LOCHNAGAR2_IMON_DATA1 0x010C | ||
| 171 | #define LOCHNAGAR2_IMON_DATA2 0x010D | ||
| 172 | #define LOCHNAGAR2_POWER_CTRL 0x0116 | ||
| 173 | #define LOCHNAGAR2_MICVDD_CTRL1 0x0119 | ||
| 174 | #define LOCHNAGAR2_MICVDD_CTRL2 0x011B | ||
| 175 | #define LOCHNAGAR2_VDDCORE_CDC_CTRL1 0x011E | ||
| 176 | #define LOCHNAGAR2_VDDCORE_CDC_CTRL2 0x0120 | ||
| 177 | #define LOCHNAGAR2_SOUNDCARD_AIF_CTRL 0x0180 | ||
| 178 | |||
| 179 | /* (0x000D-0x001B, 0x0180) CDC_AIF1_CTRL - SOUNCARD_AIF_CTRL */ | ||
| 180 | #define LOCHNAGAR2_AIF_ENA_MASK 0x8000 | ||
| 181 | #define LOCHNAGAR2_AIF_ENA_SHIFT 15 | ||
| 182 | #define LOCHNAGAR2_AIF_LRCLK_DIR_MASK 0x4000 | ||
| 183 | #define LOCHNAGAR2_AIF_LRCLK_DIR_SHIFT 14 | ||
| 184 | #define LOCHNAGAR2_AIF_BCLK_DIR_MASK 0x2000 | ||
| 185 | #define LOCHNAGAR2_AIF_BCLK_DIR_SHIFT 13 | ||
| 186 | #define LOCHNAGAR2_AIF_SRC_MASK 0x00FF | ||
| 187 | #define LOCHNAGAR2_AIF_SRC_SHIFT 0 | ||
| 188 | |||
| 189 | /* (0x001E - 0x0027) CDC_MCLK1_CTRL - SOUNDCARD_MCLK_CTRL */ | ||
| 190 | #define LOCHNAGAR2_CLK_ENA_MASK 0x8000 | ||
| 191 | #define LOCHNAGAR2_CLK_ENA_SHIFT 15 | ||
| 192 | #define LOCHNAGAR2_CLK_SRC_MASK 0x00FF | ||
| 193 | #define LOCHNAGAR2_CLK_SRC_SHIFT 0 | ||
| 194 | |||
| 195 | /* (0x0031 - 0x009A) GPIO_FPGA_GPIO1 - GPIO_DSP_GPIO20 */ | ||
| 196 | #define LOCHNAGAR2_GPIO_SRC_MASK 0x00FF | ||
| 197 | #define LOCHNAGAR2_GPIO_SRC_SHIFT 0 | ||
| 198 | |||
| 199 | /* (0x00B9 - 0x00C8) GPIO_CHANNEL1 - GPIO_CHANNEL16 */ | ||
| 200 | #define LOCHNAGAR2_GPIO_CHANNEL_STS_MASK 0x8000 | ||
| 201 | #define LOCHNAGAR2_GPIO_CHANNEL_STS_SHIFT 15 | ||
| 202 | #define LOCHNAGAR2_GPIO_CHANNEL_SRC_MASK 0x00FF | ||
| 203 | #define LOCHNAGAR2_GPIO_CHANNEL_SRC_SHIFT 0 | ||
| 204 | |||
| 205 | /* (0x00DF) MINICARD_RESETS */ | ||
| 206 | #define LOCHNAGAR2_DSP_RESET_MASK 0x0002 | ||
| 207 | #define LOCHNAGAR2_DSP_RESET_SHIFT 1 | ||
| 208 | #define LOCHNAGAR2_CDC_RESET_MASK 0x0001 | ||
| 209 | #define LOCHNAGAR2_CDC_RESET_SHIFT 0 | ||
| 210 | |||
| 211 | /* (0x00E3) ANALOGUE_PATH_CTRL1 */ | ||
| 212 | #define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_MASK 0x8000 | ||
| 213 | #define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_SHIFT 15 | ||
| 214 | #define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_MASK 0x4000 | ||
| 215 | #define LOCHNAGAR2_ANALOGUE_PATH_UPDATE_STS_SHIFT 14 | ||
| 216 | |||
| 217 | /* (0x00E4) ANALOGUE_PATH_CTRL2 */ | ||
| 218 | #define LOCHNAGAR2_P2_INPUT_BIAS_ENA_MASK 0x0080 | ||
| 219 | #define LOCHNAGAR2_P2_INPUT_BIAS_ENA_SHIFT 7 | ||
| 220 | #define LOCHNAGAR2_P1_INPUT_BIAS_ENA_MASK 0x0040 | ||
| 221 | #define LOCHNAGAR2_P1_INPUT_BIAS_ENA_SHIFT 6 | ||
| 222 | #define LOCHNAGAR2_P2_MICBIAS_SRC_MASK 0x0038 | ||
| 223 | #define LOCHNAGAR2_P2_MICBIAS_SRC_SHIFT 3 | ||
| 224 | #define LOCHNAGAR2_P1_MICBIAS_SRC_MASK 0x0007 | ||
| 225 | #define LOCHNAGAR2_P1_MICBIAS_SRC_SHIFT 0 | ||
| 226 | |||
| 227 | /* (0x00F0) COMMS_CTRL4 */ | ||
| 228 | #define LOCHNAGAR2_CDC_CIF1MODE_MASK 0x0001 | ||
| 229 | #define LOCHNAGAR2_CDC_CIF1MODE_SHIFT 0 | ||
| 230 | |||
| 231 | /* (0x00FE) SPDIF_CTRL */ | ||
| 232 | #define LOCHNAGAR2_SPDIF_HWMODE_MASK 0x0008 | ||
| 233 | #define LOCHNAGAR2_SPDIF_HWMODE_SHIFT 3 | ||
| 234 | #define LOCHNAGAR2_SPDIF_RESET_MASK 0x0001 | ||
| 235 | #define LOCHNAGAR2_SPDIF_RESET_SHIFT 0 | ||
| 236 | |||
| 237 | /* (0x0108) IMON_CTRL1 */ | ||
| 238 | #define LOCHNAGAR2_IMON_ENA_MASK 0x8000 | ||
| 239 | #define LOCHNAGAR2_IMON_ENA_SHIFT 15 | ||
| 240 | #define LOCHNAGAR2_IMON_MEASURED_CHANNELS_MASK 0x03FC | ||
| 241 | #define LOCHNAGAR2_IMON_MEASURED_CHANNELS_SHIFT 2 | ||
| 242 | #define LOCHNAGAR2_IMON_MODE_SEL_MASK 0x0003 | ||
| 243 | #define LOCHNAGAR2_IMON_MODE_SEL_SHIFT 0 | ||
| 244 | |||
| 245 | /* (0x0109) IMON_CTRL2 */ | ||
| 246 | #define LOCHNAGAR2_IMON_FSR_MASK 0x03FF | ||
| 247 | #define LOCHNAGAR2_IMON_FSR_SHIFT 0 | ||
| 248 | |||
| 249 | /* (0x010A) IMON_CTRL3 */ | ||
| 250 | #define LOCHNAGAR2_IMON_DONE_MASK 0x0004 | ||
| 251 | #define LOCHNAGAR2_IMON_DONE_SHIFT 2 | ||
| 252 | #define LOCHNAGAR2_IMON_CONFIGURE_MASK 0x0002 | ||
| 253 | #define LOCHNAGAR2_IMON_CONFIGURE_SHIFT 1 | ||
| 254 | #define LOCHNAGAR2_IMON_MEASURE_MASK 0x0001 | ||
| 255 | #define LOCHNAGAR2_IMON_MEASURE_SHIFT 0 | ||
| 256 | |||
| 257 | /* (0x010B) IMON_CTRL4 */ | ||
| 258 | #define LOCHNAGAR2_IMON_DATA_REQ_MASK 0x0080 | ||
| 259 | #define LOCHNAGAR2_IMON_DATA_REQ_SHIFT 7 | ||
| 260 | #define LOCHNAGAR2_IMON_CH_SEL_MASK 0x0070 | ||
| 261 | #define LOCHNAGAR2_IMON_CH_SEL_SHIFT 4 | ||
| 262 | #define LOCHNAGAR2_IMON_DATA_RDY_MASK 0x0008 | ||
| 263 | #define LOCHNAGAR2_IMON_DATA_RDY_SHIFT 3 | ||
| 264 | #define LOCHNAGAR2_IMON_CH_SRC_MASK 0x0007 | ||
| 265 | #define LOCHNAGAR2_IMON_CH_SRC_SHIFT 0 | ||
| 266 | |||
| 267 | /* (0x010C, 0x010D) IMON_DATA1, IMON_DATA2 */ | ||
| 268 | #define LOCHNAGAR2_IMON_DATA_MASK 0xFFFF | ||
| 269 | #define LOCHNAGAR2_IMON_DATA_SHIFT 0 | ||
| 270 | |||
| 271 | /* (0x0116) POWER_CTRL */ | ||
| 272 | #define LOCHNAGAR2_PWR_ENA_MASK 0x0001 | ||
| 273 | #define LOCHNAGAR2_PWR_ENA_SHIFT 0 | ||
| 274 | |||
| 275 | /* (0x0119) MICVDD_CTRL1 */ | ||
| 276 | #define LOCHNAGAR2_MICVDD_REG_ENA_MASK 0x8000 | ||
| 277 | #define LOCHNAGAR2_MICVDD_REG_ENA_SHIFT 15 | ||
| 278 | |||
| 279 | /* (0x011B) MICVDD_CTRL2 */ | ||
| 280 | #define LOCHNAGAR2_MICVDD_VSEL_MASK 0x001F | ||
| 281 | #define LOCHNAGAR2_MICVDD_VSEL_SHIFT 0 | ||
| 282 | |||
| 283 | /* (0x011E) VDDCORE_CDC_CTRL1 */ | ||
| 284 | #define LOCHNAGAR2_VDDCORE_CDC_REG_ENA_MASK 0x8000 | ||
| 285 | #define LOCHNAGAR2_VDDCORE_CDC_REG_ENA_SHIFT 15 | ||
| 286 | |||
| 287 | /* (0x0120) VDDCORE_CDC_CTRL2 */ | ||
| 288 | #define LOCHNAGAR2_VDDCORE_CDC_VSEL_MASK 0x007F | ||
| 289 | #define LOCHNAGAR2_VDDCORE_CDC_VSEL_SHIFT 0 | ||
| 290 | |||
| 291 | #endif | ||
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h index fe69c0f4398f..4d5d51a9c8a6 100644 --- a/include/linux/mfd/madera/core.h +++ b/include/linux/mfd/madera/core.h | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/gpio/consumer.h> | 15 | #include <linux/gpio/consumer.h> |
| 16 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 17 | #include <linux/mfd/madera/pdata.h> | 17 | #include <linux/mfd/madera/pdata.h> |
| 18 | #include <linux/mutex.h> | ||
| 18 | #include <linux/notifier.h> | 19 | #include <linux/notifier.h> |
| 19 | #include <linux/regmap.h> | 20 | #include <linux/regmap.h> |
| 20 | #include <linux/regulator/consumer.h> | 21 | #include <linux/regulator/consumer.h> |
| @@ -37,6 +38,8 @@ enum madera_type { | |||
| 37 | 38 | ||
| 38 | #define MADERA_MAX_MICBIAS 4 | 39 | #define MADERA_MAX_MICBIAS 4 |
| 39 | 40 | ||
| 41 | #define MADERA_MAX_HP_OUTPUT 3 | ||
| 42 | |||
| 40 | /* Notifier events */ | 43 | /* Notifier events */ |
| 41 | #define MADERA_NOTIFY_VOICE_TRIGGER 0x1 | 44 | #define MADERA_NOTIFY_VOICE_TRIGGER 0x1 |
| 42 | #define MADERA_NOTIFY_HPDET 0x2 | 45 | #define MADERA_NOTIFY_HPDET 0x2 |
| @@ -183,6 +186,10 @@ struct madera { | |||
| 183 | unsigned int num_childbias[MADERA_MAX_MICBIAS]; | 186 | unsigned int num_childbias[MADERA_MAX_MICBIAS]; |
| 184 | 187 | ||
| 185 | struct snd_soc_dapm_context *dapm; | 188 | struct snd_soc_dapm_context *dapm; |
| 189 | struct mutex dapm_ptr_lock; | ||
| 190 | unsigned int hp_ena; | ||
| 191 | bool out_clamp[MADERA_MAX_HP_OUTPUT]; | ||
| 192 | bool out_shorted[MADERA_MAX_HP_OUTPUT]; | ||
| 186 | 193 | ||
| 187 | struct blocking_notifier_head notifier; | 194 | struct blocking_notifier_head notifier; |
| 188 | }; | 195 | }; |
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h index 4a827af17e59..07f55aac9390 100644 --- a/include/linux/mfd/stmpe.h +++ b/include/linux/mfd/stmpe.h | |||
| @@ -10,6 +10,20 @@ | |||
| 10 | 10 | ||
| 11 | #include <linux/mutex.h> | 11 | #include <linux/mutex.h> |
| 12 | 12 | ||
| 13 | #define STMPE_SAMPLE_TIME(x) ((x & 0xf) << 4) | ||
| 14 | #define STMPE_MOD_12B(x) ((x & 0x1) << 3) | ||
| 15 | #define STMPE_REF_SEL(x) ((x & 0x1) << 1) | ||
| 16 | #define STMPE_ADC_FREQ(x) (x & 0x3) | ||
| 17 | #define STMPE_AVE_CTRL(x) ((x & 0x3) << 6) | ||
| 18 | #define STMPE_DET_DELAY(x) ((x & 0x7) << 3) | ||
| 19 | #define STMPE_SETTLING(x) (x & 0x7) | ||
| 20 | #define STMPE_FRACTION_Z(x) (x & 0x7) | ||
| 21 | #define STMPE_I_DRIVE(x) (x & 0x1) | ||
| 22 | #define STMPE_OP_MODE(x) ((x & 0x7) << 1) | ||
| 23 | |||
| 24 | #define STMPE811_REG_ADC_CTRL1 0x20 | ||
| 25 | #define STMPE811_REG_ADC_CTRL2 0x21 | ||
| 26 | |||
| 13 | struct device; | 27 | struct device; |
| 14 | struct regulator; | 28 | struct regulator; |
| 15 | 29 | ||
| @@ -123,6 +137,12 @@ struct stmpe { | |||
| 123 | u8 ier[2]; | 137 | u8 ier[2]; |
| 124 | u8 oldier[2]; | 138 | u8 oldier[2]; |
| 125 | struct stmpe_platform_data *pdata; | 139 | struct stmpe_platform_data *pdata; |
| 140 | |||
| 141 | /* For devices that use an ADC */ | ||
| 142 | u8 sample_time; | ||
| 143 | u8 mod_12b; | ||
| 144 | u8 ref_sel; | ||
| 145 | u8 adc_freq; | ||
| 126 | }; | 146 | }; |
| 127 | 147 | ||
| 128 | extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data); | 148 | extern int stmpe_reg_write(struct stmpe *stmpe, u8 reg, u8 data); |
| @@ -136,6 +156,7 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins, | |||
| 136 | enum stmpe_block block); | 156 | enum stmpe_block block); |
| 137 | extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); | 157 | extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks); |
| 138 | extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); | 158 | extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks); |
| 159 | extern int stmpe811_adc_common_init(struct stmpe *stmpe); | ||
| 139 | 160 | ||
| 140 | #define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) | 161 | #define STMPE_GPIO_NOREQ_811_TOUCH (0xf0) |
| 141 | 162 | ||
diff --git a/include/linux/mfd/stpmic1.h b/include/linux/mfd/stpmic1.h new file mode 100644 index 000000000000..fa3f99f7e9a1 --- /dev/null +++ b/include/linux/mfd/stpmic1.h | |||
| @@ -0,0 +1,212 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * Copyright (C) STMicroelectronics 2018 - All Rights Reserved | ||
| 4 | * Author: Philippe Peurichard <philippe.peurichard@st.com>, | ||
| 5 | * Pascal Paillet <p.paillet@st.com> for STMicroelectronics. | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef __LINUX_MFD_STPMIC1_H | ||
| 9 | #define __LINUX_MFD_STPMIC1_H | ||
| 10 | |||
| 11 | #define TURN_ON_SR 0x1 | ||
| 12 | #define TURN_OFF_SR 0x2 | ||
| 13 | #define ICC_LDO_TURN_OFF_SR 0x3 | ||
| 14 | #define ICC_BUCK_TURN_OFF_SR 0x4 | ||
| 15 | #define RREQ_STATE_SR 0x5 | ||
| 16 | #define VERSION_SR 0x6 | ||
| 17 | |||
| 18 | #define SWOFF_PWRCTRL_CR 0x10 | ||
| 19 | #define PADS_PULL_CR 0x11 | ||
| 20 | #define BUCKS_PD_CR 0x12 | ||
| 21 | #define LDO14_PD_CR 0x13 | ||
| 22 | #define LDO56_VREF_PD_CR 0x14 | ||
| 23 | #define VBUS_DET_VIN_CR 0x15 | ||
| 24 | #define PKEY_TURNOFF_CR 0x16 | ||
| 25 | #define BUCKS_MASK_RANK_CR 0x17 | ||
| 26 | #define BUCKS_MASK_RESET_CR 0x18 | ||
| 27 | #define LDOS_MASK_RANK_CR 0x19 | ||
| 28 | #define LDOS_MASK_RESET_CR 0x1A | ||
| 29 | #define WCHDG_CR 0x1B | ||
| 30 | #define WCHDG_TIMER_CR 0x1C | ||
| 31 | #define BUCKS_ICCTO_CR 0x1D | ||
| 32 | #define LDOS_ICCTO_CR 0x1E | ||
| 33 | |||
| 34 | #define BUCK1_ACTIVE_CR 0x20 | ||
| 35 | #define BUCK2_ACTIVE_CR 0x21 | ||
| 36 | #define BUCK3_ACTIVE_CR 0x22 | ||
| 37 | #define BUCK4_ACTIVE_CR 0x23 | ||
| 38 | #define VREF_DDR_ACTIVE_CR 0x24 | ||
| 39 | #define LDO1_ACTIVE_CR 0x25 | ||
| 40 | #define LDO2_ACTIVE_CR 0x26 | ||
| 41 | #define LDO3_ACTIVE_CR 0x27 | ||
| 42 | #define LDO4_ACTIVE_CR 0x28 | ||
| 43 | #define LDO5_ACTIVE_CR 0x29 | ||
| 44 | #define LDO6_ACTIVE_CR 0x2A | ||
| 45 | |||
| 46 | #define BUCK1_STDBY_CR 0x30 | ||
| 47 | #define BUCK2_STDBY_CR 0x31 | ||
| 48 | #define BUCK3_STDBY_CR 0x32 | ||
| 49 | #define BUCK4_STDBY_CR 0x33 | ||
| 50 | #define VREF_DDR_STDBY_CR 0x34 | ||
| 51 | #define LDO1_STDBY_CR 0x35 | ||
| 52 | #define LDO2_STDBY_CR 0x36 | ||
| 53 | #define LDO3_STDBY_CR 0x37 | ||
| 54 | #define LDO4_STDBY_CR 0x38 | ||
| 55 | #define LDO5_STDBY_CR 0x39 | ||
| 56 | #define LDO6_STDBY_CR 0x3A | ||
| 57 | |||
| 58 | #define BST_SW_CR 0x40 | ||
| 59 | |||
| 60 | #define INT_PENDING_R1 0x50 | ||
| 61 | #define INT_PENDING_R2 0x51 | ||
| 62 | #define INT_PENDING_R3 0x52 | ||
| 63 | #define INT_PENDING_R4 0x53 | ||
| 64 | |||
| 65 | #define INT_DBG_LATCH_R1 0x60 | ||
| 66 | #define INT_DBG_LATCH_R2 0x61 | ||
| 67 | #define INT_DBG_LATCH_R3 0x62 | ||
| 68 | #define INT_DBG_LATCH_R4 0x63 | ||
| 69 | |||
| 70 | #define INT_CLEAR_R1 0x70 | ||
| 71 | #define INT_CLEAR_R2 0x71 | ||
| 72 | #define INT_CLEAR_R3 0x72 | ||
| 73 | #define INT_CLEAR_R4 0x73 | ||
| 74 | |||
| 75 | #define INT_MASK_R1 0x80 | ||
| 76 | #define INT_MASK_R2 0x81 | ||
| 77 | #define INT_MASK_R3 0x82 | ||
| 78 | #define INT_MASK_R4 0x83 | ||
| 79 | |||
| 80 | #define INT_SET_MASK_R1 0x90 | ||
| 81 | #define INT_SET_MASK_R2 0x91 | ||
| 82 | #define INT_SET_MASK_R3 0x92 | ||
| 83 | #define INT_SET_MASK_R4 0x93 | ||
| 84 | |||
| 85 | #define INT_CLEAR_MASK_R1 0xA0 | ||
| 86 | #define INT_CLEAR_MASK_R2 0xA1 | ||
| 87 | #define INT_CLEAR_MASK_R3 0xA2 | ||
| 88 | #define INT_CLEAR_MASK_R4 0xA3 | ||
| 89 | |||
| 90 | #define INT_SRC_R1 0xB0 | ||
| 91 | #define INT_SRC_R2 0xB1 | ||
| 92 | #define INT_SRC_R3 0xB2 | ||
| 93 | #define INT_SRC_R4 0xB3 | ||
| 94 | |||
| 95 | #define PMIC_MAX_REGISTER_ADDRESS INT_SRC_R4 | ||
| 96 | |||
| 97 | #define STPMIC1_PMIC_NUM_IRQ_REGS 4 | ||
| 98 | |||
| 99 | #define TURN_OFF_SR_ICC_EVENT 0x08 | ||
| 100 | |||
| 101 | #define LDO_VOLTAGE_MASK GENMASK(6, 2) | ||
| 102 | #define BUCK_VOLTAGE_MASK GENMASK(7, 2) | ||
| 103 | #define LDO_BUCK_VOLTAGE_SHIFT 2 | ||
| 104 | |||
| 105 | #define LDO_ENABLE_MASK BIT(0) | ||
| 106 | #define BUCK_ENABLE_MASK BIT(0) | ||
| 107 | |||
| 108 | #define BUCK_HPLP_ENABLE_MASK BIT(1) | ||
| 109 | #define BUCK_HPLP_SHIFT 1 | ||
| 110 | |||
| 111 | #define STDBY_ENABLE_MASK BIT(0) | ||
| 112 | |||
| 113 | #define BUCKS_PD_CR_REG_MASK GENMASK(7, 0) | ||
| 114 | #define BUCK_MASK_RANK_REGISTER_MASK GENMASK(3, 0) | ||
| 115 | #define BUCK_MASK_RESET_REGISTER_MASK GENMASK(3, 0) | ||
| 116 | #define LDO1234_PULL_DOWN_REGISTER_MASK GENMASK(7, 0) | ||
| 117 | #define LDO56_VREF_PD_CR_REG_MASK GENMASK(5, 0) | ||
| 118 | #define LDO_MASK_RANK_REGISTER_MASK GENMASK(5, 0) | ||
| 119 | #define LDO_MASK_RESET_REGISTER_MASK GENMASK(5, 0) | ||
| 120 | |||
| 121 | #define BUCK1_PULL_DOWN_REG BUCKS_PD_CR | ||
| 122 | #define BUCK1_PULL_DOWN_MASK BIT(0) | ||
| 123 | #define BUCK2_PULL_DOWN_REG BUCKS_PD_CR | ||
| 124 | #define BUCK2_PULL_DOWN_MASK BIT(2) | ||
| 125 | #define BUCK3_PULL_DOWN_REG BUCKS_PD_CR | ||
| 126 | #define BUCK3_PULL_DOWN_MASK BIT(4) | ||
| 127 | #define BUCK4_PULL_DOWN_REG BUCKS_PD_CR | ||
| 128 | #define BUCK4_PULL_DOWN_MASK BIT(6) | ||
| 129 | |||
| 130 | #define LDO1_PULL_DOWN_REG LDO14_PD_CR | ||
| 131 | #define LDO1_PULL_DOWN_MASK BIT(0) | ||
| 132 | #define LDO2_PULL_DOWN_REG LDO14_PD_CR | ||
| 133 | #define LDO2_PULL_DOWN_MASK BIT(2) | ||
| 134 | #define LDO3_PULL_DOWN_REG LDO14_PD_CR | ||
| 135 | #define LDO3_PULL_DOWN_MASK BIT(4) | ||
| 136 | #define LDO4_PULL_DOWN_REG LDO14_PD_CR | ||
| 137 | #define LDO4_PULL_DOWN_MASK BIT(6) | ||
| 138 | #define LDO5_PULL_DOWN_REG LDO56_VREF_PD_CR | ||
| 139 | #define LDO5_PULL_DOWN_MASK BIT(0) | ||
| 140 | #define LDO6_PULL_DOWN_REG LDO56_VREF_PD_CR | ||
| 141 | #define LDO6_PULL_DOWN_MASK BIT(2) | ||
| 142 | #define VREF_DDR_PULL_DOWN_REG LDO56_VREF_PD_CR | ||
| 143 | #define VREF_DDR_PULL_DOWN_MASK BIT(4) | ||
| 144 | |||
| 145 | #define BUCKS_ICCTO_CR_REG_MASK GENMASK(6, 0) | ||
| 146 | #define LDOS_ICCTO_CR_REG_MASK GENMASK(5, 0) | ||
| 147 | |||
| 148 | #define LDO_BYPASS_MASK BIT(7) | ||
| 149 | |||
| 150 | /* Main PMIC Control Register | ||
| 151 | * SWOFF_PWRCTRL_CR | ||
| 152 | * Address : 0x10 | ||
| 153 | */ | ||
| 154 | #define ICC_EVENT_ENABLED BIT(4) | ||
| 155 | #define PWRCTRL_POLARITY_HIGH BIT(3) | ||
| 156 | #define PWRCTRL_PIN_VALID BIT(2) | ||
| 157 | #define RESTART_REQUEST_ENABLED BIT(1) | ||
| 158 | #define SOFTWARE_SWITCH_OFF_ENABLED BIT(0) | ||
| 159 | |||
| 160 | /* Main PMIC PADS Control Register | ||
| 161 | * PADS_PULL_CR | ||
| 162 | * Address : 0x11 | ||
| 163 | */ | ||
| 164 | #define WAKEUP_DETECTOR_DISABLED BIT(4) | ||
| 165 | #define PWRCTRL_PD_ACTIVE BIT(3) | ||
| 166 | #define PWRCTRL_PU_ACTIVE BIT(2) | ||
| 167 | #define WAKEUP_PD_ACTIVE BIT(1) | ||
| 168 | #define PONKEY_PU_INACTIVE BIT(0) | ||
| 169 | |||
| 170 | /* Main PMIC VINLOW Control Register | ||
| 171 | * VBUS_DET_VIN_CRC DMSC | ||
| 172 | * Address : 0x15 | ||
| 173 | */ | ||
| 174 | #define SWIN_DETECTOR_ENABLED BIT(7) | ||
| 175 | #define SWOUT_DETECTOR_ENABLED BIT(6) | ||
| 176 | #define VINLOW_ENABLED BIT(0) | ||
| 177 | #define VINLOW_CTRL_REG_MASK GENMASK(7, 0) | ||
| 178 | |||
| 179 | /* USB Control Register | ||
| 180 | * Address : 0x40 | ||
| 181 | */ | ||
| 182 | #define BOOST_OVP_DISABLED BIT(7) | ||
| 183 | #define VBUS_OTG_DETECTION_DISABLED BIT(6) | ||
| 184 | #define SW_OUT_DISCHARGE BIT(5) | ||
| 185 | #define VBUS_OTG_DISCHARGE BIT(4) | ||
| 186 | #define OCP_LIMIT_HIGH BIT(3) | ||
| 187 | #define SWIN_SWOUT_ENABLED BIT(2) | ||
| 188 | #define USBSW_OTG_SWITCH_ENABLED BIT(1) | ||
| 189 | #define BOOST_ENABLED BIT(0) | ||
| 190 | |||
| 191 | /* PKEY_TURNOFF_CR | ||
| 192 | * Address : 0x16 | ||
| 193 | */ | ||
| 194 | #define PONKEY_PWR_OFF BIT(7) | ||
| 195 | #define PONKEY_CC_FLAG_CLEAR BIT(6) | ||
| 196 | #define PONKEY_TURNOFF_TIMER_MASK GENMASK(3, 0) | ||
| 197 | #define PONKEY_TURNOFF_MASK GENMASK(7, 0) | ||
| 198 | |||
| 199 | /* | ||
| 200 | * struct stpmic1 - stpmic1 master device for sub-drivers | ||
| 201 | * @dev: master device of the chip (can be used to access platform data) | ||
| 202 | * @irq: main IRQ number | ||
| 203 | * @regmap_irq_chip_data: irq chip data | ||
| 204 | */ | ||
| 205 | struct stpmic1 { | ||
| 206 | struct device *dev; | ||
| 207 | struct regmap *regmap; | ||
| 208 | int irq; | ||
| 209 | struct regmap_irq_chip_data *irq_data; | ||
| 210 | }; | ||
| 211 | |||
| 212 | #endif /* __LINUX_MFD_STPMIC1_H */ | ||
diff --git a/include/linux/mfd/ti_am335x_tscadc.h b/include/linux/mfd/ti_am335x_tscadc.h index b9a53e013bff..483168403ae5 100644 --- a/include/linux/mfd/ti_am335x_tscadc.h +++ b/include/linux/mfd/ti_am335x_tscadc.h | |||
| @@ -78,6 +78,8 @@ | |||
| 78 | #define STEPCONFIG_YNN BIT(8) | 78 | #define STEPCONFIG_YNN BIT(8) |
| 79 | #define STEPCONFIG_XNP BIT(9) | 79 | #define STEPCONFIG_XNP BIT(9) |
| 80 | #define STEPCONFIG_YPN BIT(10) | 80 | #define STEPCONFIG_YPN BIT(10) |
| 81 | #define STEPCONFIG_RFP(val) ((val) << 12) | ||
| 82 | #define STEPCONFIG_RFP_VREFP (0x3 << 12) | ||
| 81 | #define STEPCONFIG_INM_MASK (0xF << 15) | 83 | #define STEPCONFIG_INM_MASK (0xF << 15) |
| 82 | #define STEPCONFIG_INM(val) ((val) << 15) | 84 | #define STEPCONFIG_INM(val) ((val) << 15) |
| 83 | #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) | 85 | #define STEPCONFIG_INM_ADCREFM STEPCONFIG_INM(8) |
| @@ -86,6 +88,8 @@ | |||
| 86 | #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) | 88 | #define STEPCONFIG_INP_AN4 STEPCONFIG_INP(4) |
| 87 | #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) | 89 | #define STEPCONFIG_INP_ADCREFM STEPCONFIG_INP(8) |
| 88 | #define STEPCONFIG_FIFO1 BIT(26) | 90 | #define STEPCONFIG_FIFO1 BIT(26) |
| 91 | #define STEPCONFIG_RFM(val) ((val) << 23) | ||
| 92 | #define STEPCONFIG_RFM_VREFN (0x3 << 23) | ||
| 89 | 93 | ||
| 90 | /* Delay register */ | 94 | /* Delay register */ |
| 91 | #define STEPDELAY_OPEN_MASK (0x3FFFF << 0) | 95 | #define STEPDELAY_OPEN_MASK (0x3FFFF << 0) |
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h index e2687a30e5a1..739b7bf37eaa 100644 --- a/include/linux/mfd/tmio.h +++ b/include/linux/mfd/tmio.h | |||
| @@ -79,7 +79,7 @@ | |||
| 79 | /* Some controllers have a CBSY bit */ | 79 | /* Some controllers have a CBSY bit */ |
| 80 | #define TMIO_MMC_HAVE_CBSY BIT(11) | 80 | #define TMIO_MMC_HAVE_CBSY BIT(11) |
| 81 | 81 | ||
| 82 | /* Some controllers that support HS400 use use 4 taps while others use 8. */ | 82 | /* Some controllers that support HS400 use 4 taps while others use 8. */ |
| 83 | #define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) | 83 | #define TMIO_MMC_HAVE_4TAP_HS400 BIT(13) |
| 84 | 84 | ||
| 85 | int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); | 85 | int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base); |
diff --git a/include/linux/mfd/tps65218.h b/include/linux/mfd/tps65218.h index c204d9a79436..b0470c35162d 100644 --- a/include/linux/mfd/tps65218.h +++ b/include/linux/mfd/tps65218.h | |||
| @@ -137,6 +137,10 @@ | |||
| 137 | #define TPS65218_CONFIG1_PGDLY_MASK 0x18 | 137 | #define TPS65218_CONFIG1_PGDLY_MASK 0x18 |
| 138 | #define TPS65218_CONFIG1_STRICT BIT(2) | 138 | #define TPS65218_CONFIG1_STRICT BIT(2) |
| 139 | #define TPS65218_CONFIG1_UVLO_MASK 0x3 | 139 | #define TPS65218_CONFIG1_UVLO_MASK 0x3 |
| 140 | #define TPS65218_CONFIG1_UVLO_2750000 0x0 | ||
| 141 | #define TPS65218_CONFIG1_UVLO_2950000 0x1 | ||
| 142 | #define TPS65218_CONFIG1_UVLO_3250000 0x2 | ||
| 143 | #define TPS65218_CONFIG1_UVLO_3350000 0x3 | ||
| 140 | 144 | ||
| 141 | #define TPS65218_CONFIG2_DC12_RST BIT(7) | 145 | #define TPS65218_CONFIG2_DC12_RST BIT(7) |
| 142 | #define TPS65218_CONFIG2_UVLOHYS BIT(6) | 146 | #define TPS65218_CONFIG2_UVLOHYS BIT(6) |
| @@ -208,6 +212,7 @@ enum tps65218_regulator_id { | |||
| 208 | /* LDOs */ | 212 | /* LDOs */ |
| 209 | TPS65218_LDO_1, | 213 | TPS65218_LDO_1, |
| 210 | /* LS's */ | 214 | /* LS's */ |
| 215 | TPS65218_LS_2, | ||
| 211 | TPS65218_LS_3, | 216 | TPS65218_LS_3, |
| 212 | }; | 217 | }; |
| 213 | 218 | ||
| @@ -218,7 +223,7 @@ enum tps65218_regulator_id { | |||
| 218 | /* Number of LDO voltage regulators available */ | 223 | /* Number of LDO voltage regulators available */ |
| 219 | #define TPS65218_NUM_LDO 1 | 224 | #define TPS65218_NUM_LDO 1 |
| 220 | /* Number of total LS current regulators available */ | 225 | /* Number of total LS current regulators available */ |
| 221 | #define TPS65218_NUM_LS 1 | 226 | #define TPS65218_NUM_LS 2 |
| 222 | /* Number of total regulators available */ | 227 | /* Number of total regulators available */ |
| 223 | #define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \ | 228 | #define TPS65218_NUM_REGULATOR (TPS65218_NUM_DCDC + TPS65218_NUM_LDO \ |
| 224 | + TPS65218_NUM_LS) | 229 | + TPS65218_NUM_LS) |
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index b49fa67612f1..6fcb8eb00282 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h | |||
| @@ -418,7 +418,6 @@ int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg, | |||
| 418 | int count, u16 *buf); | 418 | int count, u16 *buf); |
| 419 | 419 | ||
| 420 | int wm831x_device_init(struct wm831x *wm831x, int irq); | 420 | int wm831x_device_init(struct wm831x *wm831x, int irq); |
| 421 | void wm831x_device_exit(struct wm831x *wm831x); | ||
| 422 | int wm831x_device_suspend(struct wm831x *wm831x); | 421 | int wm831x_device_suspend(struct wm831x *wm831x); |
| 423 | void wm831x_device_shutdown(struct wm831x *wm831x); | 422 | void wm831x_device_shutdown(struct wm831x *wm831x); |
| 424 | int wm831x_irq_init(struct wm831x *wm831x, int irq); | 423 | int wm831x_irq_init(struct wm831x *wm831x, int irq); |
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h index 509481d9cf19..202d9bde2c7c 100644 --- a/include/linux/mfd/wm8350/core.h +++ b/include/linux/mfd/wm8350/core.h | |||
| @@ -643,7 +643,6 @@ struct wm8350_platform_data { | |||
| 643 | */ | 643 | */ |
| 644 | int wm8350_device_init(struct wm8350 *wm8350, int irq, | 644 | int wm8350_device_init(struct wm8350 *wm8350, int irq, |
| 645 | struct wm8350_platform_data *pdata); | 645 | struct wm8350_platform_data *pdata); |
| 646 | void wm8350_device_exit(struct wm8350 *wm8350); | ||
| 647 | 646 | ||
| 648 | /* | 647 | /* |
| 649 | * WM8350 device IO | 648 | * WM8350 device IO |
diff --git a/include/linux/mii.h b/include/linux/mii.h index 6fee8b1a4400..5cd824c1c0ca 100644 --- a/include/linux/mii.h +++ b/include/linux/mii.h | |||
| @@ -469,7 +469,7 @@ static inline u32 linkmode_adv_to_lcl_adv_t(unsigned long *advertising) | |||
| 469 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, | 469 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, |
| 470 | advertising)) | 470 | advertising)) |
| 471 | lcl_adv |= ADVERTISE_PAUSE_CAP; | 471 | lcl_adv |= ADVERTISE_PAUSE_CAP; |
| 472 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Pause_BIT, | 472 | if (linkmode_test_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, |
| 473 | advertising)) | 473 | advertising)) |
| 474 | lcl_adv |= ADVERTISE_PAUSE_ASYM; | 474 | lcl_adv |= ADVERTISE_PAUSE_ASYM; |
| 475 | 475 | ||
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h index 8c4a820bd4c1..f93a5598b942 100644 --- a/include/linux/mlx5/device.h +++ b/include/linux/mlx5/device.h | |||
| @@ -67,7 +67,7 @@ | |||
| 67 | #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) | 67 | #define MLX5_UN_SZ_BYTES(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 8) |
| 68 | #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) | 68 | #define MLX5_UN_SZ_DW(typ) (sizeof(union mlx5_ifc_##typ##_bits) / 32) |
| 69 | #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) | 69 | #define MLX5_BYTE_OFF(typ, fld) (__mlx5_bit_off(typ, fld) / 8) |
| 70 | #define MLX5_ADDR_OF(typ, p, fld) ((char *)(p) + MLX5_BYTE_OFF(typ, fld)) | 70 | #define MLX5_ADDR_OF(typ, p, fld) ((void *)((uint8_t *)(p) + MLX5_BYTE_OFF(typ, fld))) |
| 71 | 71 | ||
| 72 | /* insert a value to a struct */ | 72 | /* insert a value to a struct */ |
| 73 | #define MLX5_SET(typ, p, fld, v) do { \ | 73 | #define MLX5_SET(typ, p, fld, v) do { \ |
| @@ -342,6 +342,8 @@ enum mlx5_event { | |||
| 342 | MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, | 342 | MLX5_EVENT_TYPE_PAGE_FAULT = 0xc, |
| 343 | MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, | 343 | MLX5_EVENT_TYPE_NIC_VPORT_CHANGE = 0xd, |
| 344 | 344 | ||
| 345 | MLX5_EVENT_TYPE_HOST_PARAMS_CHANGE = 0xe, | ||
| 346 | |||
| 345 | MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, | 347 | MLX5_EVENT_TYPE_DCT_DRAINED = 0x1c, |
| 346 | 348 | ||
| 347 | MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, | 349 | MLX5_EVENT_TYPE_FPGA_ERROR = 0x20, |
| @@ -591,7 +593,7 @@ struct mlx5_eqe_cmd { | |||
| 591 | }; | 593 | }; |
| 592 | 594 | ||
| 593 | struct mlx5_eqe_page_req { | 595 | struct mlx5_eqe_page_req { |
| 594 | u8 rsvd0[2]; | 596 | __be16 ec_function; |
| 595 | __be16 func_id; | 597 | __be16 func_id; |
| 596 | __be32 num_pages; | 598 | __be32 num_pages; |
| 597 | __be32 rsvd1[5]; | 599 | __be32 rsvd1[5]; |
| @@ -1201,6 +1203,9 @@ enum mlx5_qcam_feature_groups { | |||
| 1201 | #define MLX5_CAP_ODP(mdev, cap)\ | 1203 | #define MLX5_CAP_ODP(mdev, cap)\ |
| 1202 | MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) | 1204 | MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap) |
| 1203 | 1205 | ||
| 1206 | #define MLX5_CAP_ODP_MAX(mdev, cap)\ | ||
| 1207 | MLX5_GET(odp_cap, mdev->caps.hca_max[MLX5_CAP_ODP], cap) | ||
| 1208 | |||
| 1204 | #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ | 1209 | #define MLX5_CAP_VECTOR_CALC(mdev, cap) \ |
| 1205 | MLX5_GET(vector_calc_cap, \ | 1210 | MLX5_GET(vector_calc_cap, \ |
| 1206 | mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) | 1211 | mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap) |
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h index 54299251d40d..0d0729648844 100644 --- a/include/linux/mlx5/driver.h +++ b/include/linux/mlx5/driver.h | |||
| @@ -195,6 +195,7 @@ struct mlx5_rsc_debug { | |||
| 195 | 195 | ||
| 196 | enum mlx5_dev_event { | 196 | enum mlx5_dev_event { |
| 197 | MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ | 197 | MLX5_DEV_EVENT_SYS_ERROR = 128, /* 0 - 127 are FW events */ |
| 198 | MLX5_DEV_EVENT_PORT_AFFINITY = 129, | ||
| 198 | }; | 199 | }; |
| 199 | 200 | ||
| 200 | enum mlx5_port_status { | 201 | enum mlx5_port_status { |
| @@ -364,6 +365,7 @@ struct mlx5_core_sig_ctx { | |||
| 364 | enum { | 365 | enum { |
| 365 | MLX5_MKEY_MR = 1, | 366 | MLX5_MKEY_MR = 1, |
| 366 | MLX5_MKEY_MW, | 367 | MLX5_MKEY_MW, |
| 368 | MLX5_MKEY_INDIRECT_DEVX, | ||
| 367 | }; | 369 | }; |
| 368 | 370 | ||
| 369 | struct mlx5_core_mkey { | 371 | struct mlx5_core_mkey { |
| @@ -522,6 +524,7 @@ struct mlx5_priv { | |||
| 522 | atomic_t reg_pages; | 524 | atomic_t reg_pages; |
| 523 | struct list_head free_list; | 525 | struct list_head free_list; |
| 524 | int vfs_pages; | 526 | int vfs_pages; |
| 527 | int peer_pf_pages; | ||
| 525 | 528 | ||
| 526 | struct mlx5_core_health health; | 529 | struct mlx5_core_health health; |
| 527 | 530 | ||
| @@ -591,6 +594,8 @@ enum mlx5_pagefault_type_flags { | |||
| 591 | }; | 594 | }; |
| 592 | 595 | ||
| 593 | struct mlx5_td { | 596 | struct mlx5_td { |
| 597 | /* protects tirs list changes while tirs refresh */ | ||
| 598 | struct mutex list_lock; | ||
| 594 | struct list_head tirs_list; | 599 | struct list_head tirs_list; |
| 595 | u32 tdn; | 600 | u32 tdn; |
| 596 | }; | 601 | }; |
| @@ -652,6 +657,7 @@ struct mlx5_core_dev { | |||
| 652 | u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; | 657 | u32 mcam[MLX5_ST_SZ_DW(mcam_reg)]; |
| 653 | u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; | 658 | u32 fpga[MLX5_ST_SZ_DW(fpga_cap)]; |
| 654 | u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; | 659 | u32 qcam[MLX5_ST_SZ_DW(qcam_reg)]; |
| 660 | u8 embedded_cpu; | ||
| 655 | } caps; | 661 | } caps; |
| 656 | u64 sys_image_guid; | 662 | u64 sys_image_guid; |
| 657 | phys_addr_t iseg_base; | 663 | phys_addr_t iseg_base; |
| @@ -850,11 +856,30 @@ void mlx5_cmd_cleanup(struct mlx5_core_dev *dev); | |||
| 850 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev); | 856 | void mlx5_cmd_use_events(struct mlx5_core_dev *dev); |
| 851 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); | 857 | void mlx5_cmd_use_polling(struct mlx5_core_dev *dev); |
| 852 | 858 | ||
| 859 | struct mlx5_async_ctx { | ||
| 860 | struct mlx5_core_dev *dev; | ||
| 861 | atomic_t num_inflight; | ||
| 862 | struct wait_queue_head wait; | ||
| 863 | }; | ||
| 864 | |||
| 865 | struct mlx5_async_work; | ||
| 866 | |||
| 867 | typedef void (*mlx5_async_cbk_t)(int status, struct mlx5_async_work *context); | ||
| 868 | |||
| 869 | struct mlx5_async_work { | ||
| 870 | struct mlx5_async_ctx *ctx; | ||
| 871 | mlx5_async_cbk_t user_callback; | ||
| 872 | }; | ||
| 873 | |||
| 874 | void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev, | ||
| 875 | struct mlx5_async_ctx *ctx); | ||
| 876 | void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx); | ||
| 877 | int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size, | ||
| 878 | void *out, int out_size, mlx5_async_cbk_t callback, | ||
| 879 | struct mlx5_async_work *work); | ||
| 880 | |||
| 853 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, | 881 | int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out, |
| 854 | int out_size); | 882 | int out_size); |
| 855 | int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size, | ||
| 856 | void *out, int out_size, mlx5_cmd_cbk_t callback, | ||
| 857 | void *context); | ||
| 858 | int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, | 883 | int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size, |
| 859 | void *out, int out_size); | 884 | void *out, int out_size); |
| 860 | void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); | 885 | void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome); |
| @@ -885,9 +910,10 @@ void mlx5_init_mkey_table(struct mlx5_core_dev *dev); | |||
| 885 | void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); | 910 | void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev); |
| 886 | int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, | 911 | int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev, |
| 887 | struct mlx5_core_mkey *mkey, | 912 | struct mlx5_core_mkey *mkey, |
| 888 | u32 *in, int inlen, | 913 | struct mlx5_async_ctx *async_ctx, u32 *in, |
| 889 | u32 *out, int outlen, | 914 | int inlen, u32 *out, int outlen, |
| 890 | mlx5_cmd_cbk_t callback, void *context); | 915 | mlx5_async_cbk_t callback, |
| 916 | struct mlx5_async_work *context); | ||
| 891 | int mlx5_core_create_mkey(struct mlx5_core_dev *dev, | 917 | int mlx5_core_create_mkey(struct mlx5_core_dev *dev, |
| 892 | struct mlx5_core_mkey *mkey, | 918 | struct mlx5_core_mkey *mkey, |
| 893 | u32 *in, int inlen); | 919 | u32 *in, int inlen); |
| @@ -897,14 +923,12 @@ int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey, | |||
| 897 | u32 *out, int outlen); | 923 | u32 *out, int outlen); |
| 898 | int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); | 924 | int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn); |
| 899 | int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); | 925 | int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn); |
| 900 | int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, | ||
| 901 | u16 opmod, u8 port); | ||
| 902 | int mlx5_pagealloc_init(struct mlx5_core_dev *dev); | 926 | int mlx5_pagealloc_init(struct mlx5_core_dev *dev); |
| 903 | void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); | 927 | void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev); |
| 904 | void mlx5_pagealloc_start(struct mlx5_core_dev *dev); | 928 | void mlx5_pagealloc_start(struct mlx5_core_dev *dev); |
| 905 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); | 929 | void mlx5_pagealloc_stop(struct mlx5_core_dev *dev); |
| 906 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, | 930 | void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id, |
| 907 | s32 npages); | 931 | s32 npages, bool ec_function); |
| 908 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); | 932 | int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot); |
| 909 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); | 933 | int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev); |
| 910 | void mlx5_register_debugfs(void); | 934 | void mlx5_register_debugfs(void); |
| @@ -939,10 +963,6 @@ int mlx5_query_odp_caps(struct mlx5_core_dev *dev, | |||
| 939 | struct mlx5_odp_caps *odp_caps); | 963 | struct mlx5_odp_caps *odp_caps); |
| 940 | int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, | 964 | int mlx5_core_query_ib_ppcnt(struct mlx5_core_dev *dev, |
| 941 | u8 port_num, void *out, size_t sz); | 965 | u8 port_num, void *out, size_t sz); |
| 942 | #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING | ||
| 943 | int mlx5_core_page_fault_resume(struct mlx5_core_dev *dev, u32 token, | ||
| 944 | u32 wq_num, u8 type, int error); | ||
| 945 | #endif | ||
| 946 | 966 | ||
| 947 | int mlx5_init_rl_table(struct mlx5_core_dev *dev); | 967 | int mlx5_init_rl_table(struct mlx5_core_dev *dev); |
| 948 | void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); | 968 | void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev); |
| @@ -1021,6 +1041,7 @@ int mlx5_cmd_create_vport_lag(struct mlx5_core_dev *dev); | |||
| 1021 | int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); | 1041 | int mlx5_cmd_destroy_vport_lag(struct mlx5_core_dev *dev); |
| 1022 | bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); | 1042 | bool mlx5_lag_is_roce(struct mlx5_core_dev *dev); |
| 1023 | bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); | 1043 | bool mlx5_lag_is_sriov(struct mlx5_core_dev *dev); |
| 1044 | bool mlx5_lag_is_multipath(struct mlx5_core_dev *dev); | ||
| 1024 | bool mlx5_lag_is_active(struct mlx5_core_dev *dev); | 1045 | bool mlx5_lag_is_active(struct mlx5_core_dev *dev); |
| 1025 | struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); | 1046 | struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev); |
| 1026 | int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, | 1047 | int mlx5_lag_query_cong_counters(struct mlx5_core_dev *dev, |
| @@ -1058,11 +1079,29 @@ static inline int mlx5_core_is_pf(struct mlx5_core_dev *dev) | |||
| 1058 | return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); | 1079 | return !(dev->priv.pci_dev_data & MLX5_PCI_DEV_IS_VF); |
| 1059 | } | 1080 | } |
| 1060 | 1081 | ||
| 1061 | #define MLX5_TOTAL_VPORTS(mdev) (1 + pci_sriov_get_totalvfs((mdev)->pdev)) | 1082 | static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev) |
| 1062 | #define MLX5_VPORT_MANAGER(mdev) \ | 1083 | { |
| 1063 | (MLX5_CAP_GEN(mdev, vport_group_manager) && \ | 1084 | return dev->caps.embedded_cpu; |
| 1064 | (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ | 1085 | } |
| 1065 | mlx5_core_is_pf(mdev)) | 1086 | |
| 1087 | static inline bool mlx5_core_is_ecpf_esw_manager(struct mlx5_core_dev *dev) | ||
| 1088 | { | ||
| 1089 | return dev->caps.embedded_cpu && MLX5_CAP_GEN(dev, eswitch_manager); | ||
| 1090 | } | ||
| 1091 | |||
| 1092 | static inline bool mlx5_ecpf_vport_exists(struct mlx5_core_dev *dev) | ||
| 1093 | { | ||
| 1094 | return mlx5_core_is_pf(dev) && MLX5_CAP_ESW(dev, ecpf_vport_exists); | ||
| 1095 | } | ||
| 1096 | |||
| 1097 | #define MLX5_HOST_PF_MAX_VFS (127u) | ||
| 1098 | static inline u16 mlx5_core_max_vfs(struct mlx5_core_dev *dev) | ||
| 1099 | { | ||
| 1100 | if (mlx5_core_is_ecpf_esw_manager(dev)) | ||
| 1101 | return MLX5_HOST_PF_MAX_VFS; | ||
| 1102 | else | ||
| 1103 | return pci_sriov_get_totalvfs(dev->pdev); | ||
| 1104 | } | ||
| 1066 | 1105 | ||
| 1067 | static inline int mlx5_get_gid_table_len(u16 param) | 1106 | static inline int mlx5_get_gid_table_len(u16 param) |
| 1068 | { | 1107 | { |
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h index fab5121ffb8f..96d8435421de 100644 --- a/include/linux/mlx5/eswitch.h +++ b/include/linux/mlx5/eswitch.h | |||
| @@ -22,6 +22,12 @@ enum { | |||
| 22 | NUM_REP_TYPES, | 22 | NUM_REP_TYPES, |
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| 25 | enum { | ||
| 26 | REP_UNREGISTERED, | ||
| 27 | REP_REGISTERED, | ||
| 28 | REP_LOADED, | ||
| 29 | }; | ||
| 30 | |||
| 25 | struct mlx5_eswitch_rep; | 31 | struct mlx5_eswitch_rep; |
| 26 | struct mlx5_eswitch_rep_if { | 32 | struct mlx5_eswitch_rep_if { |
| 27 | int (*load)(struct mlx5_core_dev *dev, | 33 | int (*load)(struct mlx5_core_dev *dev, |
| @@ -29,7 +35,7 @@ struct mlx5_eswitch_rep_if { | |||
| 29 | void (*unload)(struct mlx5_eswitch_rep *rep); | 35 | void (*unload)(struct mlx5_eswitch_rep *rep); |
| 30 | void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); | 36 | void *(*get_proto_dev)(struct mlx5_eswitch_rep *rep); |
| 31 | void *priv; | 37 | void *priv; |
| 32 | bool valid; | 38 | u8 state; |
| 33 | }; | 39 | }; |
| 34 | 40 | ||
| 35 | struct mlx5_eswitch_rep { | 41 | struct mlx5_eswitch_rep { |
| @@ -40,13 +46,10 @@ struct mlx5_eswitch_rep { | |||
| 40 | u32 vlan_refcount; | 46 | u32 vlan_refcount; |
| 41 | }; | 47 | }; |
| 42 | 48 | ||
| 43 | void mlx5_eswitch_register_vport_rep(struct mlx5_eswitch *esw, | 49 | void mlx5_eswitch_register_vport_reps(struct mlx5_eswitch *esw, |
| 44 | int vport_index, | 50 | struct mlx5_eswitch_rep_if *rep_if, |
| 45 | struct mlx5_eswitch_rep_if *rep_if, | 51 | u8 rep_type); |
| 46 | u8 rep_type); | 52 | void mlx5_eswitch_unregister_vport_reps(struct mlx5_eswitch *esw, u8 rep_type); |
| 47 | void mlx5_eswitch_unregister_vport_rep(struct mlx5_eswitch *esw, | ||
| 48 | int vport_index, | ||
| 49 | u8 rep_type); | ||
| 50 | void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, | 53 | void *mlx5_eswitch_get_proto_dev(struct mlx5_eswitch *esw, |
| 51 | int vport, | 54 | int vport, |
| 52 | u8 rep_type); | 55 | u8 rep_type); |
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h index 35fe5217b244..3b83288749c6 100644 --- a/include/linux/mlx5/mlx5_ifc.h +++ b/include/linux/mlx5/mlx5_ifc.h | |||
| @@ -72,6 +72,7 @@ enum { | |||
| 72 | 72 | ||
| 73 | enum { | 73 | enum { |
| 74 | MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, | 74 | MLX5_SET_HCA_CAP_OP_MOD_GENERAL_DEVICE = 0x0, |
| 75 | MLX5_SET_HCA_CAP_OP_MOD_ODP = 0x2, | ||
| 75 | MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, | 76 | MLX5_SET_HCA_CAP_OP_MOD_ATOMIC = 0x3, |
| 76 | }; | 77 | }; |
| 77 | 78 | ||
| @@ -141,6 +142,7 @@ enum { | |||
| 141 | MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, | 142 | MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY = 0x725, |
| 142 | MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, | 143 | MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY = 0x726, |
| 143 | MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, | 144 | MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS = 0x727, |
| 145 | MLX5_CMD_OP_QUERY_HOST_PARAMS = 0x740, | ||
| 144 | MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, | 146 | MLX5_CMD_OP_QUERY_VPORT_STATE = 0x750, |
| 145 | MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, | 147 | MLX5_CMD_OP_MODIFY_VPORT_STATE = 0x751, |
| 146 | MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, | 148 | MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT = 0x752, |
| @@ -629,7 +631,8 @@ struct mlx5_ifc_e_switch_cap_bits { | |||
| 629 | u8 vport_svlan_insert[0x1]; | 631 | u8 vport_svlan_insert[0x1]; |
| 630 | u8 vport_cvlan_insert_if_not_exist[0x1]; | 632 | u8 vport_cvlan_insert_if_not_exist[0x1]; |
| 631 | u8 vport_cvlan_insert_overwrite[0x1]; | 633 | u8 vport_cvlan_insert_overwrite[0x1]; |
| 632 | u8 reserved_at_5[0x17]; | 634 | u8 reserved_at_5[0x16]; |
| 635 | u8 ecpf_vport_exists[0x1]; | ||
| 633 | u8 counter_eswitch_affinity[0x1]; | 636 | u8 counter_eswitch_affinity[0x1]; |
| 634 | u8 merged_eswitch[0x1]; | 637 | u8 merged_eswitch[0x1]; |
| 635 | u8 nic_vport_node_guid_modify[0x1]; | 638 | u8 nic_vport_node_guid_modify[0x1]; |
| @@ -831,7 +834,9 @@ struct mlx5_ifc_odp_cap_bits { | |||
| 831 | 834 | ||
| 832 | struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; | 835 | struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps; |
| 833 | 836 | ||
| 834 | u8 reserved_at_e0[0x720]; | 837 | struct mlx5_ifc_odp_per_transport_service_cap_bits xrc_odp_caps; |
| 838 | |||
| 839 | u8 reserved_at_100[0x700]; | ||
| 835 | }; | 840 | }; |
| 836 | 841 | ||
| 837 | struct mlx5_ifc_calc_op { | 842 | struct mlx5_ifc_calc_op { |
| @@ -4438,7 +4443,8 @@ struct mlx5_ifc_query_pages_out_bits { | |||
| 4438 | 4443 | ||
| 4439 | u8 syndrome[0x20]; | 4444 | u8 syndrome[0x20]; |
| 4440 | 4445 | ||
| 4441 | u8 reserved_at_40[0x10]; | 4446 | u8 embedded_cpu_function[0x1]; |
| 4447 | u8 reserved_at_41[0xf]; | ||
| 4442 | u8 function_id[0x10]; | 4448 | u8 function_id[0x10]; |
| 4443 | 4449 | ||
| 4444 | u8 num_pages[0x20]; | 4450 | u8 num_pages[0x20]; |
| @@ -4457,7 +4463,8 @@ struct mlx5_ifc_query_pages_in_bits { | |||
| 4457 | u8 reserved_at_20[0x10]; | 4463 | u8 reserved_at_20[0x10]; |
| 4458 | u8 op_mod[0x10]; | 4464 | u8 op_mod[0x10]; |
| 4459 | 4465 | ||
| 4460 | u8 reserved_at_40[0x10]; | 4466 | u8 embedded_cpu_function[0x1]; |
| 4467 | u8 reserved_at_41[0xf]; | ||
| 4461 | u8 function_id[0x10]; | 4468 | u8 function_id[0x10]; |
| 4462 | 4469 | ||
| 4463 | u8 reserved_at_60[0x20]; | 4470 | u8 reserved_at_60[0x20]; |
| @@ -5877,7 +5884,8 @@ struct mlx5_ifc_manage_pages_in_bits { | |||
| 5877 | u8 reserved_at_20[0x10]; | 5884 | u8 reserved_at_20[0x10]; |
| 5878 | u8 op_mod[0x10]; | 5885 | u8 op_mod[0x10]; |
| 5879 | 5886 | ||
| 5880 | u8 reserved_at_40[0x10]; | 5887 | u8 embedded_cpu_function[0x1]; |
| 5888 | u8 reserved_at_41[0xf]; | ||
| 5881 | u8 function_id[0x10]; | 5889 | u8 function_id[0x10]; |
| 5882 | 5890 | ||
| 5883 | u8 input_num_entries[0x20]; | 5891 | u8 input_num_entries[0x20]; |
| @@ -6055,7 +6063,8 @@ struct mlx5_ifc_enable_hca_in_bits { | |||
| 6055 | u8 reserved_at_20[0x10]; | 6063 | u8 reserved_at_20[0x10]; |
| 6056 | u8 op_mod[0x10]; | 6064 | u8 op_mod[0x10]; |
| 6057 | 6065 | ||
| 6058 | u8 reserved_at_40[0x10]; | 6066 | u8 embedded_cpu_function[0x1]; |
| 6067 | u8 reserved_at_41[0xf]; | ||
| 6059 | u8 function_id[0x10]; | 6068 | u8 function_id[0x10]; |
| 6060 | 6069 | ||
| 6061 | u8 reserved_at_60[0x20]; | 6070 | u8 reserved_at_60[0x20]; |
| @@ -6099,7 +6108,8 @@ struct mlx5_ifc_disable_hca_in_bits { | |||
| 6099 | u8 reserved_at_20[0x10]; | 6108 | u8 reserved_at_20[0x10]; |
| 6100 | u8 op_mod[0x10]; | 6109 | u8 op_mod[0x10]; |
| 6101 | 6110 | ||
| 6102 | u8 reserved_at_40[0x10]; | 6111 | u8 embedded_cpu_function[0x1]; |
| 6112 | u8 reserved_at_41[0xf]; | ||
| 6103 | u8 function_id[0x10]; | 6113 | u8 function_id[0x10]; |
| 6104 | 6114 | ||
| 6105 | u8 reserved_at_60[0x20]; | 6115 | u8 reserved_at_60[0x20]; |
| @@ -7817,21 +7827,23 @@ struct mlx5_ifc_ptys_reg_bits { | |||
| 7817 | u8 proto_mask[0x3]; | 7827 | u8 proto_mask[0x3]; |
| 7818 | 7828 | ||
| 7819 | u8 an_status[0x4]; | 7829 | u8 an_status[0x4]; |
| 7820 | u8 reserved_at_24[0x3c]; | 7830 | u8 reserved_at_24[0x1c]; |
| 7831 | |||
| 7832 | u8 ext_eth_proto_capability[0x20]; | ||
| 7821 | 7833 | ||
| 7822 | u8 eth_proto_capability[0x20]; | 7834 | u8 eth_proto_capability[0x20]; |
| 7823 | 7835 | ||
| 7824 | u8 ib_link_width_capability[0x10]; | 7836 | u8 ib_link_width_capability[0x10]; |
| 7825 | u8 ib_proto_capability[0x10]; | 7837 | u8 ib_proto_capability[0x10]; |
| 7826 | 7838 | ||
| 7827 | u8 reserved_at_a0[0x20]; | 7839 | u8 ext_eth_proto_admin[0x20]; |
| 7828 | 7840 | ||
| 7829 | u8 eth_proto_admin[0x20]; | 7841 | u8 eth_proto_admin[0x20]; |
| 7830 | 7842 | ||
| 7831 | u8 ib_link_width_admin[0x10]; | 7843 | u8 ib_link_width_admin[0x10]; |
| 7832 | u8 ib_proto_admin[0x10]; | 7844 | u8 ib_proto_admin[0x10]; |
| 7833 | 7845 | ||
| 7834 | u8 reserved_at_100[0x20]; | 7846 | u8 ext_eth_proto_oper[0x20]; |
| 7835 | 7847 | ||
| 7836 | u8 eth_proto_oper[0x20]; | 7848 | u8 eth_proto_oper[0x20]; |
| 7837 | 7849 | ||
| @@ -8280,7 +8292,9 @@ struct mlx5_ifc_mpegc_reg_bits { | |||
| 8280 | struct mlx5_ifc_pcam_enhanced_features_bits { | 8292 | struct mlx5_ifc_pcam_enhanced_features_bits { |
| 8281 | u8 reserved_at_0[0x6d]; | 8293 | u8 reserved_at_0[0x6d]; |
| 8282 | u8 rx_icrc_encapsulated_counter[0x1]; | 8294 | u8 rx_icrc_encapsulated_counter[0x1]; |
| 8283 | u8 reserved_at_6e[0x8]; | 8295 | u8 reserved_at_6e[0x4]; |
| 8296 | u8 ptys_extended_ethernet[0x1]; | ||
| 8297 | u8 reserved_at_73[0x3]; | ||
| 8284 | u8 pfcc_mask[0x1]; | 8298 | u8 pfcc_mask[0x1]; |
| 8285 | u8 reserved_at_77[0x3]; | 8299 | u8 reserved_at_77[0x3]; |
| 8286 | u8 per_lane_error_counters[0x1]; | 8300 | u8 per_lane_error_counters[0x1]; |
| @@ -8459,9 +8473,17 @@ struct mlx5_ifc_pamp_reg_bits { | |||
| 8459 | struct mlx5_ifc_pcmr_reg_bits { | 8473 | struct mlx5_ifc_pcmr_reg_bits { |
| 8460 | u8 reserved_at_0[0x8]; | 8474 | u8 reserved_at_0[0x8]; |
| 8461 | u8 local_port[0x8]; | 8475 | u8 local_port[0x8]; |
| 8462 | u8 reserved_at_10[0x2e]; | 8476 | u8 reserved_at_10[0x10]; |
| 8477 | u8 entropy_force_cap[0x1]; | ||
| 8478 | u8 entropy_calc_cap[0x1]; | ||
| 8479 | u8 entropy_gre_calc_cap[0x1]; | ||
| 8480 | u8 reserved_at_23[0x1b]; | ||
| 8463 | u8 fcs_cap[0x1]; | 8481 | u8 fcs_cap[0x1]; |
| 8464 | u8 reserved_at_3f[0x1f]; | 8482 | u8 reserved_at_3f[0x1]; |
| 8483 | u8 entropy_force[0x1]; | ||
| 8484 | u8 entropy_calc[0x1]; | ||
| 8485 | u8 entropy_gre_calc[0x1]; | ||
| 8486 | u8 reserved_at_43[0x1b]; | ||
| 8465 | u8 fcs_chk[0x1]; | 8487 | u8 fcs_chk[0x1]; |
| 8466 | u8 reserved_at_5f[0x1]; | 8488 | u8 reserved_at_5f[0x1]; |
| 8467 | }; | 8489 | }; |
| @@ -8746,7 +8768,8 @@ struct mlx5_ifc_initial_seg_bits { | |||
| 8746 | u8 initializing[0x1]; | 8768 | u8 initializing[0x1]; |
| 8747 | u8 reserved_at_fe1[0x4]; | 8769 | u8 reserved_at_fe1[0x4]; |
| 8748 | u8 nic_interface_supported[0x3]; | 8770 | u8 nic_interface_supported[0x3]; |
| 8749 | u8 reserved_at_fe8[0x18]; | 8771 | u8 embedded_cpu[0x1]; |
| 8772 | u8 reserved_at_fe9[0x17]; | ||
| 8750 | 8773 | ||
| 8751 | struct mlx5_ifc_health_buffer_bits health_buffer; | 8774 | struct mlx5_ifc_health_buffer_bits health_buffer; |
| 8752 | 8775 | ||
| @@ -9513,4 +9536,44 @@ struct mlx5_ifc_mtrc_ctrl_bits { | |||
| 9513 | u8 reserved_at_80[0x180]; | 9536 | u8 reserved_at_80[0x180]; |
| 9514 | }; | 9537 | }; |
| 9515 | 9538 | ||
| 9539 | struct mlx5_ifc_host_params_context_bits { | ||
| 9540 | u8 host_number[0x8]; | ||
| 9541 | u8 reserved_at_8[0x8]; | ||
| 9542 | u8 host_num_of_vfs[0x10]; | ||
| 9543 | |||
| 9544 | u8 reserved_at_20[0x10]; | ||
| 9545 | u8 host_pci_bus[0x10]; | ||
| 9546 | |||
| 9547 | u8 reserved_at_40[0x10]; | ||
| 9548 | u8 host_pci_device[0x10]; | ||
| 9549 | |||
| 9550 | u8 reserved_at_60[0x10]; | ||
| 9551 | u8 host_pci_function[0x10]; | ||
| 9552 | |||
| 9553 | u8 reserved_at_80[0x180]; | ||
| 9554 | }; | ||
| 9555 | |||
| 9556 | struct mlx5_ifc_query_host_params_in_bits { | ||
| 9557 | u8 opcode[0x10]; | ||
| 9558 | u8 reserved_at_10[0x10]; | ||
| 9559 | |||
| 9560 | u8 reserved_at_20[0x10]; | ||
| 9561 | u8 op_mod[0x10]; | ||
| 9562 | |||
| 9563 | u8 reserved_at_40[0x40]; | ||
| 9564 | }; | ||
| 9565 | |||
| 9566 | struct mlx5_ifc_query_host_params_out_bits { | ||
| 9567 | u8 status[0x8]; | ||
| 9568 | u8 reserved_at_8[0x18]; | ||
| 9569 | |||
| 9570 | u8 syndrome[0x20]; | ||
| 9571 | |||
| 9572 | u8 reserved_at_40[0x40]; | ||
| 9573 | |||
| 9574 | struct mlx5_ifc_host_params_context_bits host_params_context; | ||
| 9575 | |||
| 9576 | u8 reserved_at_280[0x180]; | ||
| 9577 | }; | ||
| 9578 | |||
| 9516 | #endif /* MLX5_IFC_H */ | 9579 | #endif /* MLX5_IFC_H */ |
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h index bf4bc01ffb0c..64e78394fc9c 100644 --- a/include/linux/mlx5/port.h +++ b/include/linux/mlx5/port.h | |||
| @@ -92,6 +92,22 @@ enum mlx5e_link_mode { | |||
| 92 | MLX5E_LINK_MODES_NUMBER, | 92 | MLX5E_LINK_MODES_NUMBER, |
| 93 | }; | 93 | }; |
| 94 | 94 | ||
| 95 | enum mlx5e_ext_link_mode { | ||
| 96 | MLX5E_SGMII_100M = 0, | ||
| 97 | MLX5E_1000BASE_X_SGMII = 1, | ||
| 98 | MLX5E_5GBASE_R = 3, | ||
| 99 | MLX5E_10GBASE_XFI_XAUI_1 = 4, | ||
| 100 | MLX5E_40GBASE_XLAUI_4_XLPPI_4 = 5, | ||
| 101 | MLX5E_25GAUI_1_25GBASE_CR_KR = 6, | ||
| 102 | MLX5E_50GAUI_2_LAUI_2_50GBASE_CR2_KR2 = 7, | ||
| 103 | MLX5E_50GAUI_1_LAUI_1_50GBASE_CR_KR = 8, | ||
| 104 | MLX5E_CAUI_4_100GBASE_CR4_KR4 = 9, | ||
| 105 | MLX5E_100GAUI_2_100GBASE_CR2_KR2 = 10, | ||
| 106 | MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12, | ||
| 107 | MLX5E_400GAUI_8 = 15, | ||
| 108 | MLX5E_EXT_LINK_MODES_NUMBER, | ||
| 109 | }; | ||
| 110 | |||
| 95 | enum mlx5e_connector_type { | 111 | enum mlx5e_connector_type { |
| 96 | MLX5E_PORT_UNKNOWN = 0, | 112 | MLX5E_PORT_UNKNOWN = 0, |
| 97 | MLX5E_PORT_NONE = 1, | 113 | MLX5E_PORT_NONE = 1, |
| @@ -106,31 +122,23 @@ enum mlx5e_connector_type { | |||
| 106 | }; | 122 | }; |
| 107 | 123 | ||
| 108 | #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) | 124 | #define MLX5E_PROT_MASK(link_mode) (1 << link_mode) |
| 125 | #define MLX5_GET_ETH_PROTO(reg, out, ext, field) \ | ||
| 126 | (ext ? MLX5_GET(reg, out, ext_##field) : \ | ||
| 127 | MLX5_GET(reg, out, field)) | ||
| 109 | 128 | ||
| 110 | int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); | 129 | int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps); |
| 111 | int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, | 130 | int mlx5_query_port_ptys(struct mlx5_core_dev *dev, u32 *ptys, |
| 112 | int ptys_size, int proto_mask, u8 local_port); | 131 | int ptys_size, int proto_mask, u8 local_port); |
| 113 | int mlx5_query_port_proto_cap(struct mlx5_core_dev *dev, | ||
| 114 | u32 *proto_cap, int proto_mask); | ||
| 115 | int mlx5_query_port_proto_admin(struct mlx5_core_dev *dev, | ||
| 116 | u32 *proto_admin, int proto_mask); | ||
| 117 | int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, | 132 | int mlx5_query_port_link_width_oper(struct mlx5_core_dev *dev, |
| 118 | u8 *link_width_oper, u8 local_port); | 133 | u8 *link_width_oper, u8 local_port); |
| 119 | int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, | 134 | int mlx5_query_port_ib_proto_oper(struct mlx5_core_dev *dev, |
| 120 | u8 *proto_oper, u8 local_port); | 135 | u8 *proto_oper, u8 local_port); |
| 121 | int mlx5_query_port_eth_proto_oper(struct mlx5_core_dev *dev, | ||
| 122 | u32 *proto_oper, u8 local_port); | ||
| 123 | int mlx5_set_port_ptys(struct mlx5_core_dev *dev, bool an_disable, | ||
| 124 | u32 proto_admin, int proto_mask); | ||
| 125 | void mlx5_toggle_port_link(struct mlx5_core_dev *dev); | 136 | void mlx5_toggle_port_link(struct mlx5_core_dev *dev); |
| 126 | int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, | 137 | int mlx5_set_port_admin_status(struct mlx5_core_dev *dev, |
| 127 | enum mlx5_port_status status); | 138 | enum mlx5_port_status status); |
| 128 | int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, | 139 | int mlx5_query_port_admin_status(struct mlx5_core_dev *dev, |
| 129 | enum mlx5_port_status *status); | 140 | enum mlx5_port_status *status); |
| 130 | int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); | 141 | int mlx5_set_port_beacon(struct mlx5_core_dev *dev, u16 beacon_duration); |
| 131 | void mlx5_query_port_autoneg(struct mlx5_core_dev *dev, int proto_mask, | ||
| 132 | u8 *an_status, | ||
| 133 | u8 *an_disable_cap, u8 *an_disable_admin); | ||
| 134 | 142 | ||
| 135 | int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); | 143 | int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port); |
| 136 | void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); | 144 | void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port); |
| @@ -174,6 +182,8 @@ int mlx5_query_port_ets_rate_limit(struct mlx5_core_dev *mdev, | |||
| 174 | int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); | 182 | int mlx5_set_port_wol(struct mlx5_core_dev *mdev, u8 wol_mode); |
| 175 | int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); | 183 | int mlx5_query_port_wol(struct mlx5_core_dev *mdev, u8 *wol_mode); |
| 176 | 184 | ||
| 185 | int mlx5_query_ports_check(struct mlx5_core_dev *mdev, u32 *out, int outlen); | ||
| 186 | int mlx5_set_ports_check(struct mlx5_core_dev *mdev, u32 *in, int inlen); | ||
| 177 | int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); | 187 | int mlx5_set_port_fcs(struct mlx5_core_dev *mdev, u8 enable); |
| 178 | void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, | 188 | void mlx5_query_port_fcs(struct mlx5_core_dev *mdev, bool *supported, |
| 179 | bool *enabled); | 189 | bool *enabled); |
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h index b26ea9077384..0343c81d4c5f 100644 --- a/include/linux/mlx5/qp.h +++ b/include/linux/mlx5/qp.h | |||
| @@ -557,7 +557,8 @@ static inline struct mlx5_core_mkey *__mlx5_mr_lookup(struct mlx5_core_dev *dev, | |||
| 557 | 557 | ||
| 558 | int mlx5_core_create_dct(struct mlx5_core_dev *dev, | 558 | int mlx5_core_create_dct(struct mlx5_core_dev *dev, |
| 559 | struct mlx5_core_dct *qp, | 559 | struct mlx5_core_dct *qp, |
| 560 | u32 *in, int inlen); | 560 | u32 *in, int inlen, |
| 561 | u32 *out, int outlen); | ||
| 561 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, | 562 | int mlx5_core_create_qp(struct mlx5_core_dev *dev, |
| 562 | struct mlx5_core_qp *qp, | 563 | struct mlx5_core_qp *qp, |
| 563 | u32 *in, | 564 | u32 *in, |
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h index 9c694808c212..0eef548b9946 100644 --- a/include/linux/mlx5/vport.h +++ b/include/linux/mlx5/vport.h | |||
| @@ -36,15 +36,38 @@ | |||
| 36 | #include <linux/mlx5/driver.h> | 36 | #include <linux/mlx5/driver.h> |
| 37 | #include <linux/mlx5/device.h> | 37 | #include <linux/mlx5/device.h> |
| 38 | 38 | ||
| 39 | #define MLX5_VPORT_PF_PLACEHOLDER (1u) | ||
| 40 | #define MLX5_VPORT_UPLINK_PLACEHOLDER (1u) | ||
| 41 | #define MLX5_VPORT_ECPF_PLACEHOLDER(mdev) (mlx5_ecpf_vport_exists(mdev)) | ||
| 42 | |||
| 43 | #define MLX5_SPECIAL_VPORTS(mdev) (MLX5_VPORT_PF_PLACEHOLDER + \ | ||
| 44 | MLX5_VPORT_UPLINK_PLACEHOLDER + \ | ||
| 45 | MLX5_VPORT_ECPF_PLACEHOLDER(mdev)) | ||
| 46 | |||
| 47 | #define MLX5_TOTAL_VPORTS(mdev) (MLX5_SPECIAL_VPORTS(mdev) + \ | ||
| 48 | mlx5_core_max_vfs(mdev)) | ||
| 49 | |||
| 50 | #define MLX5_VPORT_MANAGER(mdev) \ | ||
| 51 | (MLX5_CAP_GEN(mdev, vport_group_manager) && \ | ||
| 52 | (MLX5_CAP_GEN(mdev, port_type) == MLX5_CAP_PORT_TYPE_ETH) && \ | ||
| 53 | mlx5_core_is_pf(mdev)) | ||
| 54 | |||
| 39 | enum { | 55 | enum { |
| 40 | MLX5_CAP_INLINE_MODE_L2, | 56 | MLX5_CAP_INLINE_MODE_L2, |
| 41 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, | 57 | MLX5_CAP_INLINE_MODE_VPORT_CONTEXT, |
| 42 | MLX5_CAP_INLINE_MODE_NOT_REQUIRED, | 58 | MLX5_CAP_INLINE_MODE_NOT_REQUIRED, |
| 43 | }; | 59 | }; |
| 44 | 60 | ||
| 61 | enum { | ||
| 62 | MLX5_VPORT_PF = 0x0, | ||
| 63 | MLX5_VPORT_FIRST_VF = 0x1, | ||
| 64 | MLX5_VPORT_ECPF = 0xfffe, | ||
| 65 | MLX5_VPORT_UPLINK = 0xffff | ||
| 66 | }; | ||
| 67 | |||
| 45 | u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); | 68 | u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport); |
| 46 | int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, | 69 | int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod, |
| 47 | u16 vport, u8 state); | 70 | u16 vport, u8 other_vport, u8 state); |
| 48 | int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, | 71 | int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev, |
| 49 | u16 vport, u8 *addr); | 72 | u16 vport, u8 *addr); |
| 50 | int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, | 73 | int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev, |
| @@ -60,7 +83,7 @@ int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev, | |||
| 60 | u64 *system_image_guid); | 83 | u64 *system_image_guid); |
| 61 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); | 84 | int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid); |
| 62 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, | 85 | int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev, |
| 63 | u32 vport, u64 node_guid); | 86 | u16 vport, u64 node_guid); |
| 64 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, | 87 | int mlx5_query_nic_vport_qkey_viol_cntr(struct mlx5_core_dev *mdev, |
| 65 | u16 *qkey_viol_cntr); | 88 | u16 *qkey_viol_cntr); |
| 66 | int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, | 89 | int mlx5_query_hca_vport_gid(struct mlx5_core_dev *dev, u8 other_vport, |
| @@ -78,7 +101,7 @@ int mlx5_query_hca_vport_system_image_guid(struct mlx5_core_dev *dev, | |||
| 78 | int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, | 101 | int mlx5_query_hca_vport_node_guid(struct mlx5_core_dev *dev, |
| 79 | u64 *node_guid); | 102 | u64 *node_guid); |
| 80 | int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, | 103 | int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev, |
| 81 | u32 vport, | 104 | u16 vport, |
| 82 | enum mlx5_list_type list_type, | 105 | enum mlx5_list_type list_type, |
| 83 | u8 addr_list[][ETH_ALEN], | 106 | u8 addr_list[][ETH_ALEN], |
| 84 | int *list_size); | 107 | int *list_size); |
| @@ -87,7 +110,7 @@ int mlx5_modify_nic_vport_mac_list(struct mlx5_core_dev *dev, | |||
| 87 | u8 addr_list[][ETH_ALEN], | 110 | u8 addr_list[][ETH_ALEN], |
| 88 | int list_size); | 111 | int list_size); |
| 89 | int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, | 112 | int mlx5_query_nic_vport_promisc(struct mlx5_core_dev *mdev, |
| 90 | u32 vport, | 113 | u16 vport, |
| 91 | int *promisc_uc, | 114 | int *promisc_uc, |
| 92 | int *promisc_mc, | 115 | int *promisc_mc, |
| 93 | int *promisc_all); | 116 | int *promisc_all); |
| @@ -96,7 +119,7 @@ int mlx5_modify_nic_vport_promisc(struct mlx5_core_dev *mdev, | |||
| 96 | int promisc_mc, | 119 | int promisc_mc, |
| 97 | int promisc_all); | 120 | int promisc_all); |
| 98 | int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, | 121 | int mlx5_query_nic_vport_vlans(struct mlx5_core_dev *dev, |
| 99 | u32 vport, | 122 | u16 vport, |
| 100 | u16 vlans[], | 123 | u16 vlans[], |
| 101 | int *size); | 124 | int *size); |
| 102 | int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, | 125 | int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, |
| @@ -106,7 +129,7 @@ int mlx5_modify_nic_vport_vlans(struct mlx5_core_dev *dev, | |||
| 106 | int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); | 129 | int mlx5_nic_vport_enable_roce(struct mlx5_core_dev *mdev); |
| 107 | int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); | 130 | int mlx5_nic_vport_disable_roce(struct mlx5_core_dev *mdev); |
| 108 | int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, | 131 | int mlx5_query_vport_down_stats(struct mlx5_core_dev *mdev, u16 vport, |
| 109 | u64 *rx_discard_vport_down, | 132 | u8 other_vport, u64 *rx_discard_vport_down, |
| 110 | u64 *tx_discard_vport_down); | 133 | u64 *tx_discard_vport_down); |
| 111 | int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, | 134 | int mlx5_core_query_vport_counter(struct mlx5_core_dev *dev, u8 other_vport, |
| 112 | int vf, u8 port_num, void *out, | 135 | int vf, u8 port_num, void *out, |
diff --git a/include/linux/mm.h b/include/linux/mm.h index 80bb6408fe73..6b10c21630f5 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
| @@ -26,6 +26,7 @@ | |||
| 26 | #include <linux/page_ref.h> | 26 | #include <linux/page_ref.h> |
| 27 | #include <linux/memremap.h> | 27 | #include <linux/memremap.h> |
| 28 | #include <linux/overflow.h> | 28 | #include <linux/overflow.h> |
| 29 | #include <linux/sizes.h> | ||
| 29 | 30 | ||
| 30 | struct mempolicy; | 31 | struct mempolicy; |
| 31 | struct anon_vma; | 32 | struct anon_vma; |
| @@ -965,6 +966,10 @@ static inline bool is_pci_p2pdma_page(const struct page *page) | |||
| 965 | } | 966 | } |
| 966 | #endif /* CONFIG_DEV_PAGEMAP_OPS */ | 967 | #endif /* CONFIG_DEV_PAGEMAP_OPS */ |
| 967 | 968 | ||
| 969 | /* 127: arbitrary random number, small enough to assemble well */ | ||
| 970 | #define page_ref_zero_or_close_to_overflow(page) \ | ||
| 971 | ((unsigned int) page_ref_count(page) + 127u <= 127u) | ||
| 972 | |||
| 968 | static inline void get_page(struct page *page) | 973 | static inline void get_page(struct page *page) |
| 969 | { | 974 | { |
| 970 | page = compound_head(page); | 975 | page = compound_head(page); |
| @@ -972,10 +977,19 @@ static inline void get_page(struct page *page) | |||
| 972 | * Getting a normal page or the head of a compound page | 977 | * Getting a normal page or the head of a compound page |
| 973 | * requires to already have an elevated page->_refcount. | 978 | * requires to already have an elevated page->_refcount. |
| 974 | */ | 979 | */ |
| 975 | VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page); | 980 | VM_BUG_ON_PAGE(page_ref_zero_or_close_to_overflow(page), page); |
| 976 | page_ref_inc(page); | 981 | page_ref_inc(page); |
| 977 | } | 982 | } |
| 978 | 983 | ||
| 984 | static inline __must_check bool try_get_page(struct page *page) | ||
| 985 | { | ||
| 986 | page = compound_head(page); | ||
| 987 | if (WARN_ON_ONCE(page_ref_count(page) <= 0)) | ||
| 988 | return false; | ||
| 989 | page_ref_inc(page); | ||
| 990 | return true; | ||
| 991 | } | ||
| 992 | |||
| 979 | static inline void put_page(struct page *page) | 993 | static inline void put_page(struct page *page) |
| 980 | { | 994 | { |
| 981 | page = compound_head(page); | 995 | page = compound_head(page); |
| @@ -1323,52 +1337,6 @@ static inline void clear_page_pfmemalloc(struct page *page) | |||
| 1323 | } | 1337 | } |
| 1324 | 1338 | ||
| 1325 | /* | 1339 | /* |
| 1326 | * Different kinds of faults, as returned by handle_mm_fault(). | ||
| 1327 | * Used to decide whether a process gets delivered SIGBUS or | ||
| 1328 | * just gets major/minor fault counters bumped up. | ||
| 1329 | */ | ||
| 1330 | |||
| 1331 | #define VM_FAULT_OOM 0x0001 | ||
| 1332 | #define VM_FAULT_SIGBUS 0x0002 | ||
| 1333 | #define VM_FAULT_MAJOR 0x0004 | ||
| 1334 | #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ | ||
| 1335 | #define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ | ||
| 1336 | #define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */ | ||
| 1337 | #define VM_FAULT_SIGSEGV 0x0040 | ||
| 1338 | |||
| 1339 | #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ | ||
| 1340 | #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ | ||
| 1341 | #define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ | ||
| 1342 | #define VM_FAULT_FALLBACK 0x0800 /* huge page fault failed, fall back to small */ | ||
| 1343 | #define VM_FAULT_DONE_COW 0x1000 /* ->fault has fully handled COW */ | ||
| 1344 | #define VM_FAULT_NEEDDSYNC 0x2000 /* ->fault did not modify page tables | ||
| 1345 | * and needs fsync() to complete (for | ||
| 1346 | * synchronous page faults in DAX) */ | ||
| 1347 | |||
| 1348 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \ | ||
| 1349 | VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \ | ||
| 1350 | VM_FAULT_FALLBACK) | ||
| 1351 | |||
| 1352 | #define VM_FAULT_RESULT_TRACE \ | ||
| 1353 | { VM_FAULT_OOM, "OOM" }, \ | ||
| 1354 | { VM_FAULT_SIGBUS, "SIGBUS" }, \ | ||
| 1355 | { VM_FAULT_MAJOR, "MAJOR" }, \ | ||
| 1356 | { VM_FAULT_WRITE, "WRITE" }, \ | ||
| 1357 | { VM_FAULT_HWPOISON, "HWPOISON" }, \ | ||
| 1358 | { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ | ||
| 1359 | { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ | ||
| 1360 | { VM_FAULT_NOPAGE, "NOPAGE" }, \ | ||
| 1361 | { VM_FAULT_LOCKED, "LOCKED" }, \ | ||
| 1362 | { VM_FAULT_RETRY, "RETRY" }, \ | ||
| 1363 | { VM_FAULT_FALLBACK, "FALLBACK" }, \ | ||
| 1364 | { VM_FAULT_DONE_COW, "DONE_COW" }, \ | ||
| 1365 | { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } | ||
| 1366 | |||
| 1367 | /* Encode hstate index for a hwpoisoned large page */ | ||
| 1368 | #define VM_FAULT_SET_HINDEX(x) ((x) << 12) | ||
| 1369 | #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) | ||
| 1370 | |||
| 1371 | /* | ||
| 1372 | * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. | 1340 | * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. |
| 1373 | */ | 1341 | */ |
| 1374 | extern void pagefault_out_of_memory(void); | 1342 | extern void pagefault_out_of_memory(void); |
| @@ -1536,7 +1504,8 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, | |||
| 1536 | unsigned int gup_flags, struct page **pages, int *locked); | 1504 | unsigned int gup_flags, struct page **pages, int *locked); |
| 1537 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, | 1505 | long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, |
| 1538 | struct page **pages, unsigned int gup_flags); | 1506 | struct page **pages, unsigned int gup_flags); |
| 1539 | #ifdef CONFIG_FS_DAX | 1507 | |
| 1508 | #if defined(CONFIG_FS_DAX) || defined(CONFIG_CMA) | ||
| 1540 | long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, | 1509 | long get_user_pages_longterm(unsigned long start, unsigned long nr_pages, |
| 1541 | unsigned int gup_flags, struct page **pages, | 1510 | unsigned int gup_flags, struct page **pages, |
| 1542 | struct vm_area_struct **vmas); | 1511 | struct vm_area_struct **vmas); |
| @@ -2447,8 +2416,7 @@ int __must_check write_one_page(struct page *page); | |||
| 2447 | void task_dirty_inc(struct task_struct *tsk); | 2416 | void task_dirty_inc(struct task_struct *tsk); |
| 2448 | 2417 | ||
| 2449 | /* readahead.c */ | 2418 | /* readahead.c */ |
| 2450 | #define VM_MAX_READAHEAD 128 /* kbytes */ | 2419 | #define VM_READAHEAD_PAGES (SZ_128K / PAGE_SIZE) |
| 2451 | #define VM_MIN_READAHEAD 16 /* kbytes (includes current page) */ | ||
| 2452 | 2420 | ||
| 2453 | int force_page_cache_readahead(struct address_space *mapping, struct file *filp, | 2421 | int force_page_cache_readahead(struct address_space *mapping, struct file *filp, |
| 2454 | pgoff_t offset, unsigned long nr_to_read); | 2422 | pgoff_t offset, unsigned long nr_to_read); |
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 2c471a2c43fa..4ef4bbe78a1d 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
| @@ -22,7 +22,6 @@ | |||
| 22 | #endif | 22 | #endif |
| 23 | #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) | 23 | #define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1)) |
| 24 | 24 | ||
| 25 | typedef int vm_fault_t; | ||
| 26 | 25 | ||
| 27 | struct address_space; | 26 | struct address_space; |
| 28 | struct mem_cgroup; | 27 | struct mem_cgroup; |
| @@ -80,7 +79,7 @@ struct page { | |||
| 80 | struct { /* Page cache and anonymous pages */ | 79 | struct { /* Page cache and anonymous pages */ |
| 81 | /** | 80 | /** |
| 82 | * @lru: Pageout list, eg. active_list protected by | 81 | * @lru: Pageout list, eg. active_list protected by |
| 83 | * zone_lru_lock. Sometimes used as a generic list | 82 | * pgdat->lru_lock. Sometimes used as a generic list |
| 84 | * by the page owner. | 83 | * by the page owner. |
| 85 | */ | 84 | */ |
| 86 | struct list_head lru; | 85 | struct list_head lru; |
| @@ -95,6 +94,13 @@ struct page { | |||
| 95 | */ | 94 | */ |
| 96 | unsigned long private; | 95 | unsigned long private; |
| 97 | }; | 96 | }; |
| 97 | struct { /* page_pool used by netstack */ | ||
| 98 | /** | ||
| 99 | * @dma_addr: might require a 64-bit value even on | ||
| 100 | * 32-bit architectures. | ||
| 101 | */ | ||
| 102 | dma_addr_t dma_addr; | ||
| 103 | }; | ||
| 98 | struct { /* slab, slob and slub */ | 104 | struct { /* slab, slob and slub */ |
| 99 | union { | 105 | union { |
| 100 | struct list_head slab_list; /* uses lru */ | 106 | struct list_head slab_list; /* uses lru */ |
| @@ -405,7 +411,7 @@ struct mm_struct { | |||
| 405 | 411 | ||
| 406 | unsigned long total_vm; /* Total pages mapped */ | 412 | unsigned long total_vm; /* Total pages mapped */ |
| 407 | unsigned long locked_vm; /* Pages that have PG_mlocked set */ | 413 | unsigned long locked_vm; /* Pages that have PG_mlocked set */ |
| 408 | unsigned long pinned_vm; /* Refcount permanently increased */ | 414 | atomic64_t pinned_vm; /* Refcount permanently increased */ |
| 409 | unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ | 415 | unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */ |
| 410 | unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ | 416 | unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */ |
| 411 | unsigned long stack_vm; /* VM_STACK */ | 417 | unsigned long stack_vm; /* VM_STACK */ |
| @@ -614,6 +620,78 @@ static inline bool mm_tlb_flush_nested(struct mm_struct *mm) | |||
| 614 | 620 | ||
| 615 | struct vm_fault; | 621 | struct vm_fault; |
| 616 | 622 | ||
| 623 | /** | ||
| 624 | * typedef vm_fault_t - Return type for page fault handlers. | ||
| 625 | * | ||
| 626 | * Page fault handlers return a bitmask of %VM_FAULT values. | ||
| 627 | */ | ||
| 628 | typedef __bitwise unsigned int vm_fault_t; | ||
| 629 | |||
| 630 | /** | ||
| 631 | * enum vm_fault_reason - Page fault handlers return a bitmask of | ||
| 632 | * these values to tell the core VM what happened when handling the | ||
| 633 | * fault. Used to decide whether a process gets delivered SIGBUS or | ||
| 634 | * just gets major/minor fault counters bumped up. | ||
| 635 | * | ||
| 636 | * @VM_FAULT_OOM: Out Of Memory | ||
| 637 | * @VM_FAULT_SIGBUS: Bad access | ||
| 638 | * @VM_FAULT_MAJOR: Page read from storage | ||
| 639 | * @VM_FAULT_WRITE: Special case for get_user_pages | ||
| 640 | * @VM_FAULT_HWPOISON: Hit poisoned small page | ||
| 641 | * @VM_FAULT_HWPOISON_LARGE: Hit poisoned large page. Index encoded | ||
| 642 | * in upper bits | ||
| 643 | * @VM_FAULT_SIGSEGV: segmentation fault | ||
| 644 | * @VM_FAULT_NOPAGE: ->fault installed the pte, not return page | ||
| 645 | * @VM_FAULT_LOCKED: ->fault locked the returned page | ||
| 646 | * @VM_FAULT_RETRY: ->fault blocked, must retry | ||
| 647 | * @VM_FAULT_FALLBACK: huge page fault failed, fall back to small | ||
| 648 | * @VM_FAULT_DONE_COW: ->fault has fully handled COW | ||
| 649 | * @VM_FAULT_NEEDDSYNC: ->fault did not modify page tables and needs | ||
| 650 | * fsync() to complete (for synchronous page faults | ||
| 651 | * in DAX) | ||
| 652 | * @VM_FAULT_HINDEX_MASK: mask HINDEX value | ||
| 653 | * | ||
| 654 | */ | ||
| 655 | enum vm_fault_reason { | ||
| 656 | VM_FAULT_OOM = (__force vm_fault_t)0x000001, | ||
| 657 | VM_FAULT_SIGBUS = (__force vm_fault_t)0x000002, | ||
| 658 | VM_FAULT_MAJOR = (__force vm_fault_t)0x000004, | ||
| 659 | VM_FAULT_WRITE = (__force vm_fault_t)0x000008, | ||
| 660 | VM_FAULT_HWPOISON = (__force vm_fault_t)0x000010, | ||
| 661 | VM_FAULT_HWPOISON_LARGE = (__force vm_fault_t)0x000020, | ||
| 662 | VM_FAULT_SIGSEGV = (__force vm_fault_t)0x000040, | ||
| 663 | VM_FAULT_NOPAGE = (__force vm_fault_t)0x000100, | ||
| 664 | VM_FAULT_LOCKED = (__force vm_fault_t)0x000200, | ||
| 665 | VM_FAULT_RETRY = (__force vm_fault_t)0x000400, | ||
| 666 | VM_FAULT_FALLBACK = (__force vm_fault_t)0x000800, | ||
| 667 | VM_FAULT_DONE_COW = (__force vm_fault_t)0x001000, | ||
| 668 | VM_FAULT_NEEDDSYNC = (__force vm_fault_t)0x002000, | ||
| 669 | VM_FAULT_HINDEX_MASK = (__force vm_fault_t)0x0f0000, | ||
| 670 | }; | ||
| 671 | |||
| 672 | /* Encode hstate index for a hwpoisoned large page */ | ||
| 673 | #define VM_FAULT_SET_HINDEX(x) ((__force vm_fault_t)((x) << 16)) | ||
| 674 | #define VM_FAULT_GET_HINDEX(x) (((__force unsigned int)(x) >> 16) & 0xf) | ||
| 675 | |||
| 676 | #define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | \ | ||
| 677 | VM_FAULT_SIGSEGV | VM_FAULT_HWPOISON | \ | ||
| 678 | VM_FAULT_HWPOISON_LARGE | VM_FAULT_FALLBACK) | ||
| 679 | |||
| 680 | #define VM_FAULT_RESULT_TRACE \ | ||
| 681 | { VM_FAULT_OOM, "OOM" }, \ | ||
| 682 | { VM_FAULT_SIGBUS, "SIGBUS" }, \ | ||
| 683 | { VM_FAULT_MAJOR, "MAJOR" }, \ | ||
| 684 | { VM_FAULT_WRITE, "WRITE" }, \ | ||
| 685 | { VM_FAULT_HWPOISON, "HWPOISON" }, \ | ||
| 686 | { VM_FAULT_HWPOISON_LARGE, "HWPOISON_LARGE" }, \ | ||
| 687 | { VM_FAULT_SIGSEGV, "SIGSEGV" }, \ | ||
| 688 | { VM_FAULT_NOPAGE, "NOPAGE" }, \ | ||
| 689 | { VM_FAULT_LOCKED, "LOCKED" }, \ | ||
| 690 | { VM_FAULT_RETRY, "RETRY" }, \ | ||
| 691 | { VM_FAULT_FALLBACK, "FALLBACK" }, \ | ||
| 692 | { VM_FAULT_DONE_COW, "DONE_COW" }, \ | ||
| 693 | { VM_FAULT_NEEDDSYNC, "NEEDDSYNC" } | ||
| 694 | |||
| 617 | struct vm_special_mapping { | 695 | struct vm_special_mapping { |
| 618 | const char *name; /* The name, e.g. "[vdso]". */ | 696 | const char *name; /* The name, e.g. "[vdso]". */ |
| 619 | 697 | ||
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h index de7377815b6b..19566ab9decb 100644 --- a/include/linux/mmc/card.h +++ b/include/linux/mmc/card.h | |||
| @@ -133,6 +133,8 @@ struct mmc_ext_csd { | |||
| 133 | struct sd_scr { | 133 | struct sd_scr { |
| 134 | unsigned char sda_vsn; | 134 | unsigned char sda_vsn; |
| 135 | unsigned char sda_spec3; | 135 | unsigned char sda_spec3; |
| 136 | unsigned char sda_spec4; | ||
| 137 | unsigned char sda_specx; | ||
| 136 | unsigned char bus_widths; | 138 | unsigned char bus_widths; |
| 137 | #define SD_SCR_BUS_WIDTH_1 (1<<0) | 139 | #define SD_SCR_BUS_WIDTH_1 (1<<0) |
| 138 | #define SD_SCR_BUS_WIDTH_4 (1<<2) | 140 | #define SD_SCR_BUS_WIDTH_4 (1<<2) |
| @@ -277,6 +279,7 @@ struct mmc_card { | |||
| 277 | unsigned int erase_shift; /* if erase unit is power 2 */ | 279 | unsigned int erase_shift; /* if erase unit is power 2 */ |
| 278 | unsigned int pref_erase; /* in sectors */ | 280 | unsigned int pref_erase; /* in sectors */ |
| 279 | unsigned int eg_boundary; /* don't cross erase-group boundaries */ | 281 | unsigned int eg_boundary; /* don't cross erase-group boundaries */ |
| 282 | unsigned int erase_arg; /* erase / trim / discard */ | ||
| 280 | u8 erased_byte; /* value of erased bytes */ | 283 | u8 erased_byte; /* value of erased bytes */ |
| 281 | 284 | ||
| 282 | u32 raw_cid[4]; /* raw card CID */ | 285 | u32 raw_cid[4]; /* raw card CID */ |
| @@ -308,6 +311,7 @@ struct mmc_card { | |||
| 308 | unsigned int nr_parts; | 311 | unsigned int nr_parts; |
| 309 | 312 | ||
| 310 | unsigned int bouncesz; /* Bounce buffer size */ | 313 | unsigned int bouncesz; /* Bounce buffer size */ |
| 314 | struct workqueue_struct *complete_wq; /* Private workqueue */ | ||
| 311 | }; | 315 | }; |
| 312 | 316 | ||
| 313 | static inline bool mmc_large_sector(struct mmc_card *card) | 317 | static inline bool mmc_large_sector(struct mmc_card *card) |
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h index 4d35ff36ceff..43d0f0c496f6 100644 --- a/include/linux/mmc/host.h +++ b/include/linux/mmc/host.h | |||
| @@ -478,6 +478,11 @@ static inline void *mmc_priv(struct mmc_host *host) | |||
| 478 | return (void *)host->private; | 478 | return (void *)host->private; |
| 479 | } | 479 | } |
| 480 | 480 | ||
| 481 | static inline struct mmc_host *mmc_from_priv(void *priv) | ||
| 482 | { | ||
| 483 | return container_of(priv, struct mmc_host, private); | ||
| 484 | } | ||
| 485 | |||
| 481 | #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) | 486 | #define mmc_host_is_spi(host) ((host)->caps & MMC_CAP_SPI) |
| 482 | 487 | ||
| 483 | #define mmc_dev(x) ((x)->parent) | 488 | #define mmc_dev(x) ((x)->parent) |
| @@ -502,17 +507,11 @@ void sdio_run_irqs(struct mmc_host *host); | |||
| 502 | void sdio_signal_irq(struct mmc_host *host); | 507 | void sdio_signal_irq(struct mmc_host *host); |
| 503 | 508 | ||
| 504 | #ifdef CONFIG_REGULATOR | 509 | #ifdef CONFIG_REGULATOR |
| 505 | int mmc_regulator_get_ocrmask(struct regulator *supply); | ||
| 506 | int mmc_regulator_set_ocr(struct mmc_host *mmc, | 510 | int mmc_regulator_set_ocr(struct mmc_host *mmc, |
| 507 | struct regulator *supply, | 511 | struct regulator *supply, |
| 508 | unsigned short vdd_bit); | 512 | unsigned short vdd_bit); |
| 509 | int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); | 513 | int mmc_regulator_set_vqmmc(struct mmc_host *mmc, struct mmc_ios *ios); |
| 510 | #else | 514 | #else |
| 511 | static inline int mmc_regulator_get_ocrmask(struct regulator *supply) | ||
| 512 | { | ||
| 513 | return 0; | ||
| 514 | } | ||
| 515 | |||
| 516 | static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, | 515 | static inline int mmc_regulator_set_ocr(struct mmc_host *mmc, |
| 517 | struct regulator *supply, | 516 | struct regulator *supply, |
| 518 | unsigned short vdd_bit) | 517 | unsigned short vdd_bit) |
| @@ -527,7 +526,6 @@ static inline int mmc_regulator_set_vqmmc(struct mmc_host *mmc, | |||
| 527 | } | 526 | } |
| 528 | #endif | 527 | #endif |
| 529 | 528 | ||
| 530 | u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max); | ||
| 531 | int mmc_regulator_get_supply(struct mmc_host *mmc); | 529 | int mmc_regulator_get_supply(struct mmc_host *mmc); |
| 532 | 530 | ||
| 533 | static inline int mmc_card_is_removable(struct mmc_host *host) | 531 | static inline int mmc_card_is_removable(struct mmc_host *host) |
diff --git a/include/linux/mmc/sd.h b/include/linux/mmc/sd.h index 1ebcf9ba1256..ec94a5aa02bb 100644 --- a/include/linux/mmc/sd.h +++ b/include/linux/mmc/sd.h | |||
| @@ -91,4 +91,10 @@ | |||
| 91 | #define SD_SWITCH_ACCESS_DEF 0 | 91 | #define SD_SWITCH_ACCESS_DEF 0 |
| 92 | #define SD_SWITCH_ACCESS_HS 1 | 92 | #define SD_SWITCH_ACCESS_HS 1 |
| 93 | 93 | ||
| 94 | /* | ||
| 95 | * Erase/discard | ||
| 96 | */ | ||
| 97 | #define SD_ERASE_ARG 0x00000000 | ||
| 98 | #define SD_DISCARD_ARG 0x00000001 | ||
| 99 | |||
| 94 | #endif /* LINUX_MMC_SD_H */ | 100 | #endif /* LINUX_MMC_SD_H */ |
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h index feebd7aa6f5c..9fd3ce64a885 100644 --- a/include/linux/mmc/slot-gpio.h +++ b/include/linux/mmc/slot-gpio.h | |||
| @@ -22,7 +22,7 @@ int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, | |||
| 22 | unsigned int idx, bool override_active_level, | 22 | unsigned int idx, bool override_active_level, |
| 23 | unsigned int debounce, bool *gpio_invert); | 23 | unsigned int debounce, bool *gpio_invert); |
| 24 | int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, | 24 | int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id, |
| 25 | unsigned int idx, bool override_active_level, | 25 | unsigned int idx, |
| 26 | unsigned int debounce, bool *gpio_invert); | 26 | unsigned int debounce, bool *gpio_invert); |
| 27 | void mmc_gpio_set_cd_isr(struct mmc_host *host, | 27 | void mmc_gpio_set_cd_isr(struct mmc_host *host, |
| 28 | irqreturn_t (*isr)(int irq, void *dev_id)); | 28 | irqreturn_t (*isr)(int irq, void *dev_id)); |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index cc4a507d7ca4..fba7741533be 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
| @@ -480,6 +480,8 @@ struct zone { | |||
| 480 | unsigned long compact_cached_free_pfn; | 480 | unsigned long compact_cached_free_pfn; |
| 481 | /* pfn where async and sync compaction migration scanner should start */ | 481 | /* pfn where async and sync compaction migration scanner should start */ |
| 482 | unsigned long compact_cached_migrate_pfn[2]; | 482 | unsigned long compact_cached_migrate_pfn[2]; |
| 483 | unsigned long compact_init_migrate_pfn; | ||
| 484 | unsigned long compact_init_free_pfn; | ||
| 483 | #endif | 485 | #endif |
| 484 | 486 | ||
| 485 | #ifdef CONFIG_COMPACTION | 487 | #ifdef CONFIG_COMPACTION |
| @@ -520,6 +522,12 @@ enum pgdat_flags { | |||
| 520 | PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ | 522 | PGDAT_RECLAIM_LOCKED, /* prevents concurrent reclaim */ |
| 521 | }; | 523 | }; |
| 522 | 524 | ||
| 525 | enum zone_flags { | ||
| 526 | ZONE_BOOSTED_WATERMARK, /* zone recently boosted watermarks. | ||
| 527 | * Cleared when kswapd is woken. | ||
| 528 | */ | ||
| 529 | }; | ||
| 530 | |||
| 523 | static inline unsigned long zone_managed_pages(struct zone *zone) | 531 | static inline unsigned long zone_managed_pages(struct zone *zone) |
| 524 | { | 532 | { |
| 525 | return (unsigned long)atomic_long_read(&zone->managed_pages); | 533 | return (unsigned long)atomic_long_read(&zone->managed_pages); |
| @@ -722,10 +730,6 @@ typedef struct pglist_data { | |||
| 722 | 730 | ||
| 723 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | 731 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) |
| 724 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) | 732 | #define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid)) |
| 725 | static inline spinlock_t *zone_lru_lock(struct zone *zone) | ||
| 726 | { | ||
| 727 | return &zone->zone_pgdat->lru_lock; | ||
| 728 | } | ||
| 729 | 733 | ||
| 730 | static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) | 734 | static inline struct lruvec *node_lruvec(struct pglist_data *pgdat) |
| 731 | { | 735 | { |
| @@ -1293,7 +1297,7 @@ void memory_present(int nid, unsigned long start, unsigned long end); | |||
| 1293 | 1297 | ||
| 1294 | /* | 1298 | /* |
| 1295 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we | 1299 | * If it is possible to have holes within a MAX_ORDER_NR_PAGES, then we |
| 1296 | * need to check pfn validility within that MAX_ORDER_NR_PAGES block. | 1300 | * need to check pfn validity within that MAX_ORDER_NR_PAGES block. |
| 1297 | * pfn_valid_within() should be used in this case; we optimise this away | 1301 | * pfn_valid_within() should be used in this case; we optimise this away |
| 1298 | * when we have no holes within a MAX_ORDER_NR_PAGES block. | 1302 | * when we have no holes within a MAX_ORDER_NR_PAGES block. |
| 1299 | */ | 1303 | */ |
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h index f9bd2f34b99f..448621c32e4d 100644 --- a/include/linux/mod_devicetable.h +++ b/include/linux/mod_devicetable.h | |||
| @@ -779,4 +779,25 @@ struct typec_device_id { | |||
| 779 | kernel_ulong_t driver_data; | 779 | kernel_ulong_t driver_data; |
| 780 | }; | 780 | }; |
| 781 | 781 | ||
| 782 | /** | ||
| 783 | * struct tee_client_device_id - tee based device identifier | ||
| 784 | * @uuid: For TEE based client devices we use the device uuid as | ||
| 785 | * the identifier. | ||
| 786 | */ | ||
| 787 | struct tee_client_device_id { | ||
| 788 | uuid_t uuid; | ||
| 789 | }; | ||
| 790 | |||
| 791 | /* WMI */ | ||
| 792 | |||
| 793 | #define WMI_MODULE_PREFIX "wmi:" | ||
| 794 | |||
| 795 | /** | ||
| 796 | * struct wmi_device_id - WMI device identifier | ||
| 797 | * @guid_string: 36 char string of the form fa50ff2b-f2e8-45de-83fa-65417f2f49ba | ||
| 798 | */ | ||
| 799 | struct wmi_device_id { | ||
| 800 | const char guid_string[UUID_STRING_LEN+1]; | ||
| 801 | }; | ||
| 802 | |||
| 782 | #endif /* LINUX_MOD_DEVICETABLE_H */ | 803 | #endif /* LINUX_MOD_DEVICETABLE_H */ |
diff --git a/include/linux/module.h b/include/linux/module.h index 9a21fe3509af..5bf5dcd91009 100644 --- a/include/linux/module.h +++ b/include/linux/module.h | |||
| @@ -129,13 +129,13 @@ extern void cleanup_module(void); | |||
| 129 | #define module_init(initfn) \ | 129 | #define module_init(initfn) \ |
| 130 | static inline initcall_t __maybe_unused __inittest(void) \ | 130 | static inline initcall_t __maybe_unused __inittest(void) \ |
| 131 | { return initfn; } \ | 131 | { return initfn; } \ |
| 132 | int init_module(void) __attribute__((alias(#initfn))); | 132 | int init_module(void) __copy(initfn) __attribute__((alias(#initfn))); |
| 133 | 133 | ||
| 134 | /* This is only required if you want to be unloadable. */ | 134 | /* This is only required if you want to be unloadable. */ |
| 135 | #define module_exit(exitfn) \ | 135 | #define module_exit(exitfn) \ |
| 136 | static inline exitcall_t __maybe_unused __exittest(void) \ | 136 | static inline exitcall_t __maybe_unused __exittest(void) \ |
| 137 | { return exitfn; } \ | 137 | { return exitfn; } \ |
| 138 | void cleanup_module(void) __attribute__((alias(#exitfn))); | 138 | void cleanup_module(void) __copy(exitfn) __attribute__((alias(#exitfn))); |
| 139 | 139 | ||
| 140 | #endif | 140 | #endif |
| 141 | 141 | ||
| @@ -172,7 +172,7 @@ extern void cleanup_module(void); | |||
| 172 | * The following license idents are currently accepted as indicating free | 172 | * The following license idents are currently accepted as indicating free |
| 173 | * software modules | 173 | * software modules |
| 174 | * | 174 | * |
| 175 | * "GPL" [GNU Public License v2 or later] | 175 | * "GPL" [GNU Public License v2] |
| 176 | * "GPL v2" [GNU Public License v2] | 176 | * "GPL v2" [GNU Public License v2] |
| 177 | * "GPL and additional rights" [GNU Public License v2 rights and more] | 177 | * "GPL and additional rights" [GNU Public License v2 rights and more] |
| 178 | * "Dual BSD/GPL" [GNU Public License v2 | 178 | * "Dual BSD/GPL" [GNU Public License v2 |
| @@ -186,6 +186,22 @@ extern void cleanup_module(void); | |||
| 186 | * | 186 | * |
| 187 | * "Proprietary" [Non free products] | 187 | * "Proprietary" [Non free products] |
| 188 | * | 188 | * |
| 189 | * Both "GPL v2" and "GPL" (the latter also in dual licensed strings) are | ||
| 190 | * merely stating that the module is licensed under the GPL v2, but are not | ||
| 191 | * telling whether "GPL v2 only" or "GPL v2 or later". The reason why there | ||
| 192 | * are two variants is a historic and failed attempt to convey more | ||
| 193 | * information in the MODULE_LICENSE string. For module loading the | ||
| 194 | * "only/or later" distinction is completely irrelevant and does neither | ||
| 195 | * replace the proper license identifiers in the corresponding source file | ||
| 196 | * nor amends them in any way. The sole purpose is to make the | ||
| 197 | * 'Proprietary' flagging work and to refuse to bind symbols which are | ||
| 198 | * exported with EXPORT_SYMBOL_GPL when a non free module is loaded. | ||
| 199 | * | ||
| 200 | * In the same way "BSD" is not a clear license information. It merely | ||
| 201 | * states, that the module is licensed under one of the compatible BSD | ||
| 202 | * license variants. The detailed and correct license information is again | ||
| 203 | * to be found in the corresponding source files. | ||
| 204 | * | ||
| 189 | * There are dual licensed components, but when running with Linux it is the | 205 | * There are dual licensed components, but when running with Linux it is the |
| 190 | * GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL | 206 | * GPL that is relevant so this is a non issue. Similarly LGPL linked with GPL |
| 191 | * is a GPL combined work. | 207 | * is a GPL combined work. |
| @@ -828,7 +844,7 @@ static inline void module_bug_finalize(const Elf_Ehdr *hdr, | |||
| 828 | static inline void module_bug_cleanup(struct module *mod) {} | 844 | static inline void module_bug_cleanup(struct module *mod) {} |
| 829 | #endif /* CONFIG_GENERIC_BUG */ | 845 | #endif /* CONFIG_GENERIC_BUG */ |
| 830 | 846 | ||
| 831 | #ifdef RETPOLINE | 847 | #ifdef CONFIG_RETPOLINE |
| 832 | extern bool retpoline_module_ok(bool has_retpoline); | 848 | extern bool retpoline_module_ok(bool has_retpoline); |
| 833 | #else | 849 | #else |
| 834 | static inline bool retpoline_module_ok(bool has_retpoline) | 850 | static inline bool retpoline_module_ok(bool has_retpoline) |
diff --git a/include/linux/mount.h b/include/linux/mount.h index 037eed52164b..9197ddbf35fb 100644 --- a/include/linux/mount.h +++ b/include/linux/mount.h | |||
| @@ -21,6 +21,7 @@ struct super_block; | |||
| 21 | struct vfsmount; | 21 | struct vfsmount; |
| 22 | struct dentry; | 22 | struct dentry; |
| 23 | struct mnt_namespace; | 23 | struct mnt_namespace; |
| 24 | struct fs_context; | ||
| 24 | 25 | ||
| 25 | #define MNT_NOSUID 0x01 | 26 | #define MNT_NOSUID 0x01 |
| 26 | #define MNT_NODEV 0x02 | 27 | #define MNT_NODEV 0x02 |
| @@ -88,6 +89,8 @@ struct path; | |||
| 88 | extern struct vfsmount *clone_private_mount(const struct path *path); | 89 | extern struct vfsmount *clone_private_mount(const struct path *path); |
| 89 | 90 | ||
| 90 | struct file_system_type; | 91 | struct file_system_type; |
| 92 | extern struct vfsmount *fc_mount(struct fs_context *fc); | ||
| 93 | extern struct vfsmount *vfs_create_mount(struct fs_context *fc); | ||
| 91 | extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, | 94 | extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, |
| 92 | int flags, const char *name, | 95 | int flags, const char *name, |
| 93 | void *data); | 96 | void *data); |
diff --git a/include/linux/msi.h b/include/linux/msi.h index 784fb52b9900..7e9b81c3b50d 100644 --- a/include/linux/msi.h +++ b/include/linux/msi.h | |||
| @@ -83,12 +83,12 @@ struct msi_desc { | |||
| 83 | struct { | 83 | struct { |
| 84 | u32 masked; | 84 | u32 masked; |
| 85 | struct { | 85 | struct { |
| 86 | __u8 is_msix : 1; | 86 | u8 is_msix : 1; |
| 87 | __u8 multiple : 3; | 87 | u8 multiple : 3; |
| 88 | __u8 multi_cap : 3; | 88 | u8 multi_cap : 3; |
| 89 | __u8 maskbit : 1; | 89 | u8 maskbit : 1; |
| 90 | __u8 is_64 : 1; | 90 | u8 is_64 : 1; |
| 91 | __u16 entry_nr; | 91 | u16 entry_nr; |
| 92 | unsigned default_irq; | 92 | unsigned default_irq; |
| 93 | } msi_attrib; | 93 | } msi_attrib; |
| 94 | union { | 94 | union { |
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h index 33e240acdc6d..b7445a44a814 100644 --- a/include/linux/mtd/rawnand.h +++ b/include/linux/mtd/rawnand.h | |||
| @@ -16,13 +16,12 @@ | |||
| 16 | #ifndef __LINUX_MTD_RAWNAND_H | 16 | #ifndef __LINUX_MTD_RAWNAND_H |
| 17 | #define __LINUX_MTD_RAWNAND_H | 17 | #define __LINUX_MTD_RAWNAND_H |
| 18 | 18 | ||
| 19 | #include <linux/wait.h> | ||
| 20 | #include <linux/spinlock.h> | ||
| 21 | #include <linux/mtd/mtd.h> | 19 | #include <linux/mtd/mtd.h> |
| 22 | #include <linux/mtd/flashchip.h> | 20 | #include <linux/mtd/flashchip.h> |
| 23 | #include <linux/mtd/bbm.h> | 21 | #include <linux/mtd/bbm.h> |
| 24 | #include <linux/mtd/jedec.h> | 22 | #include <linux/mtd/jedec.h> |
| 25 | #include <linux/mtd/onfi.h> | 23 | #include <linux/mtd/onfi.h> |
| 24 | #include <linux/mutex.h> | ||
| 26 | #include <linux/of.h> | 25 | #include <linux/of.h> |
| 27 | #include <linux/types.h> | 26 | #include <linux/types.h> |
| 28 | 27 | ||
| @@ -897,25 +896,17 @@ struct nand_controller_ops { | |||
| 897 | /** | 896 | /** |
| 898 | * struct nand_controller - Structure used to describe a NAND controller | 897 | * struct nand_controller - Structure used to describe a NAND controller |
| 899 | * | 898 | * |
| 900 | * @lock: protection lock | 899 | * @lock: lock used to serialize accesses to the NAND controller |
| 901 | * @active: the mtd device which holds the controller currently | ||
| 902 | * @wq: wait queue to sleep on if a NAND operation is in | ||
| 903 | * progress used instead of the per chip wait queue | ||
| 904 | * when a hw controller is available. | ||
| 905 | * @ops: NAND controller operations. | 900 | * @ops: NAND controller operations. |
| 906 | */ | 901 | */ |
| 907 | struct nand_controller { | 902 | struct nand_controller { |
| 908 | spinlock_t lock; | 903 | struct mutex lock; |
| 909 | struct nand_chip *active; | ||
| 910 | wait_queue_head_t wq; | ||
| 911 | const struct nand_controller_ops *ops; | 904 | const struct nand_controller_ops *ops; |
| 912 | }; | 905 | }; |
| 913 | 906 | ||
| 914 | static inline void nand_controller_init(struct nand_controller *nfc) | 907 | static inline void nand_controller_init(struct nand_controller *nfc) |
| 915 | { | 908 | { |
| 916 | nfc->active = NULL; | 909 | mutex_init(&nfc->lock); |
| 917 | spin_lock_init(&nfc->lock); | ||
| 918 | init_waitqueue_head(&nfc->wq); | ||
| 919 | } | 910 | } |
| 920 | 911 | ||
| 921 | /** | 912 | /** |
| @@ -936,7 +927,6 @@ static inline void nand_controller_init(struct nand_controller *nfc) | |||
| 936 | * @waitfunc: hardware specific function for wait on ready. | 927 | * @waitfunc: hardware specific function for wait on ready. |
| 937 | * @block_bad: check if a block is bad, using OOB markers | 928 | * @block_bad: check if a block is bad, using OOB markers |
| 938 | * @block_markbad: mark a block bad | 929 | * @block_markbad: mark a block bad |
| 939 | * @erase: erase function | ||
| 940 | * @set_features: set the NAND chip features | 930 | * @set_features: set the NAND chip features |
| 941 | * @get_features: get the NAND chip features | 931 | * @get_features: get the NAND chip features |
| 942 | * @chip_delay: chip dependent delay for transferring data from array to read | 932 | * @chip_delay: chip dependent delay for transferring data from array to read |
| @@ -962,7 +952,6 @@ struct nand_legacy { | |||
| 962 | int (*waitfunc)(struct nand_chip *chip); | 952 | int (*waitfunc)(struct nand_chip *chip); |
| 963 | int (*block_bad)(struct nand_chip *chip, loff_t ofs); | 953 | int (*block_bad)(struct nand_chip *chip, loff_t ofs); |
| 964 | int (*block_markbad)(struct nand_chip *chip, loff_t ofs); | 954 | int (*block_markbad)(struct nand_chip *chip, loff_t ofs); |
| 965 | int (*erase)(struct nand_chip *chip, int page); | ||
| 966 | int (*set_features)(struct nand_chip *chip, int feature_addr, | 955 | int (*set_features)(struct nand_chip *chip, int feature_addr, |
| 967 | u8 *subfeature_para); | 956 | u8 *subfeature_para); |
| 968 | int (*get_features)(struct nand_chip *chip, int feature_addr, | 957 | int (*get_features)(struct nand_chip *chip, int feature_addr, |
| @@ -983,7 +972,6 @@ struct nand_legacy { | |||
| 983 | * setting the read-retry mode. Mostly needed for MLC NAND. | 972 | * setting the read-retry mode. Mostly needed for MLC NAND. |
| 984 | * @ecc: [BOARDSPECIFIC] ECC control structure | 973 | * @ecc: [BOARDSPECIFIC] ECC control structure |
| 985 | * @buf_align: minimum buffer alignment required by a platform | 974 | * @buf_align: minimum buffer alignment required by a platform |
| 986 | * @state: [INTERN] the current state of the NAND device | ||
| 987 | * @oob_poi: "poison value buffer," used for laying out OOB data | 975 | * @oob_poi: "poison value buffer," used for laying out OOB data |
| 988 | * before writing | 976 | * before writing |
| 989 | * @page_shift: [INTERN] number of address bits in a page (column | 977 | * @page_shift: [INTERN] number of address bits in a page (column |
| @@ -1034,6 +1022,9 @@ struct nand_legacy { | |||
| 1034 | * cur_cs < numchips. NAND Controller drivers should not | 1022 | * cur_cs < numchips. NAND Controller drivers should not |
| 1035 | * modify this value, but they're allowed to read it. | 1023 | * modify this value, but they're allowed to read it. |
| 1036 | * @read_retries: [INTERN] the number of read retry modes supported | 1024 | * @read_retries: [INTERN] the number of read retry modes supported |
| 1025 | * @lock: lock protecting the suspended field. Also used to | ||
| 1026 | * serialize accesses to the NAND device. | ||
| 1027 | * @suspended: set to 1 when the device is suspended, 0 when it's not. | ||
| 1037 | * @bbt: [INTERN] bad block table pointer | 1028 | * @bbt: [INTERN] bad block table pointer |
| 1038 | * @bbt_td: [REPLACEABLE] bad block table descriptor for flash | 1029 | * @bbt_td: [REPLACEABLE] bad block table descriptor for flash |
| 1039 | * lookup. | 1030 | * lookup. |
| @@ -1088,7 +1079,8 @@ struct nand_chip { | |||
| 1088 | 1079 | ||
| 1089 | int read_retries; | 1080 | int read_retries; |
| 1090 | 1081 | ||
| 1091 | flstate_t state; | 1082 | struct mutex lock; |
| 1083 | unsigned int suspended : 1; | ||
| 1092 | 1084 | ||
| 1093 | uint8_t *oob_poi; | 1085 | uint8_t *oob_poi; |
| 1094 | struct nand_controller *controller; | 1086 | struct nand_controller *controller; |
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h index fa2d89e38e40..b3d360b0ee3d 100644 --- a/include/linux/mtd/spi-nor.h +++ b/include/linux/mtd/spi-nor.h | |||
| @@ -46,9 +46,13 @@ | |||
| 46 | #define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ | 46 | #define SPINOR_OP_READ_1_2_2 0xbb /* Read data bytes (Dual I/O SPI) */ |
| 47 | #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ | 47 | #define SPINOR_OP_READ_1_1_4 0x6b /* Read data bytes (Quad Output SPI) */ |
| 48 | #define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ | 48 | #define SPINOR_OP_READ_1_4_4 0xeb /* Read data bytes (Quad I/O SPI) */ |
| 49 | #define SPINOR_OP_READ_1_1_8 0x8b /* Read data bytes (Octal Output SPI) */ | ||
| 50 | #define SPINOR_OP_READ_1_8_8 0xcb /* Read data bytes (Octal I/O SPI) */ | ||
| 49 | #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ | 51 | #define SPINOR_OP_PP 0x02 /* Page program (up to 256 bytes) */ |
| 50 | #define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ | 52 | #define SPINOR_OP_PP_1_1_4 0x32 /* Quad page program */ |
| 51 | #define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ | 53 | #define SPINOR_OP_PP_1_4_4 0x38 /* Quad page program */ |
| 54 | #define SPINOR_OP_PP_1_1_8 0x82 /* Octal page program */ | ||
| 55 | #define SPINOR_OP_PP_1_8_8 0xc2 /* Octal page program */ | ||
| 52 | #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ | 56 | #define SPINOR_OP_BE_4K 0x20 /* Erase 4KiB block */ |
| 53 | #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ | 57 | #define SPINOR_OP_BE_4K_PMC 0xd7 /* Erase 4KiB block on PMC chips */ |
| 54 | #define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ | 58 | #define SPINOR_OP_BE_32K 0x52 /* Erase 32KiB block */ |
| @@ -69,9 +73,13 @@ | |||
| 69 | #define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ | 73 | #define SPINOR_OP_READ_1_2_2_4B 0xbc /* Read data bytes (Dual I/O SPI) */ |
| 70 | #define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ | 74 | #define SPINOR_OP_READ_1_1_4_4B 0x6c /* Read data bytes (Quad Output SPI) */ |
| 71 | #define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ | 75 | #define SPINOR_OP_READ_1_4_4_4B 0xec /* Read data bytes (Quad I/O SPI) */ |
| 76 | #define SPINOR_OP_READ_1_1_8_4B 0x7c /* Read data bytes (Octal Output SPI) */ | ||
| 77 | #define SPINOR_OP_READ_1_8_8_4B 0xcc /* Read data bytes (Octal I/O SPI) */ | ||
| 72 | #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ | 78 | #define SPINOR_OP_PP_4B 0x12 /* Page program (up to 256 bytes) */ |
| 73 | #define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ | 79 | #define SPINOR_OP_PP_1_1_4_4B 0x34 /* Quad page program */ |
| 74 | #define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ | 80 | #define SPINOR_OP_PP_1_4_4_4B 0x3e /* Quad page program */ |
| 81 | #define SPINOR_OP_PP_1_1_8_4B 0x84 /* Octal page program */ | ||
| 82 | #define SPINOR_OP_PP_1_8_8_4B 0x8e /* Octal page program */ | ||
| 75 | #define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */ | 83 | #define SPINOR_OP_BE_4K_4B 0x21 /* Erase 4KiB block */ |
| 76 | #define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ | 84 | #define SPINOR_OP_BE_32K_4B 0x5c /* Erase 32KiB block */ |
| 77 | #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ | 85 | #define SPINOR_OP_SE_4B 0xdc /* Sector erase (usually 64KiB) */ |
| @@ -458,7 +466,7 @@ struct spi_nor_hwcaps { | |||
| 458 | /* | 466 | /* |
| 459 | *(Fast) Read capabilities. | 467 | *(Fast) Read capabilities. |
| 460 | * MUST be ordered by priority: the higher bit position, the higher priority. | 468 | * MUST be ordered by priority: the higher bit position, the higher priority. |
| 461 | * As a matter of performances, it is relevant to use Octo SPI protocols first, | 469 | * As a matter of performances, it is relevant to use Octal SPI protocols first, |
| 462 | * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly | 470 | * then Quad SPI protocols before Dual SPI protocols, Fast Read and lastly |
| 463 | * (Slow) Read. | 471 | * (Slow) Read. |
| 464 | */ | 472 | */ |
| @@ -479,7 +487,7 @@ struct spi_nor_hwcaps { | |||
| 479 | #define SNOR_HWCAPS_READ_4_4_4 BIT(9) | 487 | #define SNOR_HWCAPS_READ_4_4_4 BIT(9) |
| 480 | #define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) | 488 | #define SNOR_HWCAPS_READ_1_4_4_DTR BIT(10) |
| 481 | 489 | ||
| 482 | #define SNOR_HWCPAS_READ_OCTO GENMASK(14, 11) | 490 | #define SNOR_HWCAPS_READ_OCTAL GENMASK(14, 11) |
| 483 | #define SNOR_HWCAPS_READ_1_1_8 BIT(11) | 491 | #define SNOR_HWCAPS_READ_1_1_8 BIT(11) |
| 484 | #define SNOR_HWCAPS_READ_1_8_8 BIT(12) | 492 | #define SNOR_HWCAPS_READ_1_8_8 BIT(12) |
| 485 | #define SNOR_HWCAPS_READ_8_8_8 BIT(13) | 493 | #define SNOR_HWCAPS_READ_8_8_8 BIT(13) |
| @@ -488,7 +496,7 @@ struct spi_nor_hwcaps { | |||
| 488 | /* | 496 | /* |
| 489 | * Page Program capabilities. | 497 | * Page Program capabilities. |
| 490 | * MUST be ordered by priority: the higher bit position, the higher priority. | 498 | * MUST be ordered by priority: the higher bit position, the higher priority. |
| 491 | * Like (Fast) Read capabilities, Octo/Quad SPI protocols are preferred to the | 499 | * Like (Fast) Read capabilities, Octal/Quad SPI protocols are preferred to the |
| 492 | * legacy SPI 1-1-1 protocol. | 500 | * legacy SPI 1-1-1 protocol. |
| 493 | * Note that Dual Page Programs are not supported because there is no existing | 501 | * Note that Dual Page Programs are not supported because there is no existing |
| 494 | * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory | 502 | * JEDEC/SFDP standard to define them. Also at this moment no SPI flash memory |
| @@ -502,7 +510,7 @@ struct spi_nor_hwcaps { | |||
| 502 | #define SNOR_HWCAPS_PP_1_4_4 BIT(18) | 510 | #define SNOR_HWCAPS_PP_1_4_4 BIT(18) |
| 503 | #define SNOR_HWCAPS_PP_4_4_4 BIT(19) | 511 | #define SNOR_HWCAPS_PP_4_4_4 BIT(19) |
| 504 | 512 | ||
| 505 | #define SNOR_HWCAPS_PP_OCTO GENMASK(22, 20) | 513 | #define SNOR_HWCAPS_PP_OCTAL GENMASK(22, 20) |
| 506 | #define SNOR_HWCAPS_PP_1_1_8 BIT(20) | 514 | #define SNOR_HWCAPS_PP_1_1_8 BIT(20) |
| 507 | #define SNOR_HWCAPS_PP_1_8_8 BIT(21) | 515 | #define SNOR_HWCAPS_PP_1_8_8 BIT(21) |
| 508 | #define SNOR_HWCAPS_PP_8_8_8 BIT(22) | 516 | #define SNOR_HWCAPS_PP_8_8_8 BIT(22) |
diff --git a/include/linux/namei.h b/include/linux/namei.h index a78606e8e3df..9138b4471dbf 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h | |||
| @@ -24,6 +24,8 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; | |||
| 24 | * - internal "there are more path components" flag | 24 | * - internal "there are more path components" flag |
| 25 | * - dentry cache is untrusted; force a real lookup | 25 | * - dentry cache is untrusted; force a real lookup |
| 26 | * - suppress terminal automount | 26 | * - suppress terminal automount |
| 27 | * - skip revalidation | ||
| 28 | * - don't fetch xattrs on audit_inode | ||
| 27 | */ | 29 | */ |
| 28 | #define LOOKUP_FOLLOW 0x0001 | 30 | #define LOOKUP_FOLLOW 0x0001 |
| 29 | #define LOOKUP_DIRECTORY 0x0002 | 31 | #define LOOKUP_DIRECTORY 0x0002 |
| @@ -33,6 +35,7 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND}; | |||
| 33 | #define LOOKUP_REVAL 0x0020 | 35 | #define LOOKUP_REVAL 0x0020 |
| 34 | #define LOOKUP_RCU 0x0040 | 36 | #define LOOKUP_RCU 0x0040 |
| 35 | #define LOOKUP_NO_REVAL 0x0080 | 37 | #define LOOKUP_NO_REVAL 0x0080 |
| 38 | #define LOOKUP_NO_EVAL 0x0100 | ||
| 36 | 39 | ||
| 37 | /* | 40 | /* |
| 38 | * Intent data | 41 | * Intent data |
diff --git a/include/linux/net.h b/include/linux/net.h index e0930678c8bf..c606c72311d0 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
| @@ -83,6 +83,12 @@ enum sock_type { | |||
| 83 | 83 | ||
| 84 | #endif /* ARCH_HAS_SOCKET_TYPES */ | 84 | #endif /* ARCH_HAS_SOCKET_TYPES */ |
| 85 | 85 | ||
| 86 | /** | ||
| 87 | * enum sock_shutdown_cmd - Shutdown types | ||
| 88 | * @SHUT_RD: shutdown receptions | ||
| 89 | * @SHUT_WR: shutdown transmissions | ||
| 90 | * @SHUT_RDWR: shutdown receptions/transmissions | ||
| 91 | */ | ||
| 86 | enum sock_shutdown_cmd { | 92 | enum sock_shutdown_cmd { |
| 87 | SHUT_RD, | 93 | SHUT_RD, |
| 88 | SHUT_WR, | 94 | SHUT_WR, |
| @@ -263,7 +269,7 @@ do { \ | |||
| 263 | #define net_dbg_ratelimited(fmt, ...) \ | 269 | #define net_dbg_ratelimited(fmt, ...) \ |
| 264 | do { \ | 270 | do { \ |
| 265 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ | 271 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \ |
| 266 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ | 272 | if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ |
| 267 | net_ratelimit()) \ | 273 | net_ratelimit()) \ |
| 268 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ | 274 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), \ |
| 269 | ##__VA_ARGS__); \ | 275 | ##__VA_ARGS__); \ |
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2b2a6dce1630..4c76fe2c8488 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h | |||
| @@ -11,6 +11,8 @@ | |||
| 11 | #define _LINUX_NETDEV_FEATURES_H | 11 | #define _LINUX_NETDEV_FEATURES_H |
| 12 | 12 | ||
| 13 | #include <linux/types.h> | 13 | #include <linux/types.h> |
| 14 | #include <linux/bitops.h> | ||
| 15 | #include <asm/byteorder.h> | ||
| 14 | 16 | ||
| 15 | typedef u64 netdev_features_t; | 17 | typedef u64 netdev_features_t; |
| 16 | 18 | ||
| @@ -154,8 +156,26 @@ enum { | |||
| 154 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) | 156 | #define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX) |
| 155 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) | 157 | #define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX) |
| 156 | 158 | ||
| 157 | #define for_each_netdev_feature(mask_addr, bit) \ | 159 | /* Finds the next feature with the highest number of the range of start till 0. |
| 158 | for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT) | 160 | */ |
| 161 | static inline int find_next_netdev_feature(u64 feature, unsigned long start) | ||
| 162 | { | ||
| 163 | /* like BITMAP_LAST_WORD_MASK() for u64 | ||
| 164 | * this sets the most significant 64 - start to 0. | ||
| 165 | */ | ||
| 166 | feature &= ~0ULL >> (-start & ((sizeof(feature) * 8) - 1)); | ||
| 167 | |||
| 168 | return fls64(feature) - 1; | ||
| 169 | } | ||
| 170 | |||
| 171 | /* This goes for the MSB to the LSB through the set feature bits, | ||
| 172 | * mask_addr should be a u64 and bit an int | ||
| 173 | */ | ||
| 174 | #define for_each_netdev_feature(mask_addr, bit) \ | ||
| 175 | for ((bit) = find_next_netdev_feature((mask_addr), \ | ||
| 176 | NETDEV_FEATURE_COUNT); \ | ||
| 177 | (bit) >= 0; \ | ||
| 178 | (bit) = find_next_netdev_feature((mask_addr), (bit) - 1)) | ||
| 159 | 179 | ||
| 160 | /* Features valid for ethtool to change */ | 180 | /* Features valid for ethtool to change */ |
| 161 | /* = all defined minus driver/device-class-related */ | 181 | /* = all defined minus driver/device-class-related */ |
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 1377d085ef99..324e872c91d1 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h | |||
| @@ -274,6 +274,7 @@ struct header_ops { | |||
| 274 | const struct net_device *dev, | 274 | const struct net_device *dev, |
| 275 | const unsigned char *haddr); | 275 | const unsigned char *haddr); |
| 276 | bool (*validate)(const char *ll_header, unsigned int len); | 276 | bool (*validate)(const char *ll_header, unsigned int len); |
| 277 | __be16 (*parse_protocol)(const struct sk_buff *skb); | ||
| 277 | }; | 278 | }; |
| 278 | 279 | ||
| 279 | /* These flag bits are private to the generic network queueing | 280 | /* These flag bits are private to the generic network queueing |
| @@ -630,6 +631,7 @@ struct netdev_queue { | |||
| 630 | } ____cacheline_aligned_in_smp; | 631 | } ____cacheline_aligned_in_smp; |
| 631 | 632 | ||
| 632 | extern int sysctl_fb_tunnels_only_for_init_net; | 633 | extern int sysctl_fb_tunnels_only_for_init_net; |
| 634 | extern int sysctl_devconf_inherit_init_net; | ||
| 633 | 635 | ||
| 634 | static inline bool net_has_fallback_tunnels(const struct net *net) | 636 | static inline bool net_has_fallback_tunnels(const struct net *net) |
| 635 | { | 637 | { |
| @@ -867,7 +869,6 @@ enum bpf_netdev_command { | |||
| 867 | /* BPF program for offload callbacks, invoked at program load time. */ | 869 | /* BPF program for offload callbacks, invoked at program load time. */ |
| 868 | BPF_OFFLOAD_MAP_ALLOC, | 870 | BPF_OFFLOAD_MAP_ALLOC, |
| 869 | BPF_OFFLOAD_MAP_FREE, | 871 | BPF_OFFLOAD_MAP_FREE, |
| 870 | XDP_QUERY_XSK_UMEM, | ||
| 871 | XDP_SETUP_XSK_UMEM, | 872 | XDP_SETUP_XSK_UMEM, |
| 872 | }; | 873 | }; |
| 873 | 874 | ||
| @@ -894,10 +895,10 @@ struct netdev_bpf { | |||
| 894 | struct { | 895 | struct { |
| 895 | struct bpf_offloaded_map *offmap; | 896 | struct bpf_offloaded_map *offmap; |
| 896 | }; | 897 | }; |
| 897 | /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */ | 898 | /* XDP_SETUP_XSK_UMEM */ |
| 898 | struct { | 899 | struct { |
| 899 | struct xdp_umem *umem; /* out for query*/ | 900 | struct xdp_umem *umem; |
| 900 | u16 queue_id; /* in for query */ | 901 | u16 queue_id; |
| 901 | } xsk; | 902 | } xsk; |
| 902 | }; | 903 | }; |
| 903 | }; | 904 | }; |
| @@ -940,6 +941,8 @@ struct dev_ifalias { | |||
| 940 | char ifalias[]; | 941 | char ifalias[]; |
| 941 | }; | 942 | }; |
| 942 | 943 | ||
| 944 | struct devlink; | ||
| 945 | |||
| 943 | /* | 946 | /* |
| 944 | * This structure defines the management hooks for network devices. | 947 | * This structure defines the management hooks for network devices. |
| 945 | * The following hooks can be defined; unless noted otherwise, they are | 948 | * The following hooks can be defined; unless noted otherwise, they are |
| @@ -1152,7 +1155,8 @@ struct dev_ifalias { | |||
| 1152 | * | 1155 | * |
| 1153 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], | 1156 | * int (*ndo_fdb_add)(struct ndmsg *ndm, struct nlattr *tb[], |
| 1154 | * struct net_device *dev, | 1157 | * struct net_device *dev, |
| 1155 | * const unsigned char *addr, u16 vid, u16 flags) | 1158 | * const unsigned char *addr, u16 vid, u16 flags, |
| 1159 | * struct netlink_ext_ack *extack); | ||
| 1156 | * Adds an FDB entry to dev for addr. | 1160 | * Adds an FDB entry to dev for addr. |
| 1157 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], | 1161 | * int (*ndo_fdb_del)(struct ndmsg *ndm, struct nlattr *tb[], |
| 1158 | * struct net_device *dev, | 1162 | * struct net_device *dev, |
| @@ -1186,6 +1190,10 @@ struct dev_ifalias { | |||
| 1186 | * not implement this, it is assumed that the hw is not able to have | 1190 | * not implement this, it is assumed that the hw is not able to have |
| 1187 | * multiple net devices on single physical port. | 1191 | * multiple net devices on single physical port. |
| 1188 | * | 1192 | * |
| 1193 | * int (*ndo_get_port_parent_id)(struct net_device *dev, | ||
| 1194 | * struct netdev_phys_item_id *ppid) | ||
| 1195 | * Called to get the parent ID of the physical port of this device. | ||
| 1196 | * | ||
| 1189 | * void (*ndo_udp_tunnel_add)(struct net_device *dev, | 1197 | * void (*ndo_udp_tunnel_add)(struct net_device *dev, |
| 1190 | * struct udp_tunnel_info *ti); | 1198 | * struct udp_tunnel_info *ti); |
| 1191 | * Called by UDP tunnel to notify a driver about the UDP port and socket | 1199 | * Called by UDP tunnel to notify a driver about the UDP port and socket |
| @@ -1243,6 +1251,10 @@ struct dev_ifalias { | |||
| 1243 | * that got dropped are freed/returned via xdp_return_frame(). | 1251 | * that got dropped are freed/returned via xdp_return_frame(). |
| 1244 | * Returns negative number, means general error invoking ndo, meaning | 1252 | * Returns negative number, means general error invoking ndo, meaning |
| 1245 | * no frames were xmit'ed and core-caller will free all frames. | 1253 | * no frames were xmit'ed and core-caller will free all frames. |
| 1254 | * struct devlink *(*ndo_get_devlink)(struct net_device *dev); | ||
| 1255 | * Get devlink instance associated with a given netdev. | ||
| 1256 | * Called with a reference on the netdevice and devlink locks only, | ||
| 1257 | * rtnl_lock is not held. | ||
| 1246 | */ | 1258 | */ |
| 1247 | struct net_device_ops { | 1259 | struct net_device_ops { |
| 1248 | int (*ndo_init)(struct net_device *dev); | 1260 | int (*ndo_init)(struct net_device *dev); |
| @@ -1376,7 +1388,8 @@ struct net_device_ops { | |||
| 1376 | struct net_device *dev, | 1388 | struct net_device *dev, |
| 1377 | const unsigned char *addr, | 1389 | const unsigned char *addr, |
| 1378 | u16 vid, | 1390 | u16 vid, |
| 1379 | u16 flags); | 1391 | u16 flags, |
| 1392 | struct netlink_ext_ack *extack); | ||
| 1380 | int (*ndo_fdb_del)(struct ndmsg *ndm, | 1393 | int (*ndo_fdb_del)(struct ndmsg *ndm, |
| 1381 | struct nlattr *tb[], | 1394 | struct nlattr *tb[], |
| 1382 | struct net_device *dev, | 1395 | struct net_device *dev, |
| @@ -1409,6 +1422,8 @@ struct net_device_ops { | |||
| 1409 | bool new_carrier); | 1422 | bool new_carrier); |
| 1410 | int (*ndo_get_phys_port_id)(struct net_device *dev, | 1423 | int (*ndo_get_phys_port_id)(struct net_device *dev, |
| 1411 | struct netdev_phys_item_id *ppid); | 1424 | struct netdev_phys_item_id *ppid); |
| 1425 | int (*ndo_get_port_parent_id)(struct net_device *dev, | ||
| 1426 | struct netdev_phys_item_id *ppid); | ||
| 1412 | int (*ndo_get_phys_port_name)(struct net_device *dev, | 1427 | int (*ndo_get_phys_port_name)(struct net_device *dev, |
| 1413 | char *name, size_t len); | 1428 | char *name, size_t len); |
| 1414 | void (*ndo_udp_tunnel_add)(struct net_device *dev, | 1429 | void (*ndo_udp_tunnel_add)(struct net_device *dev, |
| @@ -1438,6 +1453,7 @@ struct net_device_ops { | |||
| 1438 | u32 flags); | 1453 | u32 flags); |
| 1439 | int (*ndo_xsk_async_xmit)(struct net_device *dev, | 1454 | int (*ndo_xsk_async_xmit)(struct net_device *dev, |
| 1440 | u32 queue_id); | 1455 | u32 queue_id); |
| 1456 | struct devlink * (*ndo_get_devlink)(struct net_device *dev); | ||
| 1441 | }; | 1457 | }; |
| 1442 | 1458 | ||
| 1443 | /** | 1459 | /** |
| @@ -1483,6 +1499,8 @@ struct net_device_ops { | |||
| 1483 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook | 1499 | * @IFF_NO_RX_HANDLER: device doesn't support the rx_handler hook |
| 1484 | * @IFF_FAILOVER: device is a failover master device | 1500 | * @IFF_FAILOVER: device is a failover master device |
| 1485 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device | 1501 | * @IFF_FAILOVER_SLAVE: device is lower dev of a failover master device |
| 1502 | * @IFF_L3MDEV_RX_HANDLER: only invoke the rx handler of L3 master device | ||
| 1503 | * @IFF_LIVE_RENAME_OK: rename is allowed while device is up and running | ||
| 1486 | */ | 1504 | */ |
| 1487 | enum netdev_priv_flags { | 1505 | enum netdev_priv_flags { |
| 1488 | IFF_802_1Q_VLAN = 1<<0, | 1506 | IFF_802_1Q_VLAN = 1<<0, |
| @@ -1514,6 +1532,8 @@ enum netdev_priv_flags { | |||
| 1514 | IFF_NO_RX_HANDLER = 1<<26, | 1532 | IFF_NO_RX_HANDLER = 1<<26, |
| 1515 | IFF_FAILOVER = 1<<27, | 1533 | IFF_FAILOVER = 1<<27, |
| 1516 | IFF_FAILOVER_SLAVE = 1<<28, | 1534 | IFF_FAILOVER_SLAVE = 1<<28, |
| 1535 | IFF_L3MDEV_RX_HANDLER = 1<<29, | ||
| 1536 | IFF_LIVE_RENAME_OK = 1<<30, | ||
| 1517 | }; | 1537 | }; |
| 1518 | 1538 | ||
| 1519 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN | 1539 | #define IFF_802_1Q_VLAN IFF_802_1Q_VLAN |
| @@ -1544,6 +1564,8 @@ enum netdev_priv_flags { | |||
| 1544 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER | 1564 | #define IFF_NO_RX_HANDLER IFF_NO_RX_HANDLER |
| 1545 | #define IFF_FAILOVER IFF_FAILOVER | 1565 | #define IFF_FAILOVER IFF_FAILOVER |
| 1546 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE | 1566 | #define IFF_FAILOVER_SLAVE IFF_FAILOVER_SLAVE |
| 1567 | #define IFF_L3MDEV_RX_HANDLER IFF_L3MDEV_RX_HANDLER | ||
| 1568 | #define IFF_LIVE_RENAME_OK IFF_LIVE_RENAME_OK | ||
| 1547 | 1569 | ||
| 1548 | /** | 1570 | /** |
| 1549 | * struct net_device - The DEVICE structure. | 1571 | * struct net_device - The DEVICE structure. |
| @@ -1824,9 +1846,6 @@ struct net_device { | |||
| 1824 | #endif | 1846 | #endif |
| 1825 | const struct net_device_ops *netdev_ops; | 1847 | const struct net_device_ops *netdev_ops; |
| 1826 | const struct ethtool_ops *ethtool_ops; | 1848 | const struct ethtool_ops *ethtool_ops; |
| 1827 | #ifdef CONFIG_NET_SWITCHDEV | ||
| 1828 | const struct switchdev_ops *switchdev_ops; | ||
| 1829 | #endif | ||
| 1830 | #ifdef CONFIG_NET_L3_MASTER_DEV | 1849 | #ifdef CONFIG_NET_L3_MASTER_DEV |
| 1831 | const struct l3mdev_ops *l3mdev_ops; | 1850 | const struct l3mdev_ops *l3mdev_ops; |
| 1832 | #endif | 1851 | #endif |
| @@ -2928,6 +2947,15 @@ static inline int dev_parse_header(const struct sk_buff *skb, | |||
| 2928 | return dev->header_ops->parse(skb, haddr); | 2947 | return dev->header_ops->parse(skb, haddr); |
| 2929 | } | 2948 | } |
| 2930 | 2949 | ||
| 2950 | static inline __be16 dev_parse_header_protocol(const struct sk_buff *skb) | ||
| 2951 | { | ||
| 2952 | const struct net_device *dev = skb->dev; | ||
| 2953 | |||
| 2954 | if (!dev->header_ops || !dev->header_ops->parse_protocol) | ||
| 2955 | return 0; | ||
| 2956 | return dev->header_ops->parse_protocol(skb); | ||
| 2957 | } | ||
| 2958 | |||
| 2931 | /* ll_header must have at least hard_header_len allocated */ | 2959 | /* ll_header must have at least hard_header_len allocated */ |
| 2932 | static inline bool dev_validate_header(const struct net_device *dev, | 2960 | static inline bool dev_validate_header(const struct net_device *dev, |
| 2933 | char *ll_header, int len) | 2961 | char *ll_header, int len) |
| @@ -3648,7 +3676,11 @@ int dev_get_phys_port_id(struct net_device *dev, | |||
| 3648 | struct netdev_phys_item_id *ppid); | 3676 | struct netdev_phys_item_id *ppid); |
| 3649 | int dev_get_phys_port_name(struct net_device *dev, | 3677 | int dev_get_phys_port_name(struct net_device *dev, |
| 3650 | char *name, size_t len); | 3678 | char *name, size_t len); |
| 3679 | int dev_get_port_parent_id(struct net_device *dev, | ||
| 3680 | struct netdev_phys_item_id *ppid, bool recurse); | ||
| 3681 | bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b); | ||
| 3651 | int dev_change_proto_down(struct net_device *dev, bool proto_down); | 3682 | int dev_change_proto_down(struct net_device *dev, bool proto_down); |
| 3683 | int dev_change_proto_down_generic(struct net_device *dev, bool proto_down); | ||
| 3652 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); | 3684 | struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again); |
| 3653 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, | 3685 | struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev, |
| 3654 | struct netdev_queue *txq, int *ret); | 3686 | struct netdev_queue *txq, int *ret); |
| @@ -3858,7 +3890,7 @@ static inline u32 netif_msg_init(int debug_value, int default_msg_enable_bits) | |||
| 3858 | if (debug_value == 0) /* no output */ | 3890 | if (debug_value == 0) /* no output */ |
| 3859 | return 0; | 3891 | return 0; |
| 3860 | /* set low N bits */ | 3892 | /* set low N bits */ |
| 3861 | return (1 << debug_value) - 1; | 3893 | return (1U << debug_value) - 1; |
| 3862 | } | 3894 | } |
| 3863 | 3895 | ||
| 3864 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) | 3896 | static inline void __netif_tx_lock(struct netdev_queue *txq, int cpu) |
| @@ -4549,6 +4581,11 @@ static inline bool netif_supports_nofcs(struct net_device *dev) | |||
| 4549 | return dev->priv_flags & IFF_SUPP_NOFCS; | 4581 | return dev->priv_flags & IFF_SUPP_NOFCS; |
| 4550 | } | 4582 | } |
| 4551 | 4583 | ||
| 4584 | static inline bool netif_has_l3_rx_handler(const struct net_device *dev) | ||
| 4585 | { | ||
| 4586 | return dev->priv_flags & IFF_L3MDEV_RX_HANDLER; | ||
| 4587 | } | ||
| 4588 | |||
| 4552 | static inline bool netif_is_l3_master(const struct net_device *dev) | 4589 | static inline bool netif_is_l3_master(const struct net_device *dev) |
| 4553 | { | 4590 | { |
| 4554 | return dev->priv_flags & IFF_L3MDEV_MASTER; | 4591 | return dev->priv_flags & IFF_L3MDEV_MASTER; |
| @@ -4660,22 +4697,22 @@ static inline const char *netdev_reg_state(const struct net_device *dev) | |||
| 4660 | return " (unknown)"; | 4697 | return " (unknown)"; |
| 4661 | } | 4698 | } |
| 4662 | 4699 | ||
| 4663 | __printf(3, 4) | 4700 | __printf(3, 4) __cold |
| 4664 | void netdev_printk(const char *level, const struct net_device *dev, | 4701 | void netdev_printk(const char *level, const struct net_device *dev, |
| 4665 | const char *format, ...); | 4702 | const char *format, ...); |
| 4666 | __printf(2, 3) | 4703 | __printf(2, 3) __cold |
| 4667 | void netdev_emerg(const struct net_device *dev, const char *format, ...); | 4704 | void netdev_emerg(const struct net_device *dev, const char *format, ...); |
| 4668 | __printf(2, 3) | 4705 | __printf(2, 3) __cold |
| 4669 | void netdev_alert(const struct net_device *dev, const char *format, ...); | 4706 | void netdev_alert(const struct net_device *dev, const char *format, ...); |
| 4670 | __printf(2, 3) | 4707 | __printf(2, 3) __cold |
| 4671 | void netdev_crit(const struct net_device *dev, const char *format, ...); | 4708 | void netdev_crit(const struct net_device *dev, const char *format, ...); |
| 4672 | __printf(2, 3) | 4709 | __printf(2, 3) __cold |
| 4673 | void netdev_err(const struct net_device *dev, const char *format, ...); | 4710 | void netdev_err(const struct net_device *dev, const char *format, ...); |
| 4674 | __printf(2, 3) | 4711 | __printf(2, 3) __cold |
| 4675 | void netdev_warn(const struct net_device *dev, const char *format, ...); | 4712 | void netdev_warn(const struct net_device *dev, const char *format, ...); |
| 4676 | __printf(2, 3) | 4713 | __printf(2, 3) __cold |
| 4677 | void netdev_notice(const struct net_device *dev, const char *format, ...); | 4714 | void netdev_notice(const struct net_device *dev, const char *format, ...); |
| 4678 | __printf(2, 3) | 4715 | __printf(2, 3) __cold |
| 4679 | void netdev_info(const struct net_device *dev, const char *format, ...); | 4716 | void netdev_info(const struct net_device *dev, const char *format, ...); |
| 4680 | 4717 | ||
| 4681 | #define netdev_level_once(level, dev, fmt, ...) \ | 4718 | #define netdev_level_once(level, dev, fmt, ...) \ |
diff --git a/include/linux/netfilter/nf_conntrack_proto_gre.h b/include/linux/netfilter/nf_conntrack_proto_gre.h index 6989e2e4eabf..25f9a770fb84 100644 --- a/include/linux/netfilter/nf_conntrack_proto_gre.h +++ b/include/linux/netfilter/nf_conntrack_proto_gre.h | |||
| @@ -19,27 +19,18 @@ struct nf_conn; | |||
| 19 | struct nf_ct_gre_keymap { | 19 | struct nf_ct_gre_keymap { |
| 20 | struct list_head list; | 20 | struct list_head list; |
| 21 | struct nf_conntrack_tuple tuple; | 21 | struct nf_conntrack_tuple tuple; |
| 22 | }; | 22 | struct rcu_head rcu; |
| 23 | |||
| 24 | enum grep_conntrack { | ||
| 25 | GRE_CT_UNREPLIED, | ||
| 26 | GRE_CT_REPLIED, | ||
| 27 | GRE_CT_MAX | ||
| 28 | }; | ||
| 29 | |||
| 30 | struct netns_proto_gre { | ||
| 31 | struct nf_proto_net nf; | ||
| 32 | rwlock_t keymap_lock; | ||
| 33 | struct list_head keymap_list; | ||
| 34 | unsigned int gre_timeouts[GRE_CT_MAX]; | ||
| 35 | }; | 23 | }; |
| 36 | 24 | ||
| 37 | /* add new tuple->key_reply pair to keymap */ | 25 | /* add new tuple->key_reply pair to keymap */ |
| 38 | int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, | 26 | int nf_ct_gre_keymap_add(struct nf_conn *ct, enum ip_conntrack_dir dir, |
| 39 | struct nf_conntrack_tuple *t); | 27 | struct nf_conntrack_tuple *t); |
| 40 | 28 | ||
| 29 | void nf_ct_gre_keymap_flush(struct net *net); | ||
| 41 | /* delete keymap entries */ | 30 | /* delete keymap entries */ |
| 42 | void nf_ct_gre_keymap_destroy(struct nf_conn *ct); | 31 | void nf_ct_gre_keymap_destroy(struct nf_conn *ct); |
| 43 | 32 | ||
| 33 | bool gre_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, | ||
| 34 | struct net *net, struct nf_conntrack_tuple *tuple); | ||
| 44 | #endif /* __KERNEL__ */ | 35 | #endif /* __KERNEL__ */ |
| 45 | #endif /* _CONNTRACK_PROTO_GRE_H */ | 36 | #endif /* _CONNTRACK_PROTO_GRE_H */ |
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h index 9077b3ebea08..bf384b3eedb8 100644 --- a/include/linux/netfilter/x_tables.h +++ b/include/linux/netfilter/x_tables.h | |||
| @@ -289,9 +289,9 @@ bool xt_find_jump_offset(const unsigned int *offsets, | |||
| 289 | 289 | ||
| 290 | int xt_check_proc_name(const char *name, unsigned int size); | 290 | int xt_check_proc_name(const char *name, unsigned int size); |
| 291 | 291 | ||
| 292 | int xt_check_match(struct xt_mtchk_param *, unsigned int size, u_int8_t proto, | 292 | int xt_check_match(struct xt_mtchk_param *, unsigned int size, u16 proto, |
| 293 | bool inv_proto); | 293 | bool inv_proto); |
| 294 | int xt_check_target(struct xt_tgchk_param *, unsigned int size, u_int8_t proto, | 294 | int xt_check_target(struct xt_tgchk_param *, unsigned int size, u16 proto, |
| 295 | bool inv_proto); | 295 | bool inv_proto); |
| 296 | 296 | ||
| 297 | int xt_match_to_user(const struct xt_entry_match *m, | 297 | int xt_match_to_user(const struct xt_entry_match *m, |
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h index 95ab5cc64422..082e2c41b7ff 100644 --- a/include/linux/netfilter_ipv4.h +++ b/include/linux/netfilter_ipv4.h | |||
| @@ -25,7 +25,6 @@ __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, | |||
| 25 | unsigned int dataoff, u_int8_t protocol); | 25 | unsigned int dataoff, u_int8_t protocol); |
| 26 | int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, | 26 | int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl, |
| 27 | bool strict); | 27 | bool strict); |
| 28 | int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry); | ||
| 29 | #else | 28 | #else |
| 30 | static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, | 29 | static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook, |
| 31 | unsigned int dataoff, u_int8_t protocol) | 30 | unsigned int dataoff, u_int8_t protocol) |
| @@ -37,11 +36,6 @@ static inline int nf_ip_route(struct net *net, struct dst_entry **dst, | |||
| 37 | { | 36 | { |
| 38 | return -EOPNOTSUPP; | 37 | return -EOPNOTSUPP; |
| 39 | } | 38 | } |
| 40 | static inline int nf_ip_reroute(struct sk_buff *skb, | ||
| 41 | const struct nf_queue_entry *entry) | ||
| 42 | { | ||
| 43 | return -EOPNOTSUPP; | ||
| 44 | } | ||
| 45 | #endif /* CONFIG_INET */ | 39 | #endif /* CONFIG_INET */ |
| 46 | 40 | ||
| 47 | #endif /*__LINUX_IP_NETFILTER_H*/ | 41 | #endif /*__LINUX_IP_NETFILTER_H*/ |
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h index c0dc4dd78887..471e9467105b 100644 --- a/include/linux/netfilter_ipv6.h +++ b/include/linux/netfilter_ipv6.h | |||
| @@ -25,23 +25,24 @@ struct nf_queue_entry; | |||
| 25 | * if IPv6 is a module. | 25 | * if IPv6 is a module. |
| 26 | */ | 26 | */ |
| 27 | struct nf_ipv6_ops { | 27 | struct nf_ipv6_ops { |
| 28 | #if IS_MODULE(CONFIG_IPV6) | ||
| 28 | int (*chk_addr)(struct net *net, const struct in6_addr *addr, | 29 | int (*chk_addr)(struct net *net, const struct in6_addr *addr, |
| 29 | const struct net_device *dev, int strict); | 30 | const struct net_device *dev, int strict); |
| 31 | int (*route_me_harder)(struct net *net, struct sk_buff *skb); | ||
| 32 | int (*dev_get_saddr)(struct net *net, const struct net_device *dev, | ||
| 33 | const struct in6_addr *daddr, unsigned int srcprefs, | ||
| 34 | struct in6_addr *saddr); | ||
| 35 | int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, | ||
| 36 | bool strict); | ||
| 37 | #endif | ||
| 30 | void (*route_input)(struct sk_buff *skb); | 38 | void (*route_input)(struct sk_buff *skb); |
| 31 | int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, | 39 | int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb, |
| 32 | int (*output)(struct net *, struct sock *, struct sk_buff *)); | 40 | int (*output)(struct net *, struct sock *, struct sk_buff *)); |
| 33 | int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl, | ||
| 34 | bool strict); | ||
| 35 | int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); | 41 | int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry); |
| 36 | }; | 42 | }; |
| 37 | 43 | ||
| 38 | #ifdef CONFIG_NETFILTER | 44 | #ifdef CONFIG_NETFILTER |
| 39 | int ip6_route_me_harder(struct net *net, struct sk_buff *skb); | 45 | #include <net/addrconf.h> |
| 40 | __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, | ||
| 41 | unsigned int dataoff, u_int8_t protocol); | ||
| 42 | |||
| 43 | int ipv6_netfilter_init(void); | ||
| 44 | void ipv6_netfilter_fini(void); | ||
| 45 | 46 | ||
| 46 | extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; | 47 | extern const struct nf_ipv6_ops __rcu *nf_ipv6_ops; |
| 47 | static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) | 48 | static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) |
| @@ -49,6 +50,49 @@ static inline const struct nf_ipv6_ops *nf_get_ipv6_ops(void) | |||
| 49 | return rcu_dereference(nf_ipv6_ops); | 50 | return rcu_dereference(nf_ipv6_ops); |
| 50 | } | 51 | } |
| 51 | 52 | ||
| 53 | static inline int nf_ipv6_chk_addr(struct net *net, const struct in6_addr *addr, | ||
| 54 | const struct net_device *dev, int strict) | ||
| 55 | { | ||
| 56 | #if IS_MODULE(CONFIG_IPV6) | ||
| 57 | const struct nf_ipv6_ops *v6_ops = nf_get_ipv6_ops(); | ||
| 58 | |||
| 59 | if (!v6_ops) | ||
| 60 | return 1; | ||
| 61 | |||
| 62 | return v6_ops->chk_addr(net, addr, dev, strict); | ||
| 63 | #else | ||
| 64 | return ipv6_chk_addr(net, addr, dev, strict); | ||
| 65 | #endif | ||
| 66 | } | ||
| 67 | |||
| 68 | int __nf_ip6_route(struct net *net, struct dst_entry **dst, | ||
| 69 | struct flowi *fl, bool strict); | ||
| 70 | |||
| 71 | static inline int nf_ip6_route(struct net *net, struct dst_entry **dst, | ||
| 72 | struct flowi *fl, bool strict) | ||
| 73 | { | ||
| 74 | #if IS_MODULE(CONFIG_IPV6) | ||
| 75 | const struct nf_ipv6_ops *v6ops = nf_get_ipv6_ops(); | ||
| 76 | |||
| 77 | if (v6ops) | ||
| 78 | return v6ops->route(net, dst, fl, strict); | ||
| 79 | |||
| 80 | return -EHOSTUNREACH; | ||
| 81 | #endif | ||
| 82 | #if IS_BUILTIN(CONFIG_IPV6) | ||
| 83 | return __nf_ip6_route(net, dst, fl, strict); | ||
| 84 | #else | ||
| 85 | return -EHOSTUNREACH; | ||
| 86 | #endif | ||
| 87 | } | ||
| 88 | |||
| 89 | int ip6_route_me_harder(struct net *net, struct sk_buff *skb); | ||
| 90 | __sum16 nf_ip6_checksum(struct sk_buff *skb, unsigned int hook, | ||
| 91 | unsigned int dataoff, u_int8_t protocol); | ||
| 92 | |||
| 93 | int ipv6_netfilter_init(void); | ||
| 94 | void ipv6_netfilter_fini(void); | ||
| 95 | |||
| 52 | #else /* CONFIG_NETFILTER */ | 96 | #else /* CONFIG_NETFILTER */ |
| 53 | static inline int ipv6_netfilter_init(void) { return 0; } | 97 | static inline int ipv6_netfilter_init(void) { return 0; } |
| 54 | static inline void ipv6_netfilter_fini(void) { return; } | 98 | static inline void ipv6_netfilter_fini(void) { return; } |
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 4e8add270200..593d1b9c33a8 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
| @@ -126,6 +126,7 @@ void __netlink_clear_multicast_users(struct sock *sk, unsigned int group); | |||
| 126 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, | 126 | void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err, |
| 127 | const struct netlink_ext_ack *extack); | 127 | const struct netlink_ext_ack *extack); |
| 128 | int netlink_has_listeners(struct sock *sk, unsigned int group); | 128 | int netlink_has_listeners(struct sock *sk, unsigned int group); |
| 129 | bool netlink_strict_get_check(struct sk_buff *skb); | ||
| 129 | 130 | ||
| 130 | int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); | 131 | int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock); |
| 131 | int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, | 132 | int netlink_broadcast(struct sock *ssk, struct sk_buff *skb, __u32 portid, |
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h index 1b06f0b28453..22494d170619 100644 --- a/include/linux/nfs4.h +++ b/include/linux/nfs4.h | |||
| @@ -538,6 +538,7 @@ enum { | |||
| 538 | NFSPROC4_CLNT_OFFLOAD_CANCEL, | 538 | NFSPROC4_CLNT_OFFLOAD_CANCEL, |
| 539 | 539 | ||
| 540 | NFSPROC4_CLNT_LOOKUPP, | 540 | NFSPROC4_CLNT_LOOKUPP, |
| 541 | NFSPROC4_CLNT_LAYOUTERROR, | ||
| 541 | }; | 542 | }; |
| 542 | 543 | ||
| 543 | /* nfs41 types */ | 544 | /* nfs41 types */ |
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 6aa8cc83c3b6..c827d31298cc 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h | |||
| @@ -261,5 +261,6 @@ struct nfs_server { | |||
| 261 | #define NFS_CAP_CLONE (1U << 23) | 261 | #define NFS_CAP_CLONE (1U << 23) |
| 262 | #define NFS_CAP_COPY (1U << 24) | 262 | #define NFS_CAP_COPY (1U << 24) |
| 263 | #define NFS_CAP_OFFLOAD_CANCEL (1U << 25) | 263 | #define NFS_CAP_OFFLOAD_CANCEL (1U << 25) |
| 264 | #define NFS_CAP_LAYOUTERROR (1U << 26) | ||
| 264 | 265 | ||
| 265 | #endif | 266 | #endif |
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h index e27572d30d97..ad69430fd0eb 100644 --- a/include/linux/nfs_page.h +++ b/include/linux/nfs_page.h | |||
| @@ -164,6 +164,16 @@ nfs_list_add_request(struct nfs_page *req, struct list_head *head) | |||
| 164 | list_add_tail(&req->wb_list, head); | 164 | list_add_tail(&req->wb_list, head); |
| 165 | } | 165 | } |
| 166 | 166 | ||
| 167 | /** | ||
| 168 | * nfs_list_move_request - Move a request to a new list | ||
| 169 | * @req: request | ||
| 170 | * @head: head of list into which to insert the request. | ||
| 171 | */ | ||
| 172 | static inline void | ||
| 173 | nfs_list_move_request(struct nfs_page *req, struct list_head *head) | ||
| 174 | { | ||
| 175 | list_move_tail(&req->wb_list, head); | ||
| 176 | } | ||
| 167 | 177 | ||
| 168 | /** | 178 | /** |
| 169 | * nfs_list_remove_request - Remove a request from its wb_list | 179 | * nfs_list_remove_request - Remove a request from its wb_list |
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 441a93ebcac0..9b8324ec08f3 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h | |||
| @@ -383,6 +383,41 @@ struct nfs42_layoutstat_data { | |||
| 383 | struct nfs42_layoutstat_res res; | 383 | struct nfs42_layoutstat_res res; |
| 384 | }; | 384 | }; |
| 385 | 385 | ||
| 386 | struct nfs42_device_error { | ||
| 387 | struct nfs4_deviceid dev_id; | ||
| 388 | int status; | ||
| 389 | enum nfs_opnum4 opnum; | ||
| 390 | }; | ||
| 391 | |||
| 392 | struct nfs42_layout_error { | ||
| 393 | __u64 offset; | ||
| 394 | __u64 length; | ||
| 395 | nfs4_stateid stateid; | ||
| 396 | struct nfs42_device_error errors[1]; | ||
| 397 | }; | ||
| 398 | |||
| 399 | #define NFS42_LAYOUTERROR_MAX 5 | ||
| 400 | |||
| 401 | struct nfs42_layouterror_args { | ||
| 402 | struct nfs4_sequence_args seq_args; | ||
| 403 | struct inode *inode; | ||
| 404 | unsigned int num_errors; | ||
| 405 | struct nfs42_layout_error errors[NFS42_LAYOUTERROR_MAX]; | ||
| 406 | }; | ||
| 407 | |||
| 408 | struct nfs42_layouterror_res { | ||
| 409 | struct nfs4_sequence_res seq_res; | ||
| 410 | unsigned int num_errors; | ||
| 411 | int rpc_status; | ||
| 412 | }; | ||
| 413 | |||
| 414 | struct nfs42_layouterror_data { | ||
| 415 | struct nfs42_layouterror_args args; | ||
| 416 | struct nfs42_layouterror_res res; | ||
| 417 | struct inode *inode; | ||
| 418 | struct pnfs_layout_segment *lseg; | ||
| 419 | }; | ||
| 420 | |||
| 386 | struct nfs42_clone_args { | 421 | struct nfs42_clone_args { |
| 387 | struct nfs4_sequence_args seq_args; | 422 | struct nfs4_sequence_args seq_args; |
| 388 | struct nfs_fh *src_fh; | 423 | struct nfs_fh *src_fh; |
| @@ -1549,7 +1584,7 @@ struct nfs_commit_data { | |||
| 1549 | }; | 1584 | }; |
| 1550 | 1585 | ||
| 1551 | struct nfs_pgio_completion_ops { | 1586 | struct nfs_pgio_completion_ops { |
| 1552 | void (*error_cleanup)(struct list_head *head); | 1587 | void (*error_cleanup)(struct list_head *head, int); |
| 1553 | void (*init_hdr)(struct nfs_pgio_header *hdr); | 1588 | void (*init_hdr)(struct nfs_pgio_header *hdr); |
| 1554 | void (*completion)(struct nfs_pgio_header *hdr); | 1589 | void (*completion)(struct nfs_pgio_header *hdr); |
| 1555 | void (*reschedule_io)(struct nfs_pgio_header *hdr); | 1590 | void (*reschedule_io)(struct nfs_pgio_header *hdr); |
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h index 5a30ad594ccc..27e7fa36f707 100644 --- a/include/linux/nodemask.h +++ b/include/linux/nodemask.h | |||
| @@ -444,8 +444,8 @@ static inline int next_memory_node(int nid) | |||
| 444 | return next_node(nid, node_states[N_MEMORY]); | 444 | return next_node(nid, node_states[N_MEMORY]); |
| 445 | } | 445 | } |
| 446 | 446 | ||
| 447 | extern int nr_node_ids; | 447 | extern unsigned int nr_node_ids; |
| 448 | extern int nr_online_nodes; | 448 | extern unsigned int nr_online_nodes; |
| 449 | 449 | ||
| 450 | static inline void node_set_online(int nid) | 450 | static inline void node_set_online(int nid) |
| 451 | { | 451 | { |
| @@ -485,8 +485,8 @@ static inline int num_node_state(enum node_states state) | |||
| 485 | #define first_online_node 0 | 485 | #define first_online_node 0 |
| 486 | #define first_memory_node 0 | 486 | #define first_memory_node 0 |
| 487 | #define next_online_node(nid) (MAX_NUMNODES) | 487 | #define next_online_node(nid) (MAX_NUMNODES) |
| 488 | #define nr_node_ids 1 | 488 | #define nr_node_ids 1U |
| 489 | #define nr_online_nodes 1 | 489 | #define nr_online_nodes 1U |
| 490 | 490 | ||
| 491 | #define node_set_online(node) node_set_state((node), N_ONLINE) | 491 | #define node_set_online(node) node_set_state((node), N_ONLINE) |
| 492 | #define node_set_offline(node) node_clear_state((node), N_ONLINE) | 492 | #define node_set_offline(node) node_clear_state((node), N_ONLINE) |
diff --git a/include/linux/ntb.h b/include/linux/ntb.h index 181d16601dd9..56a92e3ae3ae 100644 --- a/include/linux/ntb.h +++ b/include/linux/ntb.h | |||
| @@ -296,7 +296,8 @@ struct ntb_dev_ops { | |||
| 296 | int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); | 296 | int (*db_clear_mask)(struct ntb_dev *ntb, u64 db_bits); |
| 297 | 297 | ||
| 298 | int (*peer_db_addr)(struct ntb_dev *ntb, | 298 | int (*peer_db_addr)(struct ntb_dev *ntb, |
| 299 | phys_addr_t *db_addr, resource_size_t *db_size); | 299 | phys_addr_t *db_addr, resource_size_t *db_size, |
| 300 | u64 *db_data, int db_bit); | ||
| 300 | u64 (*peer_db_read)(struct ntb_dev *ntb); | 301 | u64 (*peer_db_read)(struct ntb_dev *ntb); |
| 301 | int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits); | 302 | int (*peer_db_set)(struct ntb_dev *ntb, u64 db_bits); |
| 302 | int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits); | 303 | int (*peer_db_clear)(struct ntb_dev *ntb, u64 db_bits); |
| @@ -1078,6 +1079,8 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) | |||
| 1078 | * @ntb: NTB device context. | 1079 | * @ntb: NTB device context. |
| 1079 | * @db_addr: OUT - The address of the peer doorbell register. | 1080 | * @db_addr: OUT - The address of the peer doorbell register. |
| 1080 | * @db_size: OUT - The number of bytes to write the peer doorbell register. | 1081 | * @db_size: OUT - The number of bytes to write the peer doorbell register. |
| 1082 | * @db_data: OUT - The data of peer doorbell register | ||
| 1083 | * @db_bit: door bell bit number | ||
| 1081 | * | 1084 | * |
| 1082 | * Return the address of the peer doorbell register. This may be used, for | 1085 | * Return the address of the peer doorbell register. This may be used, for |
| 1083 | * example, by drivers that offload memory copy operations to a dma engine. | 1086 | * example, by drivers that offload memory copy operations to a dma engine. |
| @@ -1091,12 +1094,13 @@ static inline int ntb_db_clear_mask(struct ntb_dev *ntb, u64 db_bits) | |||
| 1091 | */ | 1094 | */ |
| 1092 | static inline int ntb_peer_db_addr(struct ntb_dev *ntb, | 1095 | static inline int ntb_peer_db_addr(struct ntb_dev *ntb, |
| 1093 | phys_addr_t *db_addr, | 1096 | phys_addr_t *db_addr, |
| 1094 | resource_size_t *db_size) | 1097 | resource_size_t *db_size, |
| 1098 | u64 *db_data, int db_bit) | ||
| 1095 | { | 1099 | { |
| 1096 | if (!ntb->ops->peer_db_addr) | 1100 | if (!ntb->ops->peer_db_addr) |
| 1097 | return -EINVAL; | 1101 | return -EINVAL; |
| 1098 | 1102 | ||
| 1099 | return ntb->ops->peer_db_addr(ntb, db_addr, db_size); | 1103 | return ntb->ops->peer_db_addr(ntb, db_addr, db_size, db_data, db_bit); |
| 1100 | } | 1104 | } |
| 1101 | 1105 | ||
| 1102 | /** | 1106 | /** |
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h index 91745cc3704c..2bb349035431 100644 --- a/include/linux/nvme-fc-driver.h +++ b/include/linux/nvme-fc-driver.h | |||
| @@ -1,14 +1,6 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Copyright (c) 2016, Avago Technologies | 3 | * Copyright (c) 2016, Avago Technologies |
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | */ | 4 | */ |
| 13 | 5 | ||
| 14 | #ifndef _NVME_FC_DRIVER_H | 6 | #ifndef _NVME_FC_DRIVER_H |
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h index 36cca93a5ff2..067c9fea64fe 100644 --- a/include/linux/nvme-fc.h +++ b/include/linux/nvme-fc.h | |||
| @@ -1,18 +1,6 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Copyright (c) 2016 Avago Technologies. All rights reserved. | 3 | * Copyright (c) 2016 Avago Technologies. All rights reserved. |
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of version 2 of the GNU General Public License as | ||
| 6 | * published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope that it will be useful. | ||
| 9 | * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, | ||
| 10 | * INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A | ||
| 11 | * PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE DISCLAIMED, EXCEPT TO | ||
| 12 | * THE EXTENT THAT SUCH DISCLAIMERS ARE HELD TO BE LEGALLY INVALID. | ||
| 13 | * See the GNU General Public License for more details, a copy of which | ||
| 14 | * can be found in the file COPYING included with this package | ||
| 15 | * | ||
| 16 | */ | 4 | */ |
| 17 | 5 | ||
| 18 | /* | 6 | /* |
diff --git a/include/linux/nvme-rdma.h b/include/linux/nvme-rdma.h index a72fd04aa5e1..3aa97b98dc89 100644 --- a/include/linux/nvme-rdma.h +++ b/include/linux/nvme-rdma.h | |||
| @@ -1,14 +1,6 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Copyright (c) 2015 Mellanox Technologies. All rights reserved. | 3 | * Copyright (c) 2015 Mellanox Technologies. All rights reserved. |
| 3 | * | ||
| 4 | * This program is free software; you can redistribute it and/or modify it | ||
| 5 | * under the terms and conditions of the GNU General Public License, | ||
| 6 | * version 2, as published by the Free Software Foundation. | ||
| 7 | * | ||
| 8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 11 | * more details. | ||
| 12 | */ | 4 | */ |
| 13 | 5 | ||
| 14 | #ifndef _LINUX_NVME_RDMA_H | 6 | #ifndef _LINUX_NVME_RDMA_H |
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h index 03d87c0550a9..959e0bd9a913 100644 --- a/include/linux/nvme-tcp.h +++ b/include/linux/nvme-tcp.h | |||
| @@ -1,4 +1,4 @@ | |||
| 1 | // SPDX-License-Identifier: GPL-2.0 | 1 | /* SPDX-License-Identifier: GPL-2.0 */ |
| 2 | /* | 2 | /* |
| 3 | * NVMe over Fabrics TCP protocol header. | 3 | * NVMe over Fabrics TCP protocol header. |
| 4 | * Copyright (c) 2018 Lightbits Labs. All rights reserved. | 4 | * Copyright (c) 2018 Lightbits Labs. All rights reserved. |
diff --git a/include/linux/nvme.h b/include/linux/nvme.h index bbcc83886899..c40720cb59ac 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h | |||
| @@ -1,15 +1,7 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Definitions for the NVM Express interface | 3 | * Definitions for the NVM Express interface |
| 3 | * Copyright (c) 2011-2014, Intel Corporation. | 4 | * Copyright (c) 2011-2014, Intel Corporation. |
| 4 | * | ||
| 5 | * This program is free software; you can redistribute it and/or modify it | ||
| 6 | * under the terms and conditions of the GNU General Public License, | ||
| 7 | * version 2, as published by the Free Software Foundation. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
| 10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
| 11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | ||
| 12 | * more details. | ||
| 13 | */ | 5 | */ |
| 14 | 6 | ||
| 15 | #ifndef _LINUX_NVME_H | 7 | #ifndef _LINUX_NVME_H |
| @@ -975,8 +967,13 @@ struct nvme_get_log_page_command { | |||
| 975 | __le16 numdl; | 967 | __le16 numdl; |
| 976 | __le16 numdu; | 968 | __le16 numdu; |
| 977 | __u16 rsvd11; | 969 | __u16 rsvd11; |
| 978 | __le32 lpol; | 970 | union { |
| 979 | __le32 lpou; | 971 | struct { |
| 972 | __le32 lpol; | ||
| 973 | __le32 lpou; | ||
| 974 | }; | ||
| 975 | __le64 lpo; | ||
| 976 | }; | ||
| 980 | __u32 rsvd14[2]; | 977 | __u32 rsvd14[2]; |
| 981 | }; | 978 | }; |
| 982 | 979 | ||
diff --git a/include/linux/nvram.h b/include/linux/nvram.h index 28bfb9ab94ca..d29d9c93a927 100644 --- a/include/linux/nvram.h +++ b/include/linux/nvram.h | |||
| @@ -2,13 +2,132 @@ | |||
| 2 | #ifndef _LINUX_NVRAM_H | 2 | #ifndef _LINUX_NVRAM_H |
| 3 | #define _LINUX_NVRAM_H | 3 | #define _LINUX_NVRAM_H |
| 4 | 4 | ||
| 5 | #include <linux/errno.h> | ||
| 5 | #include <uapi/linux/nvram.h> | 6 | #include <uapi/linux/nvram.h> |
| 6 | 7 | ||
| 7 | /* __foo is foo without grabbing the rtc_lock - get it yourself */ | 8 | #ifdef CONFIG_PPC |
| 8 | extern unsigned char __nvram_read_byte(int i); | 9 | #include <asm/machdep.h> |
| 9 | extern unsigned char nvram_read_byte(int i); | 10 | #endif |
| 10 | extern void __nvram_write_byte(unsigned char c, int i); | 11 | |
| 11 | extern void nvram_write_byte(unsigned char c, int i); | 12 | /** |
| 12 | extern int __nvram_check_checksum(void); | 13 | * struct nvram_ops - NVRAM functionality made available to drivers |
| 13 | extern int nvram_check_checksum(void); | 14 | * @read: validate checksum (if any) then load a range of bytes from NVRAM |
| 15 | * @write: store a range of bytes to NVRAM then update checksum (if any) | ||
| 16 | * @read_byte: load a single byte from NVRAM | ||
| 17 | * @write_byte: store a single byte to NVRAM | ||
| 18 | * @get_size: return the fixed number of bytes in the NVRAM | ||
| 19 | * | ||
| 20 | * Architectures which provide an nvram ops struct need not implement all | ||
| 21 | * of these methods. If the NVRAM hardware can be accessed only one byte | ||
| 22 | * at a time then it may be sufficient to provide .read_byte and .write_byte. | ||
| 23 | * If the NVRAM has a checksum (and it is to be checked) the .read and | ||
| 24 | * .write methods can be used to implement that efficiently. | ||
| 25 | * | ||
| 26 | * Portable drivers may use the wrapper functions defined here. | ||
| 27 | * The nvram_read() and nvram_write() functions call the .read and .write | ||
| 28 | * methods when available and fall back on the .read_byte and .write_byte | ||
| 29 | * methods otherwise. | ||
| 30 | */ | ||
| 31 | |||
| 32 | struct nvram_ops { | ||
| 33 | ssize_t (*get_size)(void); | ||
| 34 | unsigned char (*read_byte)(int); | ||
| 35 | void (*write_byte)(unsigned char, int); | ||
| 36 | ssize_t (*read)(char *, size_t, loff_t *); | ||
| 37 | ssize_t (*write)(char *, size_t, loff_t *); | ||
| 38 | #if defined(CONFIG_X86) || defined(CONFIG_M68K) | ||
| 39 | long (*initialize)(void); | ||
| 40 | long (*set_checksum)(void); | ||
| 41 | #endif | ||
| 42 | }; | ||
| 43 | |||
| 44 | extern const struct nvram_ops arch_nvram_ops; | ||
| 45 | |||
| 46 | static inline ssize_t nvram_get_size(void) | ||
| 47 | { | ||
| 48 | #ifdef CONFIG_PPC | ||
| 49 | if (ppc_md.nvram_size) | ||
| 50 | return ppc_md.nvram_size(); | ||
| 51 | #else | ||
| 52 | if (arch_nvram_ops.get_size) | ||
| 53 | return arch_nvram_ops.get_size(); | ||
| 54 | #endif | ||
| 55 | return -ENODEV; | ||
| 56 | } | ||
| 57 | |||
| 58 | static inline unsigned char nvram_read_byte(int addr) | ||
| 59 | { | ||
| 60 | #ifdef CONFIG_PPC | ||
| 61 | if (ppc_md.nvram_read_val) | ||
| 62 | return ppc_md.nvram_read_val(addr); | ||
| 63 | #else | ||
| 64 | if (arch_nvram_ops.read_byte) | ||
| 65 | return arch_nvram_ops.read_byte(addr); | ||
| 66 | #endif | ||
| 67 | return 0xFF; | ||
| 68 | } | ||
| 69 | |||
| 70 | static inline void nvram_write_byte(unsigned char val, int addr) | ||
| 71 | { | ||
| 72 | #ifdef CONFIG_PPC | ||
| 73 | if (ppc_md.nvram_write_val) | ||
| 74 | ppc_md.nvram_write_val(addr, val); | ||
| 75 | #else | ||
| 76 | if (arch_nvram_ops.write_byte) | ||
| 77 | arch_nvram_ops.write_byte(val, addr); | ||
| 78 | #endif | ||
| 79 | } | ||
| 80 | |||
| 81 | static inline ssize_t nvram_read_bytes(char *buf, size_t count, loff_t *ppos) | ||
| 82 | { | ||
| 83 | ssize_t nvram_size = nvram_get_size(); | ||
| 84 | loff_t i; | ||
| 85 | char *p = buf; | ||
| 86 | |||
| 87 | if (nvram_size < 0) | ||
| 88 | return nvram_size; | ||
| 89 | for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) | ||
| 90 | *p = nvram_read_byte(i); | ||
| 91 | *ppos = i; | ||
| 92 | return p - buf; | ||
| 93 | } | ||
| 94 | |||
| 95 | static inline ssize_t nvram_write_bytes(char *buf, size_t count, loff_t *ppos) | ||
| 96 | { | ||
| 97 | ssize_t nvram_size = nvram_get_size(); | ||
| 98 | loff_t i; | ||
| 99 | char *p = buf; | ||
| 100 | |||
| 101 | if (nvram_size < 0) | ||
| 102 | return nvram_size; | ||
| 103 | for (i = *ppos; count > 0 && i < nvram_size; ++i, ++p, --count) | ||
| 104 | nvram_write_byte(*p, i); | ||
| 105 | *ppos = i; | ||
| 106 | return p - buf; | ||
| 107 | } | ||
| 108 | |||
| 109 | static inline ssize_t nvram_read(char *buf, size_t count, loff_t *ppos) | ||
| 110 | { | ||
| 111 | #ifdef CONFIG_PPC | ||
| 112 | if (ppc_md.nvram_read) | ||
| 113 | return ppc_md.nvram_read(buf, count, ppos); | ||
| 114 | #else | ||
| 115 | if (arch_nvram_ops.read) | ||
| 116 | return arch_nvram_ops.read(buf, count, ppos); | ||
| 117 | #endif | ||
| 118 | return nvram_read_bytes(buf, count, ppos); | ||
| 119 | } | ||
| 120 | |||
| 121 | static inline ssize_t nvram_write(char *buf, size_t count, loff_t *ppos) | ||
| 122 | { | ||
| 123 | #ifdef CONFIG_PPC | ||
| 124 | if (ppc_md.nvram_write) | ||
| 125 | return ppc_md.nvram_write(buf, count, ppos); | ||
| 126 | #else | ||
| 127 | if (arch_nvram_ops.write) | ||
| 128 | return arch_nvram_ops.write(buf, count, ppos); | ||
| 129 | #endif | ||
| 130 | return nvram_write_bytes(buf, count, ppos); | ||
| 131 | } | ||
| 132 | |||
| 14 | #endif /* _LINUX_NVRAM_H */ | 133 | #endif /* _LINUX_NVRAM_H */ |
diff --git a/include/linux/objagg.h b/include/linux/objagg.h index 34f38c186ea0..78021777df46 100644 --- a/include/linux/objagg.h +++ b/include/linux/objagg.h | |||
| @@ -6,14 +6,19 @@ | |||
| 6 | 6 | ||
| 7 | struct objagg_ops { | 7 | struct objagg_ops { |
| 8 | size_t obj_size; | 8 | size_t obj_size; |
| 9 | bool (*delta_check)(void *priv, const void *parent_obj, | ||
| 10 | const void *obj); | ||
| 11 | int (*hints_obj_cmp)(const void *obj1, const void *obj2); | ||
| 9 | void * (*delta_create)(void *priv, void *parent_obj, void *obj); | 12 | void * (*delta_create)(void *priv, void *parent_obj, void *obj); |
| 10 | void (*delta_destroy)(void *priv, void *delta_priv); | 13 | void (*delta_destroy)(void *priv, void *delta_priv); |
| 11 | void * (*root_create)(void *priv, void *obj); | 14 | void * (*root_create)(void *priv, void *obj, unsigned int root_id); |
| 15 | #define OBJAGG_OBJ_ROOT_ID_INVALID UINT_MAX | ||
| 12 | void (*root_destroy)(void *priv, void *root_priv); | 16 | void (*root_destroy)(void *priv, void *root_priv); |
| 13 | }; | 17 | }; |
| 14 | 18 | ||
| 15 | struct objagg; | 19 | struct objagg; |
| 16 | struct objagg_obj; | 20 | struct objagg_obj; |
| 21 | struct objagg_hints; | ||
| 17 | 22 | ||
| 18 | const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj); | 23 | const void *objagg_obj_root_priv(const struct objagg_obj *objagg_obj); |
| 19 | const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj); | 24 | const void *objagg_obj_delta_priv(const struct objagg_obj *objagg_obj); |
| @@ -21,7 +26,8 @@ const void *objagg_obj_raw(const struct objagg_obj *objagg_obj); | |||
| 21 | 26 | ||
| 22 | struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj); | 27 | struct objagg_obj *objagg_obj_get(struct objagg *objagg, void *obj); |
| 23 | void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj); | 28 | void objagg_obj_put(struct objagg *objagg, struct objagg_obj *objagg_obj); |
| 24 | struct objagg *objagg_create(const struct objagg_ops *ops, void *priv); | 29 | struct objagg *objagg_create(const struct objagg_ops *ops, |
| 30 | struct objagg_hints *hints, void *priv); | ||
| 25 | void objagg_destroy(struct objagg *objagg); | 31 | void objagg_destroy(struct objagg *objagg); |
| 26 | 32 | ||
| 27 | struct objagg_obj_stats { | 33 | struct objagg_obj_stats { |
| @@ -36,6 +42,7 @@ struct objagg_obj_stats_info { | |||
| 36 | }; | 42 | }; |
| 37 | 43 | ||
| 38 | struct objagg_stats { | 44 | struct objagg_stats { |
| 45 | unsigned int root_count; | ||
| 39 | unsigned int stats_info_count; | 46 | unsigned int stats_info_count; |
| 40 | struct objagg_obj_stats_info stats_info[]; | 47 | struct objagg_obj_stats_info stats_info[]; |
| 41 | }; | 48 | }; |
| @@ -43,4 +50,14 @@ struct objagg_stats { | |||
| 43 | const struct objagg_stats *objagg_stats_get(struct objagg *objagg); | 50 | const struct objagg_stats *objagg_stats_get(struct objagg *objagg); |
| 44 | void objagg_stats_put(const struct objagg_stats *objagg_stats); | 51 | void objagg_stats_put(const struct objagg_stats *objagg_stats); |
| 45 | 52 | ||
| 53 | enum objagg_opt_algo_type { | ||
| 54 | OBJAGG_OPT_ALGO_SIMPLE_GREEDY, | ||
| 55 | }; | ||
| 56 | |||
| 57 | struct objagg_hints *objagg_hints_get(struct objagg *objagg, | ||
| 58 | enum objagg_opt_algo_type opt_algo_type); | ||
| 59 | void objagg_hints_put(struct objagg_hints *objagg_hints); | ||
| 60 | const struct objagg_stats * | ||
| 61 | objagg_hints_stats_get(struct objagg_hints *objagg_hints); | ||
| 62 | |||
| 46 | #endif | 63 | #endif |
diff --git a/include/linux/of.h b/include/linux/of.h index fe472e5195a9..e240992e5cb6 100644 --- a/include/linux/of.h +++ b/include/linux/of.h | |||
| @@ -50,7 +50,6 @@ struct of_irq_controller; | |||
| 50 | 50 | ||
| 51 | struct device_node { | 51 | struct device_node { |
| 52 | const char *name; | 52 | const char *name; |
| 53 | const char *type; | ||
| 54 | phandle phandle; | 53 | phandle phandle; |
| 55 | const char *full_name; | 54 | const char *full_name; |
| 56 | struct fwnode_handle fwnode; | 55 | struct fwnode_handle fwnode; |
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h index 163b79ecd01a..f9737dea9d1f 100644 --- a/include/linux/of_gpio.h +++ b/include/linux/of_gpio.h | |||
| @@ -28,6 +28,8 @@ enum of_gpio_flags { | |||
| 28 | OF_GPIO_SINGLE_ENDED = 0x2, | 28 | OF_GPIO_SINGLE_ENDED = 0x2, |
| 29 | OF_GPIO_OPEN_DRAIN = 0x4, | 29 | OF_GPIO_OPEN_DRAIN = 0x4, |
| 30 | OF_GPIO_TRANSITORY = 0x8, | 30 | OF_GPIO_TRANSITORY = 0x8, |
| 31 | OF_GPIO_PULL_UP = 0x10, | ||
| 32 | OF_GPIO_PULL_DOWN = 0x20, | ||
| 31 | }; | 33 | }; |
| 32 | 34 | ||
| 33 | #ifdef CONFIG_OF_GPIO | 35 | #ifdef CONFIG_OF_GPIO |
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h index 67ab8d271df3..60f541912ccf 100644 --- a/include/linux/of_reserved_mem.h +++ b/include/linux/of_reserved_mem.h | |||
| @@ -35,13 +35,6 @@ int of_reserved_mem_device_init_by_idx(struct device *dev, | |||
| 35 | struct device_node *np, int idx); | 35 | struct device_node *np, int idx); |
| 36 | void of_reserved_mem_device_release(struct device *dev); | 36 | void of_reserved_mem_device_release(struct device *dev); |
| 37 | 37 | ||
| 38 | int early_init_dt_alloc_reserved_memory_arch(phys_addr_t size, | ||
| 39 | phys_addr_t align, | ||
| 40 | phys_addr_t start, | ||
| 41 | phys_addr_t end, | ||
| 42 | bool nomap, | ||
| 43 | phys_addr_t *res_base); | ||
| 44 | |||
| 45 | void fdt_init_reserved_mem(void); | 38 | void fdt_init_reserved_mem(void); |
| 46 | void fdt_reserved_mem_save_node(unsigned long node, const char *uname, | 39 | void fdt_reserved_mem_save_node(unsigned long node, const char *uname, |
| 47 | phys_addr_t base, phys_addr_t size); | 40 | phys_addr_t base, phys_addr_t size); |
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h index 39b4494e29f1..9f8712a4b1a5 100644 --- a/include/linux/page-flags.h +++ b/include/linux/page-flags.h | |||
| @@ -17,8 +17,37 @@ | |||
| 17 | /* | 17 | /* |
| 18 | * Various page->flags bits: | 18 | * Various page->flags bits: |
| 19 | * | 19 | * |
| 20 | * PG_reserved is set for special pages, which can never be swapped out. Some | 20 | * PG_reserved is set for special pages. The "struct page" of such a page |
| 21 | * of them might not even exist... | 21 | * should in general not be touched (e.g. set dirty) except by its owner. |
| 22 | * Pages marked as PG_reserved include: | ||
| 23 | * - Pages part of the kernel image (including vDSO) and similar (e.g. BIOS, | ||
| 24 | * initrd, HW tables) | ||
| 25 | * - Pages reserved or allocated early during boot (before the page allocator | ||
| 26 | * was initialized). This includes (depending on the architecture) the | ||
| 27 | * initial vmemmap, initial page tables, crashkernel, elfcorehdr, and much | ||
| 28 | * much more. Once (if ever) freed, PG_reserved is cleared and they will | ||
| 29 | * be given to the page allocator. | ||
| 30 | * - Pages falling into physical memory gaps - not IORESOURCE_SYSRAM. Trying | ||
| 31 | * to read/write these pages might end badly. Don't touch! | ||
| 32 | * - The zero page(s) | ||
| 33 | * - Pages not added to the page allocator when onlining a section because | ||
| 34 | * they were excluded via the online_page_callback() or because they are | ||
| 35 | * PG_hwpoison. | ||
| 36 | * - Pages allocated in the context of kexec/kdump (loaded kernel image, | ||
| 37 | * control pages, vmcoreinfo) | ||
| 38 | * - MMIO/DMA pages. Some architectures don't allow to ioremap pages that are | ||
| 39 | * not marked PG_reserved (as they might be in use by somebody else who does | ||
| 40 | * not respect the caching strategy). | ||
| 41 | * - Pages part of an offline section (struct pages of offline sections should | ||
| 42 | * not be trusted as they will be initialized when first onlined). | ||
| 43 | * - MCA pages on ia64 | ||
| 44 | * - Pages holding CPU notes for POWER Firmware Assisted Dump | ||
| 45 | * - Device memory (e.g. PMEM, DAX, HMM) | ||
| 46 | * Some PG_reserved pages will be excluded from the hibernation image. | ||
| 47 | * PG_reserved does in general not hinder anybody from dumping or swapping | ||
| 48 | * and is no longer required for remap_pfn_range(). ioremap might require it. | ||
| 49 | * Consequently, PG_reserved for a page mapped into user space can indicate | ||
| 50 | * the zero page, the vDSO, MMIO pages or device memory. | ||
| 22 | * | 51 | * |
| 23 | * The PG_private bitflag is set on pagecache pages if they contain filesystem | 52 | * The PG_private bitflag is set on pagecache pages if they contain filesystem |
| 24 | * specific data (which is normally at page->private). It can be used by | 53 | * specific data (which is normally at page->private). It can be used by |
| @@ -671,7 +700,7 @@ PAGEFLAG_FALSE(DoubleMap) | |||
| 671 | /* Reserve 0x0000007f to catch underflows of page_mapcount */ | 700 | /* Reserve 0x0000007f to catch underflows of page_mapcount */ |
| 672 | #define PAGE_MAPCOUNT_RESERVE -128 | 701 | #define PAGE_MAPCOUNT_RESERVE -128 |
| 673 | #define PG_buddy 0x00000080 | 702 | #define PG_buddy 0x00000080 |
| 674 | #define PG_balloon 0x00000100 | 703 | #define PG_offline 0x00000100 |
| 675 | #define PG_kmemcg 0x00000200 | 704 | #define PG_kmemcg 0x00000200 |
| 676 | #define PG_table 0x00000400 | 705 | #define PG_table 0x00000400 |
| 677 | 706 | ||
| @@ -706,10 +735,13 @@ static __always_inline void __ClearPage##uname(struct page *page) \ | |||
| 706 | PAGE_TYPE_OPS(Buddy, buddy) | 735 | PAGE_TYPE_OPS(Buddy, buddy) |
| 707 | 736 | ||
| 708 | /* | 737 | /* |
| 709 | * PageBalloon() is true for pages that are on the balloon page list | 738 | * PageOffline() indicates that the page is logically offline although the |
| 710 | * (see mm/balloon_compaction.c). | 739 | * containing section is online. (e.g. inflated in a balloon driver or |
| 740 | * not onlined when onlining the section). | ||
| 741 | * The content of these pages is effectively stale. Such pages should not | ||
| 742 | * be touched (read/write/dump/save) except by their owner. | ||
| 711 | */ | 743 | */ |
| 712 | PAGE_TYPE_OPS(Balloon, balloon) | 744 | PAGE_TYPE_OPS(Offline, offline) |
| 713 | 745 | ||
| 714 | /* | 746 | /* |
| 715 | * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on | 747 | * If kmemcg is enabled, the buddy allocator will set PageKmemcg() on |
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h index 4eb26d278046..280ae96dc4c3 100644 --- a/include/linux/page-isolation.h +++ b/include/linux/page-isolation.h | |||
| @@ -41,16 +41,6 @@ int move_freepages_block(struct zone *zone, struct page *page, | |||
| 41 | 41 | ||
| 42 | /* | 42 | /* |
| 43 | * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. | 43 | * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. |
| 44 | * If specified range includes migrate types other than MOVABLE or CMA, | ||
| 45 | * this will fail with -EBUSY. | ||
| 46 | * | ||
| 47 | * For isolating all pages in the range finally, the caller have to | ||
| 48 | * free all pages in the range. test_page_isolated() can be used for | ||
| 49 | * test it. | ||
| 50 | * | ||
| 51 | * The following flags are allowed (they can be combined in a bit mask) | ||
| 52 | * SKIP_HWPOISON - ignore hwpoison pages | ||
| 53 | * REPORT_FAILURE - report details about the failure to isolate the range | ||
| 54 | */ | 44 | */ |
| 55 | int | 45 | int |
| 56 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, | 46 | start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn, |
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e2d7039af6a3..bcf909d0de5f 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h | |||
| @@ -164,7 +164,7 @@ void release_pages(struct page **pages, int nr); | |||
| 164 | * will find the page or it will not. Likewise, the old find_get_page could run | 164 | * will find the page or it will not. Likewise, the old find_get_page could run |
| 165 | * either before the insertion or afterwards, depending on timing. | 165 | * either before the insertion or afterwards, depending on timing. |
| 166 | */ | 166 | */ |
| 167 | static inline int page_cache_get_speculative(struct page *page) | 167 | static inline int __page_cache_add_speculative(struct page *page, int count) |
| 168 | { | 168 | { |
| 169 | #ifdef CONFIG_TINY_RCU | 169 | #ifdef CONFIG_TINY_RCU |
| 170 | # ifdef CONFIG_PREEMPT_COUNT | 170 | # ifdef CONFIG_PREEMPT_COUNT |
| @@ -180,10 +180,10 @@ static inline int page_cache_get_speculative(struct page *page) | |||
| 180 | * SMP requires. | 180 | * SMP requires. |
| 181 | */ | 181 | */ |
| 182 | VM_BUG_ON_PAGE(page_count(page) == 0, page); | 182 | VM_BUG_ON_PAGE(page_count(page) == 0, page); |
| 183 | page_ref_inc(page); | 183 | page_ref_add(page, count); |
| 184 | 184 | ||
| 185 | #else | 185 | #else |
| 186 | if (unlikely(!get_page_unless_zero(page))) { | 186 | if (unlikely(!page_ref_add_unless(page, count, 0))) { |
| 187 | /* | 187 | /* |
| 188 | * Either the page has been freed, or will be freed. | 188 | * Either the page has been freed, or will be freed. |
| 189 | * In either case, retry here and the caller should | 189 | * In either case, retry here and the caller should |
| @@ -197,27 +197,14 @@ static inline int page_cache_get_speculative(struct page *page) | |||
| 197 | return 1; | 197 | return 1; |
| 198 | } | 198 | } |
| 199 | 199 | ||
| 200 | /* | 200 | static inline int page_cache_get_speculative(struct page *page) |
| 201 | * Same as above, but add instead of inc (could just be merged) | ||
| 202 | */ | ||
| 203 | static inline int page_cache_add_speculative(struct page *page, int count) | ||
| 204 | { | 201 | { |
| 205 | VM_BUG_ON(in_interrupt()); | 202 | return __page_cache_add_speculative(page, 1); |
| 206 | 203 | } | |
| 207 | #if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU) | ||
| 208 | # ifdef CONFIG_PREEMPT_COUNT | ||
| 209 | VM_BUG_ON(!in_atomic() && !irqs_disabled()); | ||
| 210 | # endif | ||
| 211 | VM_BUG_ON_PAGE(page_count(page) == 0, page); | ||
| 212 | page_ref_add(page, count); | ||
| 213 | |||
| 214 | #else | ||
| 215 | if (unlikely(!page_ref_add_unless(page, count, 0))) | ||
| 216 | return 0; | ||
| 217 | #endif | ||
| 218 | VM_BUG_ON_PAGE(PageCompound(page) && page != compound_head(page), page); | ||
| 219 | 204 | ||
| 220 | return 1; | 205 | static inline int page_cache_add_speculative(struct page *page, int count) |
| 206 | { | ||
| 207 | return __page_cache_add_speculative(page, count); | ||
| 221 | } | 208 | } |
| 222 | 209 | ||
| 223 | #ifdef CONFIG_NUMA | 210 | #ifdef CONFIG_NUMA |
| @@ -252,6 +239,7 @@ pgoff_t page_cache_prev_miss(struct address_space *mapping, | |||
| 252 | #define FGP_WRITE 0x00000008 | 239 | #define FGP_WRITE 0x00000008 |
| 253 | #define FGP_NOFS 0x00000010 | 240 | #define FGP_NOFS 0x00000010 |
| 254 | #define FGP_NOWAIT 0x00000020 | 241 | #define FGP_NOWAIT 0x00000020 |
| 242 | #define FGP_FOR_MMAP 0x00000040 | ||
| 255 | 243 | ||
| 256 | struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, | 244 | struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset, |
| 257 | int fgp_flags, gfp_t cache_gfp_mask); | 245 | int fgp_flags, gfp_t cache_gfp_mask); |
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h index 7c4b8e27268c..1ebb88e7c184 100644 --- a/include/linux/pci-ats.h +++ b/include/linux/pci-ats.h | |||
| @@ -40,6 +40,7 @@ void pci_disable_pasid(struct pci_dev *pdev); | |||
| 40 | void pci_restore_pasid_state(struct pci_dev *pdev); | 40 | void pci_restore_pasid_state(struct pci_dev *pdev); |
| 41 | int pci_pasid_features(struct pci_dev *pdev); | 41 | int pci_pasid_features(struct pci_dev *pdev); |
| 42 | int pci_max_pasids(struct pci_dev *pdev); | 42 | int pci_max_pasids(struct pci_dev *pdev); |
| 43 | int pci_prg_resp_pasid_required(struct pci_dev *pdev); | ||
| 43 | 44 | ||
| 44 | #else /* CONFIG_PCI_PASID */ | 45 | #else /* CONFIG_PCI_PASID */ |
| 45 | 46 | ||
| @@ -66,6 +67,10 @@ static inline int pci_max_pasids(struct pci_dev *pdev) | |||
| 66 | return -EINVAL; | 67 | return -EINVAL; |
| 67 | } | 68 | } |
| 68 | 69 | ||
| 70 | static inline int pci_prg_resp_pasid_required(struct pci_dev *pdev) | ||
| 71 | { | ||
| 72 | return 0; | ||
| 73 | } | ||
| 69 | #endif /* CONFIG_PCI_PASID */ | 74 | #endif /* CONFIG_PCI_PASID */ |
| 70 | 75 | ||
| 71 | 76 | ||
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h index cb1adf0b78a9..249d4d7fbf18 100644 --- a/include/linux/pci-dma-compat.h +++ b/include/linux/pci-dma-compat.h | |||
| @@ -24,7 +24,7 @@ static inline void * | |||
| 24 | pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, | 24 | pci_zalloc_consistent(struct pci_dev *hwdev, size_t size, |
| 25 | dma_addr_t *dma_handle) | 25 | dma_addr_t *dma_handle) |
| 26 | { | 26 | { |
| 27 | return dma_zalloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); | 27 | return dma_alloc_coherent(&hwdev->dev, size, dma_handle, GFP_ATOMIC); |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static inline void | 30 | static inline void |
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h index 37dab8116901..c3ffa3917f88 100644 --- a/include/linux/pci-epc.h +++ b/include/linux/pci-epc.h | |||
| @@ -59,6 +59,8 @@ struct pci_epc_ops { | |||
| 59 | enum pci_epc_irq_type type, u16 interrupt_num); | 59 | enum pci_epc_irq_type type, u16 interrupt_num); |
| 60 | int (*start)(struct pci_epc *epc); | 60 | int (*start)(struct pci_epc *epc); |
| 61 | void (*stop)(struct pci_epc *epc); | 61 | void (*stop)(struct pci_epc *epc); |
| 62 | const struct pci_epc_features* (*get_features)(struct pci_epc *epc, | ||
| 63 | u8 func_no); | ||
| 62 | struct module *owner; | 64 | struct module *owner; |
| 63 | }; | 65 | }; |
| 64 | 66 | ||
| @@ -97,16 +99,25 @@ struct pci_epc { | |||
| 97 | struct config_group *group; | 99 | struct config_group *group; |
| 98 | /* spinlock to protect against concurrent access of EP controller */ | 100 | /* spinlock to protect against concurrent access of EP controller */ |
| 99 | spinlock_t lock; | 101 | spinlock_t lock; |
| 100 | unsigned int features; | ||
| 101 | }; | 102 | }; |
| 102 | 103 | ||
| 103 | #define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0) | 104 | /** |
| 104 | #define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3)) | 105 | * struct pci_epc_features - features supported by a EPC device per function |
| 105 | #define EPC_FEATURE_MSIX_AVAILABLE BIT(4) | 106 | * @linkup_notifier: indicate if the EPC device can notify EPF driver on link up |
| 106 | #define EPC_FEATURE_SET_BAR(features, bar) \ | 107 | * @msi_capable: indicate if the endpoint function has MSI capability |
| 107 | (features |= (EPC_FEATURE_BAR_MASK & (bar << 1))) | 108 | * @msix_capable: indicate if the endpoint function has MSI-X capability |
| 108 | #define EPC_FEATURE_GET_BAR(features) \ | 109 | * @reserved_bar: bitmap to indicate reserved BAR unavailable to function driver |
| 109 | ((features & EPC_FEATURE_BAR_MASK) >> 1) | 110 | * @bar_fixed_64bit: bitmap to indicate fixed 64bit BARs |
| 111 | * @bar_fixed_size: Array specifying the size supported by each BAR | ||
| 112 | */ | ||
| 113 | struct pci_epc_features { | ||
| 114 | unsigned int linkup_notifier : 1; | ||
| 115 | unsigned int msi_capable : 1; | ||
| 116 | unsigned int msix_capable : 1; | ||
| 117 | u8 reserved_bar; | ||
| 118 | u8 bar_fixed_64bit; | ||
| 119 | u64 bar_fixed_size[BAR_5 + 1]; | ||
| 120 | }; | ||
| 110 | 121 | ||
| 111 | #define to_pci_epc(device) container_of((device), struct pci_epc, dev) | 122 | #define to_pci_epc(device) container_of((device), struct pci_epc, dev) |
| 112 | 123 | ||
| @@ -158,6 +169,10 @@ int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, | |||
| 158 | enum pci_epc_irq_type type, u16 interrupt_num); | 169 | enum pci_epc_irq_type type, u16 interrupt_num); |
| 159 | int pci_epc_start(struct pci_epc *epc); | 170 | int pci_epc_start(struct pci_epc *epc); |
| 160 | void pci_epc_stop(struct pci_epc *epc); | 171 | void pci_epc_stop(struct pci_epc *epc); |
| 172 | const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc, | ||
| 173 | u8 func_no); | ||
| 174 | unsigned int pci_epc_get_first_free_bar(const struct pci_epc_features | ||
| 175 | *epc_features); | ||
| 161 | struct pci_epc *pci_epc_get(const char *epc_name); | 176 | struct pci_epc *pci_epc_get(const char *epc_name); |
| 162 | void pci_epc_put(struct pci_epc *epc); | 177 | void pci_epc_put(struct pci_epc *epc); |
| 163 | 178 | ||
diff --git a/include/linux/pci.h b/include/linux/pci.h index 65f1d8c2f082..77448215ef5b 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h | |||
| @@ -373,6 +373,9 @@ struct pci_dev { | |||
| 373 | bool match_driver; /* Skip attaching driver */ | 373 | bool match_driver; /* Skip attaching driver */ |
| 374 | 374 | ||
| 375 | unsigned int transparent:1; /* Subtractive decode bridge */ | 375 | unsigned int transparent:1; /* Subtractive decode bridge */ |
| 376 | unsigned int io_window:1; /* Bridge has I/O window */ | ||
| 377 | unsigned int pref_window:1; /* Bridge has pref mem window */ | ||
| 378 | unsigned int pref_64_window:1; /* Pref mem window is 64-bit */ | ||
| 376 | unsigned int multifunction:1; /* Multi-function device */ | 379 | unsigned int multifunction:1; /* Multi-function device */ |
| 377 | 380 | ||
| 378 | unsigned int is_busmaster:1; /* Is busmaster */ | 381 | unsigned int is_busmaster:1; /* Is busmaster */ |
| @@ -1393,7 +1396,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, | |||
| 1393 | } | 1396 | } |
| 1394 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | 1397 | int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1395 | unsigned int max_vecs, unsigned int flags, | 1398 | unsigned int max_vecs, unsigned int flags, |
| 1396 | const struct irq_affinity *affd); | 1399 | struct irq_affinity *affd); |
| 1397 | 1400 | ||
| 1398 | void pci_free_irq_vectors(struct pci_dev *dev); | 1401 | void pci_free_irq_vectors(struct pci_dev *dev); |
| 1399 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr); | 1402 | int pci_irq_vector(struct pci_dev *dev, unsigned int nr); |
| @@ -1419,7 +1422,7 @@ static inline int pci_enable_msix_exact(struct pci_dev *dev, | |||
| 1419 | static inline int | 1422 | static inline int |
| 1420 | pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, | 1423 | pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs, |
| 1421 | unsigned int max_vecs, unsigned int flags, | 1424 | unsigned int max_vecs, unsigned int flags, |
| 1422 | const struct irq_affinity *aff_desc) | 1425 | struct irq_affinity *aff_desc) |
| 1423 | { | 1426 | { |
| 1424 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) | 1427 | if ((flags & PCI_IRQ_LEGACY) && min_vecs == 1 && dev->irq) |
| 1425 | return 1; | 1428 | return 1; |
| @@ -1524,11 +1527,13 @@ void pci_ats_init(struct pci_dev *dev); | |||
| 1524 | int pci_enable_ats(struct pci_dev *dev, int ps); | 1527 | int pci_enable_ats(struct pci_dev *dev, int ps); |
| 1525 | void pci_disable_ats(struct pci_dev *dev); | 1528 | void pci_disable_ats(struct pci_dev *dev); |
| 1526 | int pci_ats_queue_depth(struct pci_dev *dev); | 1529 | int pci_ats_queue_depth(struct pci_dev *dev); |
| 1530 | int pci_ats_page_aligned(struct pci_dev *dev); | ||
| 1527 | #else | 1531 | #else |
| 1528 | static inline void pci_ats_init(struct pci_dev *d) { } | 1532 | static inline void pci_ats_init(struct pci_dev *d) { } |
| 1529 | static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } | 1533 | static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; } |
| 1530 | static inline void pci_disable_ats(struct pci_dev *d) { } | 1534 | static inline void pci_disable_ats(struct pci_dev *d) { } |
| 1531 | static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } | 1535 | static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; } |
| 1536 | static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; } | ||
| 1532 | #endif | 1537 | #endif |
| 1533 | 1538 | ||
| 1534 | #ifdef CONFIG_PCIE_PTM | 1539 | #ifdef CONFIG_PCIE_PTM |
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h index 5eaf39dbc388..70e86148cb1e 100644 --- a/include/linux/pci_ids.h +++ b/include/linux/pci_ids.h | |||
| @@ -1140,6 +1140,8 @@ | |||
| 1140 | #define PCI_VENDOR_ID_TCONRAD 0x10da | 1140 | #define PCI_VENDOR_ID_TCONRAD 0x10da |
| 1141 | #define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 | 1141 | #define PCI_DEVICE_ID_TCONRAD_TOKENRING 0x0508 |
| 1142 | 1142 | ||
| 1143 | #define PCI_VENDOR_ID_ROHM 0x10db | ||
| 1144 | |||
| 1143 | #define PCI_VENDOR_ID_NVIDIA 0x10de | 1145 | #define PCI_VENDOR_ID_NVIDIA 0x10de |
| 1144 | #define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 | 1146 | #define PCI_DEVICE_ID_NVIDIA_TNT 0x0020 |
| 1145 | #define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 | 1147 | #define PCI_DEVICE_ID_NVIDIA_TNT2 0x0028 |
| @@ -2573,6 +2575,8 @@ | |||
| 2573 | 2575 | ||
| 2574 | #define PCI_VENDOR_ID_HYGON 0x1d94 | 2576 | #define PCI_VENDOR_ID_HYGON 0x1d94 |
| 2575 | 2577 | ||
| 2578 | #define PCI_VENDOR_ID_HXT 0x1dbf | ||
| 2579 | |||
| 2576 | #define PCI_VENDOR_ID_TEKRAM 0x1de1 | 2580 | #define PCI_VENDOR_ID_TEKRAM 0x1de1 |
| 2577 | #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 | 2581 | #define PCI_DEVICE_ID_TEKRAM_DC290 0xdc29 |
| 2578 | 2582 | ||
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h index 71b75643c432..03cb4b6f842e 100644 --- a/include/linux/percpu-rwsem.h +++ b/include/linux/percpu-rwsem.h | |||
| @@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \ | |||
| 29 | extern int __percpu_down_read(struct percpu_rw_semaphore *, int); | 29 | extern int __percpu_down_read(struct percpu_rw_semaphore *, int); |
| 30 | extern void __percpu_up_read(struct percpu_rw_semaphore *); | 30 | extern void __percpu_up_read(struct percpu_rw_semaphore *); |
| 31 | 31 | ||
| 32 | static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem) | 32 | static inline void percpu_down_read(struct percpu_rw_semaphore *sem) |
| 33 | { | 33 | { |
| 34 | might_sleep(); | 34 | might_sleep(); |
| 35 | 35 | ||
| @@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore * | |||
| 47 | __this_cpu_inc(*sem->read_count); | 47 | __this_cpu_inc(*sem->read_count); |
| 48 | if (unlikely(!rcu_sync_is_idle(&sem->rss))) | 48 | if (unlikely(!rcu_sync_is_idle(&sem->rss))) |
| 49 | __percpu_down_read(sem, false); /* Unconditional memory barrier */ | 49 | __percpu_down_read(sem, false); /* Unconditional memory barrier */ |
| 50 | barrier(); | ||
| 51 | /* | 50 | /* |
| 52 | * The barrier() prevents the compiler from | 51 | * The preempt_enable() prevents the compiler from |
| 53 | * bleeding the critical section out. | 52 | * bleeding the critical section out. |
| 54 | */ | 53 | */ |
| 55 | } | ||
| 56 | |||
| 57 | static inline void percpu_down_read(struct percpu_rw_semaphore *sem) | ||
| 58 | { | ||
| 59 | percpu_down_read_preempt_disable(sem); | ||
| 60 | preempt_enable(); | 54 | preempt_enable(); |
| 61 | } | 55 | } |
| 62 | 56 | ||
| @@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem) | |||
| 83 | return ret; | 77 | return ret; |
| 84 | } | 78 | } |
| 85 | 79 | ||
| 86 | static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem) | 80 | static inline void percpu_up_read(struct percpu_rw_semaphore *sem) |
| 87 | { | 81 | { |
| 88 | /* | 82 | preempt_disable(); |
| 89 | * The barrier() prevents the compiler from | ||
| 90 | * bleeding the critical section out. | ||
| 91 | */ | ||
| 92 | barrier(); | ||
| 93 | /* | 83 | /* |
| 94 | * Same as in percpu_down_read(). | 84 | * Same as in percpu_down_read(). |
| 95 | */ | 85 | */ |
| @@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem | |||
| 102 | rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); | 92 | rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_); |
| 103 | } | 93 | } |
| 104 | 94 | ||
| 105 | static inline void percpu_up_read(struct percpu_rw_semaphore *sem) | ||
| 106 | { | ||
| 107 | preempt_disable(); | ||
| 108 | percpu_up_read_preempt_enable(sem); | ||
| 109 | } | ||
| 110 | |||
| 111 | extern void percpu_down_write(struct percpu_rw_semaphore *); | 95 | extern void percpu_down_write(struct percpu_rw_semaphore *); |
| 112 | extern void percpu_up_write(struct percpu_rw_semaphore *); | 96 | extern void percpu_up_write(struct percpu_rw_semaphore *); |
| 113 | 97 | ||
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 1d5c551a5add..1f678f023850 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -53,8 +53,8 @@ struct perf_guest_info_callbacks { | |||
| 53 | #include <linux/atomic.h> | 53 | #include <linux/atomic.h> |
| 54 | #include <linux/sysfs.h> | 54 | #include <linux/sysfs.h> |
| 55 | #include <linux/perf_regs.h> | 55 | #include <linux/perf_regs.h> |
| 56 | #include <linux/workqueue.h> | ||
| 57 | #include <linux/cgroup.h> | 56 | #include <linux/cgroup.h> |
| 57 | #include <linux/refcount.h> | ||
| 58 | #include <asm/local.h> | 58 | #include <asm/local.h> |
| 59 | 59 | ||
| 60 | struct perf_callchain_entry { | 60 | struct perf_callchain_entry { |
| @@ -240,10 +240,10 @@ struct perf_event; | |||
| 240 | #define PERF_PMU_CAP_NO_INTERRUPT 0x01 | 240 | #define PERF_PMU_CAP_NO_INTERRUPT 0x01 |
| 241 | #define PERF_PMU_CAP_NO_NMI 0x02 | 241 | #define PERF_PMU_CAP_NO_NMI 0x02 |
| 242 | #define PERF_PMU_CAP_AUX_NO_SG 0x04 | 242 | #define PERF_PMU_CAP_AUX_NO_SG 0x04 |
| 243 | #define PERF_PMU_CAP_AUX_SW_DOUBLEBUF 0x08 | ||
| 244 | #define PERF_PMU_CAP_EXCLUSIVE 0x10 | 243 | #define PERF_PMU_CAP_EXCLUSIVE 0x10 |
| 245 | #define PERF_PMU_CAP_ITRACE 0x20 | 244 | #define PERF_PMU_CAP_ITRACE 0x20 |
| 246 | #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 | 245 | #define PERF_PMU_CAP_HETEROGENEOUS_CPUS 0x40 |
| 246 | #define PERF_PMU_CAP_NO_EXCLUDE 0x80 | ||
| 247 | 247 | ||
| 248 | /** | 248 | /** |
| 249 | * struct pmu - generic performance monitoring unit | 249 | * struct pmu - generic performance monitoring unit |
| @@ -409,7 +409,7 @@ struct pmu { | |||
| 409 | /* | 409 | /* |
| 410 | * Set up pmu-private data structures for an AUX area | 410 | * Set up pmu-private data structures for an AUX area |
| 411 | */ | 411 | */ |
| 412 | void *(*setup_aux) (int cpu, void **pages, | 412 | void *(*setup_aux) (struct perf_event *event, void **pages, |
| 413 | int nr_pages, bool overwrite); | 413 | int nr_pages, bool overwrite); |
| 414 | /* optional */ | 414 | /* optional */ |
| 415 | 415 | ||
| @@ -447,6 +447,11 @@ struct pmu { | |||
| 447 | * Filter events for PMU-specific reasons. | 447 | * Filter events for PMU-specific reasons. |
| 448 | */ | 448 | */ |
| 449 | int (*filter_match) (struct perf_event *event); /* optional */ | 449 | int (*filter_match) (struct perf_event *event); /* optional */ |
| 450 | |||
| 451 | /* | ||
| 452 | * Check period value for PERF_EVENT_IOC_PERIOD ioctl. | ||
| 453 | */ | ||
| 454 | int (*check_period) (struct perf_event *event, u64 value); /* optional */ | ||
| 450 | }; | 455 | }; |
| 451 | 456 | ||
| 452 | enum perf_addr_filter_action_t { | 457 | enum perf_addr_filter_action_t { |
| @@ -489,6 +494,11 @@ struct perf_addr_filters_head { | |||
| 489 | unsigned int nr_file_filters; | 494 | unsigned int nr_file_filters; |
| 490 | }; | 495 | }; |
| 491 | 496 | ||
| 497 | struct perf_addr_filter_range { | ||
| 498 | unsigned long start; | ||
| 499 | unsigned long size; | ||
| 500 | }; | ||
| 501 | |||
| 492 | /** | 502 | /** |
| 493 | * enum perf_event_state - the states of an event: | 503 | * enum perf_event_state - the states of an event: |
| 494 | */ | 504 | */ |
| @@ -665,7 +675,7 @@ struct perf_event { | |||
| 665 | /* address range filters */ | 675 | /* address range filters */ |
| 666 | struct perf_addr_filters_head addr_filters; | 676 | struct perf_addr_filters_head addr_filters; |
| 667 | /* vma address array for file-based filders */ | 677 | /* vma address array for file-based filders */ |
| 668 | unsigned long *addr_filters_offs; | 678 | struct perf_addr_filter_range *addr_filter_ranges; |
| 669 | unsigned long addr_filters_gen; | 679 | unsigned long addr_filters_gen; |
| 670 | 680 | ||
| 671 | void (*destroy)(struct perf_event *); | 681 | void (*destroy)(struct perf_event *); |
| @@ -737,7 +747,7 @@ struct perf_event_context { | |||
| 737 | int nr_stat; | 747 | int nr_stat; |
| 738 | int nr_freq; | 748 | int nr_freq; |
| 739 | int rotate_disable; | 749 | int rotate_disable; |
| 740 | atomic_t refcount; | 750 | refcount_t refcount; |
| 741 | struct task_struct *task; | 751 | struct task_struct *task; |
| 742 | 752 | ||
| 743 | /* | 753 | /* |
| @@ -978,9 +988,9 @@ extern void perf_event_output_forward(struct perf_event *event, | |||
| 978 | extern void perf_event_output_backward(struct perf_event *event, | 988 | extern void perf_event_output_backward(struct perf_event *event, |
| 979 | struct perf_sample_data *data, | 989 | struct perf_sample_data *data, |
| 980 | struct pt_regs *regs); | 990 | struct pt_regs *regs); |
| 981 | extern void perf_event_output(struct perf_event *event, | 991 | extern int perf_event_output(struct perf_event *event, |
| 982 | struct perf_sample_data *data, | 992 | struct perf_sample_data *data, |
| 983 | struct pt_regs *regs); | 993 | struct pt_regs *regs); |
| 984 | 994 | ||
| 985 | static inline bool | 995 | static inline bool |
| 986 | is_default_overflow_handler(struct perf_event *event) | 996 | is_default_overflow_handler(struct perf_event *event) |
| @@ -1004,6 +1014,15 @@ perf_event__output_id_sample(struct perf_event *event, | |||
| 1004 | extern void | 1014 | extern void |
| 1005 | perf_log_lost_samples(struct perf_event *event, u64 lost); | 1015 | perf_log_lost_samples(struct perf_event *event, u64 lost); |
| 1006 | 1016 | ||
| 1017 | static inline bool event_has_any_exclude_flag(struct perf_event *event) | ||
| 1018 | { | ||
| 1019 | struct perf_event_attr *attr = &event->attr; | ||
| 1020 | |||
| 1021 | return attr->exclude_idle || attr->exclude_user || | ||
| 1022 | attr->exclude_kernel || attr->exclude_hv || | ||
| 1023 | attr->exclude_guest || attr->exclude_host; | ||
| 1024 | } | ||
| 1025 | |||
| 1007 | static inline bool is_sampling_event(struct perf_event *event) | 1026 | static inline bool is_sampling_event(struct perf_event *event) |
| 1008 | { | 1027 | { |
| 1009 | return event->attr.sample_period != 0; | 1028 | return event->attr.sample_period != 0; |
| @@ -1113,6 +1132,13 @@ static inline void perf_event_task_sched_out(struct task_struct *prev, | |||
| 1113 | } | 1132 | } |
| 1114 | 1133 | ||
| 1115 | extern void perf_event_mmap(struct vm_area_struct *vma); | 1134 | extern void perf_event_mmap(struct vm_area_struct *vma); |
| 1135 | |||
| 1136 | extern void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, | ||
| 1137 | bool unregister, const char *sym); | ||
| 1138 | extern void perf_event_bpf_event(struct bpf_prog *prog, | ||
| 1139 | enum perf_bpf_event_type type, | ||
| 1140 | u16 flags); | ||
| 1141 | |||
| 1116 | extern struct perf_guest_info_callbacks *perf_guest_cbs; | 1142 | extern struct perf_guest_info_callbacks *perf_guest_cbs; |
| 1117 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 1143 | extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
| 1118 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); | 1144 | extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks); |
| @@ -1333,6 +1359,13 @@ static inline int perf_unregister_guest_info_callbacks | |||
| 1333 | (struct perf_guest_info_callbacks *callbacks) { return 0; } | 1359 | (struct perf_guest_info_callbacks *callbacks) { return 0; } |
| 1334 | 1360 | ||
| 1335 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } | 1361 | static inline void perf_event_mmap(struct vm_area_struct *vma) { } |
| 1362 | |||
| 1363 | typedef int (perf_ksymbol_get_name_f)(char *name, int name_len, void *data); | ||
| 1364 | static inline void perf_event_ksymbol(u16 ksym_type, u64 addr, u32 len, | ||
| 1365 | bool unregister, const char *sym) { } | ||
| 1366 | static inline void perf_event_bpf_event(struct bpf_prog *prog, | ||
| 1367 | enum perf_bpf_event_type type, | ||
| 1368 | u16 flags) { } | ||
| 1336 | static inline void perf_event_exec(void) { } | 1369 | static inline void perf_event_exec(void) { } |
| 1337 | static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } | 1370 | static inline void perf_event_comm(struct task_struct *tsk, bool exec) { } |
| 1338 | static inline void perf_event_namespaces(struct task_struct *tsk) { } | 1371 | static inline void perf_event_namespaces(struct task_struct *tsk) { } |
diff --git a/include/linux/phy.h b/include/linux/phy.h index 3b051f761450..34084892a466 100644 --- a/include/linux/phy.h +++ b/include/linux/phy.h | |||
| @@ -48,6 +48,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_features) __ro_after_init; | |||
| 48 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; | 48 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_fibre_features) __ro_after_init; |
| 49 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; | 49 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_gbit_all_ports_features) __ro_after_init; |
| 50 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; | 50 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_features) __ro_after_init; |
| 51 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_fec_features) __ro_after_init; | ||
| 51 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; | 52 | extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_init; |
| 52 | 53 | ||
| 53 | #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) | 54 | #define PHY_BASIC_FEATURES ((unsigned long *)&phy_basic_features) |
| @@ -56,6 +57,7 @@ extern __ETHTOOL_DECLARE_LINK_MODE_MASK(phy_10gbit_full_features) __ro_after_ini | |||
| 56 | #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) | 57 | #define PHY_GBIT_FIBRE_FEATURES ((unsigned long *)&phy_gbit_fibre_features) |
| 57 | #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) | 58 | #define PHY_GBIT_ALL_PORTS_FEATURES ((unsigned long *)&phy_gbit_all_ports_features) |
| 58 | #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) | 59 | #define PHY_10GBIT_FEATURES ((unsigned long *)&phy_10gbit_features) |
| 60 | #define PHY_10GBIT_FEC_FEATURES ((unsigned long *)&phy_10gbit_fec_features) | ||
| 59 | #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) | 61 | #define PHY_10GBIT_FULL_FEATURES ((unsigned long *)&phy_10gbit_full_features) |
| 60 | 62 | ||
| 61 | extern const int phy_10_100_features_array[4]; | 63 | extern const int phy_10_100_features_array[4]; |
| @@ -304,11 +306,6 @@ struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr); | |||
| 304 | * - irq or timer will set NOLINK if link goes down | 306 | * - irq or timer will set NOLINK if link goes down |
| 305 | * - phy_stop moves to HALTED | 307 | * - phy_stop moves to HALTED |
| 306 | * | 308 | * |
| 307 | * CHANGELINK: PHY experienced a change in link state | ||
| 308 | * - timer moves to RUNNING if link | ||
| 309 | * - timer moves to NOLINK if the link is down | ||
| 310 | * - phy_stop moves to HALTED | ||
| 311 | * | ||
| 312 | * HALTED: PHY is up, but no polling or interrupts are done. Or | 309 | * HALTED: PHY is up, but no polling or interrupts are done. Or |
| 313 | * PHY is in an error state. | 310 | * PHY is in an error state. |
| 314 | * | 311 | * |
| @@ -327,7 +324,6 @@ enum phy_state { | |||
| 327 | PHY_RUNNING, | 324 | PHY_RUNNING, |
| 328 | PHY_NOLINK, | 325 | PHY_NOLINK, |
| 329 | PHY_FORCING, | 326 | PHY_FORCING, |
| 330 | PHY_CHANGELINK, | ||
| 331 | PHY_RESUMING | 327 | PHY_RESUMING |
| 332 | }; | 328 | }; |
| 333 | 329 | ||
| @@ -467,8 +463,8 @@ struct phy_device { | |||
| 467 | * only works for PHYs with IDs which match this field | 463 | * only works for PHYs with IDs which match this field |
| 468 | * name: The friendly name of this PHY type | 464 | * name: The friendly name of this PHY type |
| 469 | * phy_id_mask: Defines the important bits of the phy_id | 465 | * phy_id_mask: Defines the important bits of the phy_id |
| 470 | * features: A list of features (speed, duplex, etc) supported | 466 | * features: A mandatory list of features (speed, duplex, etc) |
| 471 | * by this PHY | 467 | * supported by this PHY |
| 472 | * flags: A bitfield defining certain other features this PHY | 468 | * flags: A bitfield defining certain other features this PHY |
| 473 | * supports (like interrupts) | 469 | * supports (like interrupts) |
| 474 | * | 470 | * |
| @@ -506,6 +502,12 @@ struct phy_driver { | |||
| 506 | */ | 502 | */ |
| 507 | int (*probe)(struct phy_device *phydev); | 503 | int (*probe)(struct phy_device *phydev); |
| 508 | 504 | ||
| 505 | /* | ||
| 506 | * Probe the hardware to determine what abilities it has. | ||
| 507 | * Should only set phydev->supported. | ||
| 508 | */ | ||
| 509 | int (*get_features)(struct phy_device *phydev); | ||
| 510 | |||
| 509 | /* PHY Power Management */ | 511 | /* PHY Power Management */ |
| 510 | int (*suspend)(struct phy_device *phydev); | 512 | int (*suspend)(struct phy_device *phydev); |
| 511 | int (*resume)(struct phy_device *phydev); | 513 | int (*resume)(struct phy_device *phydev); |
| @@ -671,13 +673,8 @@ phy_lookup_setting(int speed, int duplex, const unsigned long *mask, | |||
| 671 | bool exact); | 673 | bool exact); |
| 672 | size_t phy_speeds(unsigned int *speeds, size_t size, | 674 | size_t phy_speeds(unsigned int *speeds, size_t size, |
| 673 | unsigned long *mask); | 675 | unsigned long *mask); |
| 674 | 676 | void of_set_phy_supported(struct phy_device *phydev); | |
| 675 | static inline bool __phy_is_started(struct phy_device *phydev) | 677 | void of_set_phy_eee_broken(struct phy_device *phydev); |
| 676 | { | ||
| 677 | WARN_ON(!mutex_is_locked(&phydev->lock)); | ||
| 678 | |||
| 679 | return phydev->state >= PHY_UP; | ||
| 680 | } | ||
| 681 | 678 | ||
| 682 | /** | 679 | /** |
| 683 | * phy_is_started - Convenience function to check whether PHY is started | 680 | * phy_is_started - Convenience function to check whether PHY is started |
| @@ -685,29 +682,12 @@ static inline bool __phy_is_started(struct phy_device *phydev) | |||
| 685 | */ | 682 | */ |
| 686 | static inline bool phy_is_started(struct phy_device *phydev) | 683 | static inline bool phy_is_started(struct phy_device *phydev) |
| 687 | { | 684 | { |
| 688 | bool started; | 685 | return phydev->state >= PHY_UP; |
| 689 | |||
| 690 | mutex_lock(&phydev->lock); | ||
| 691 | started = __phy_is_started(phydev); | ||
| 692 | mutex_unlock(&phydev->lock); | ||
| 693 | |||
| 694 | return started; | ||
| 695 | } | 686 | } |
| 696 | 687 | ||
| 697 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); | 688 | void phy_resolve_aneg_linkmode(struct phy_device *phydev); |
| 698 | 689 | ||
| 699 | /** | 690 | /** |
| 700 | * phy_read_mmd - Convenience function for reading a register | ||
| 701 | * from an MMD on a given PHY. | ||
| 702 | * @phydev: The phy_device struct | ||
| 703 | * @devad: The MMD to read from | ||
| 704 | * @regnum: The register on the MMD to read | ||
| 705 | * | ||
| 706 | * Same rules as for phy_read(); | ||
| 707 | */ | ||
| 708 | int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); | ||
| 709 | |||
| 710 | /** | ||
| 711 | * phy_read - Convenience function for reading a given PHY register | 691 | * phy_read - Convenience function for reading a given PHY register |
| 712 | * @phydev: the phy_device struct | 692 | * @phydev: the phy_device struct |
| 713 | * @regnum: register number to read | 693 | * @regnum: register number to read |
| @@ -762,9 +742,68 @@ static inline int __phy_write(struct phy_device *phydev, u32 regnum, u16 val) | |||
| 762 | val); | 742 | val); |
| 763 | } | 743 | } |
| 764 | 744 | ||
| 745 | /** | ||
| 746 | * phy_read_mmd - Convenience function for reading a register | ||
| 747 | * from an MMD on a given PHY. | ||
| 748 | * @phydev: The phy_device struct | ||
| 749 | * @devad: The MMD to read from | ||
| 750 | * @regnum: The register on the MMD to read | ||
| 751 | * | ||
| 752 | * Same rules as for phy_read(); | ||
| 753 | */ | ||
| 754 | int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); | ||
| 755 | |||
| 756 | /** | ||
| 757 | * __phy_read_mmd - Convenience function for reading a register | ||
| 758 | * from an MMD on a given PHY. | ||
| 759 | * @phydev: The phy_device struct | ||
| 760 | * @devad: The MMD to read from | ||
| 761 | * @regnum: The register on the MMD to read | ||
| 762 | * | ||
| 763 | * Same rules as for __phy_read(); | ||
| 764 | */ | ||
| 765 | int __phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum); | ||
| 766 | |||
| 767 | /** | ||
| 768 | * phy_write_mmd - Convenience function for writing a register | ||
| 769 | * on an MMD on a given PHY. | ||
| 770 | * @phydev: The phy_device struct | ||
| 771 | * @devad: The MMD to write to | ||
| 772 | * @regnum: The register on the MMD to read | ||
| 773 | * @val: value to write to @regnum | ||
| 774 | * | ||
| 775 | * Same rules as for phy_write(); | ||
| 776 | */ | ||
| 777 | int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); | ||
| 778 | |||
| 779 | /** | ||
| 780 | * __phy_write_mmd - Convenience function for writing a register | ||
| 781 | * on an MMD on a given PHY. | ||
| 782 | * @phydev: The phy_device struct | ||
| 783 | * @devad: The MMD to write to | ||
| 784 | * @regnum: The register on the MMD to read | ||
| 785 | * @val: value to write to @regnum | ||
| 786 | * | ||
| 787 | * Same rules as for __phy_write(); | ||
| 788 | */ | ||
| 789 | int __phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); | ||
| 790 | |||
| 791 | int __phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, | ||
| 792 | u16 set); | ||
| 793 | int phy_modify_changed(struct phy_device *phydev, u32 regnum, u16 mask, | ||
| 794 | u16 set); | ||
| 765 | int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); | 795 | int __phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); |
| 766 | int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); | 796 | int phy_modify(struct phy_device *phydev, u32 regnum, u16 mask, u16 set); |
| 767 | 797 | ||
| 798 | int __phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, | ||
| 799 | u16 mask, u16 set); | ||
| 800 | int phy_modify_mmd_changed(struct phy_device *phydev, int devad, u32 regnum, | ||
| 801 | u16 mask, u16 set); | ||
| 802 | int __phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, | ||
| 803 | u16 mask, u16 set); | ||
| 804 | int phy_modify_mmd(struct phy_device *phydev, int devad, u32 regnum, | ||
| 805 | u16 mask, u16 set); | ||
| 806 | |||
| 768 | /** | 807 | /** |
| 769 | * __phy_set_bits - Convenience function for setting bits in a PHY register | 808 | * __phy_set_bits - Convenience function for setting bits in a PHY register |
| 770 | * @phydev: the phy_device struct | 809 | * @phydev: the phy_device struct |
| @@ -815,6 +854,66 @@ static inline int phy_clear_bits(struct phy_device *phydev, u32 regnum, u16 val) | |||
| 815 | } | 854 | } |
| 816 | 855 | ||
| 817 | /** | 856 | /** |
| 857 | * __phy_set_bits_mmd - Convenience function for setting bits in a register | ||
| 858 | * on MMD | ||
| 859 | * @phydev: the phy_device struct | ||
| 860 | * @devad: the MMD containing register to modify | ||
| 861 | * @regnum: register number to modify | ||
| 862 | * @val: bits to set | ||
| 863 | * | ||
| 864 | * The caller must have taken the MDIO bus lock. | ||
| 865 | */ | ||
| 866 | static inline int __phy_set_bits_mmd(struct phy_device *phydev, int devad, | ||
| 867 | u32 regnum, u16 val) | ||
| 868 | { | ||
| 869 | return __phy_modify_mmd(phydev, devad, regnum, 0, val); | ||
| 870 | } | ||
| 871 | |||
| 872 | /** | ||
| 873 | * __phy_clear_bits_mmd - Convenience function for clearing bits in a register | ||
| 874 | * on MMD | ||
| 875 | * @phydev: the phy_device struct | ||
| 876 | * @devad: the MMD containing register to modify | ||
| 877 | * @regnum: register number to modify | ||
| 878 | * @val: bits to clear | ||
| 879 | * | ||
| 880 | * The caller must have taken the MDIO bus lock. | ||
| 881 | */ | ||
| 882 | static inline int __phy_clear_bits_mmd(struct phy_device *phydev, int devad, | ||
| 883 | u32 regnum, u16 val) | ||
| 884 | { | ||
| 885 | return __phy_modify_mmd(phydev, devad, regnum, val, 0); | ||
| 886 | } | ||
| 887 | |||
| 888 | /** | ||
| 889 | * phy_set_bits_mmd - Convenience function for setting bits in a register | ||
| 890 | * on MMD | ||
| 891 | * @phydev: the phy_device struct | ||
| 892 | * @devad: the MMD containing register to modify | ||
| 893 | * @regnum: register number to modify | ||
| 894 | * @val: bits to set | ||
| 895 | */ | ||
| 896 | static inline int phy_set_bits_mmd(struct phy_device *phydev, int devad, | ||
| 897 | u32 regnum, u16 val) | ||
| 898 | { | ||
| 899 | return phy_modify_mmd(phydev, devad, regnum, 0, val); | ||
| 900 | } | ||
| 901 | |||
| 902 | /** | ||
| 903 | * phy_clear_bits_mmd - Convenience function for clearing bits in a register | ||
| 904 | * on MMD | ||
| 905 | * @phydev: the phy_device struct | ||
| 906 | * @devad: the MMD containing register to modify | ||
| 907 | * @regnum: register number to modify | ||
| 908 | * @val: bits to clear | ||
| 909 | */ | ||
| 910 | static inline int phy_clear_bits_mmd(struct phy_device *phydev, int devad, | ||
| 911 | u32 regnum, u16 val) | ||
| 912 | { | ||
| 913 | return phy_modify_mmd(phydev, devad, regnum, val, 0); | ||
| 914 | } | ||
| 915 | |||
| 916 | /** | ||
| 818 | * phy_interrupt_is_valid - Convenience function for testing a given PHY irq | 917 | * phy_interrupt_is_valid - Convenience function for testing a given PHY irq |
| 819 | * @phydev: the phy_device struct | 918 | * @phydev: the phy_device struct |
| 820 | * | 919 | * |
| @@ -890,18 +989,6 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev) | |||
| 890 | return phydev->is_pseudo_fixed_link; | 989 | return phydev->is_pseudo_fixed_link; |
| 891 | } | 990 | } |
| 892 | 991 | ||
| 893 | /** | ||
| 894 | * phy_write_mmd - Convenience function for writing a register | ||
| 895 | * on an MMD on a given PHY. | ||
| 896 | * @phydev: The phy_device struct | ||
| 897 | * @devad: The MMD to read from | ||
| 898 | * @regnum: The register on the MMD to read | ||
| 899 | * @val: value to write to @regnum | ||
| 900 | * | ||
| 901 | * Same rules as for phy_write(); | ||
| 902 | */ | ||
| 903 | int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val); | ||
| 904 | |||
| 905 | int phy_save_page(struct phy_device *phydev); | 992 | int phy_save_page(struct phy_device *phydev); |
| 906 | int phy_select_page(struct phy_device *phydev, int page); | 993 | int phy_select_page(struct phy_device *phydev, int page); |
| 907 | int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); | 994 | int phy_restore_page(struct phy_device *phydev, int oldpage, int ret); |
| @@ -957,7 +1044,6 @@ int phy_aneg_done(struct phy_device *phydev); | |||
| 957 | int phy_speed_down(struct phy_device *phydev, bool sync); | 1044 | int phy_speed_down(struct phy_device *phydev, bool sync); |
| 958 | int phy_speed_up(struct phy_device *phydev); | 1045 | int phy_speed_up(struct phy_device *phydev); |
| 959 | 1046 | ||
| 960 | int phy_stop_interrupts(struct phy_device *phydev); | ||
| 961 | int phy_restart_aneg(struct phy_device *phydev); | 1047 | int phy_restart_aneg(struct phy_device *phydev); |
| 962 | int phy_reset_after_clk_enable(struct phy_device *phydev); | 1048 | int phy_reset_after_clk_enable(struct phy_device *phydev); |
| 963 | 1049 | ||
| @@ -991,6 +1077,7 @@ void phy_attached_info(struct phy_device *phydev); | |||
| 991 | int genphy_config_init(struct phy_device *phydev); | 1077 | int genphy_config_init(struct phy_device *phydev); |
| 992 | int genphy_setup_forced(struct phy_device *phydev); | 1078 | int genphy_setup_forced(struct phy_device *phydev); |
| 993 | int genphy_restart_aneg(struct phy_device *phydev); | 1079 | int genphy_restart_aneg(struct phy_device *phydev); |
| 1080 | int genphy_config_eee_advert(struct phy_device *phydev); | ||
| 994 | int genphy_config_aneg(struct phy_device *phydev); | 1081 | int genphy_config_aneg(struct phy_device *phydev); |
| 995 | int genphy_aneg_done(struct phy_device *phydev); | 1082 | int genphy_aneg_done(struct phy_device *phydev); |
| 996 | int genphy_update_link(struct phy_device *phydev); | 1083 | int genphy_update_link(struct phy_device *phydev); |
| @@ -1003,6 +1090,14 @@ static inline int genphy_no_soft_reset(struct phy_device *phydev) | |||
| 1003 | { | 1090 | { |
| 1004 | return 0; | 1091 | return 0; |
| 1005 | } | 1092 | } |
| 1093 | static inline int genphy_no_ack_interrupt(struct phy_device *phydev) | ||
| 1094 | { | ||
| 1095 | return 0; | ||
| 1096 | } | ||
| 1097 | static inline int genphy_no_config_intr(struct phy_device *phydev) | ||
| 1098 | { | ||
| 1099 | return 0; | ||
| 1100 | } | ||
| 1006 | int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, | 1101 | int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad, |
| 1007 | u16 regnum); | 1102 | u16 regnum); |
| 1008 | int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, | 1103 | int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, |
| @@ -1010,21 +1105,20 @@ int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum, | |||
| 1010 | 1105 | ||
| 1011 | /* Clause 45 PHY */ | 1106 | /* Clause 45 PHY */ |
| 1012 | int genphy_c45_restart_aneg(struct phy_device *phydev); | 1107 | int genphy_c45_restart_aneg(struct phy_device *phydev); |
| 1108 | int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart); | ||
| 1013 | int genphy_c45_aneg_done(struct phy_device *phydev); | 1109 | int genphy_c45_aneg_done(struct phy_device *phydev); |
| 1014 | int genphy_c45_read_link(struct phy_device *phydev, u32 mmd_mask); | 1110 | int genphy_c45_read_link(struct phy_device *phydev); |
| 1015 | int genphy_c45_read_lpa(struct phy_device *phydev); | 1111 | int genphy_c45_read_lpa(struct phy_device *phydev); |
| 1016 | int genphy_c45_read_pma(struct phy_device *phydev); | 1112 | int genphy_c45_read_pma(struct phy_device *phydev); |
| 1017 | int genphy_c45_pma_setup_forced(struct phy_device *phydev); | 1113 | int genphy_c45_pma_setup_forced(struct phy_device *phydev); |
| 1114 | int genphy_c45_an_config_aneg(struct phy_device *phydev); | ||
| 1018 | int genphy_c45_an_disable_aneg(struct phy_device *phydev); | 1115 | int genphy_c45_an_disable_aneg(struct phy_device *phydev); |
| 1019 | int genphy_c45_read_mdix(struct phy_device *phydev); | 1116 | int genphy_c45_read_mdix(struct phy_device *phydev); |
| 1117 | int genphy_c45_pma_read_abilities(struct phy_device *phydev); | ||
| 1118 | int genphy_c45_read_status(struct phy_device *phydev); | ||
| 1020 | 1119 | ||
| 1021 | /* The gen10g_* functions are the old Clause 45 stub */ | 1120 | /* The gen10g_* functions are the old Clause 45 stub */ |
| 1022 | int gen10g_config_aneg(struct phy_device *phydev); | 1121 | int gen10g_config_aneg(struct phy_device *phydev); |
| 1023 | int gen10g_read_status(struct phy_device *phydev); | ||
| 1024 | int gen10g_no_soft_reset(struct phy_device *phydev); | ||
| 1025 | int gen10g_config_init(struct phy_device *phydev); | ||
| 1026 | int gen10g_suspend(struct phy_device *phydev); | ||
| 1027 | int gen10g_resume(struct phy_device *phydev); | ||
| 1028 | 1122 | ||
| 1029 | static inline int phy_read_status(struct phy_device *phydev) | 1123 | static inline int phy_read_status(struct phy_device *phydev) |
| 1030 | { | 1124 | { |
| @@ -1052,7 +1146,7 @@ void phy_ethtool_ksettings_get(struct phy_device *phydev, | |||
| 1052 | int phy_ethtool_ksettings_set(struct phy_device *phydev, | 1146 | int phy_ethtool_ksettings_set(struct phy_device *phydev, |
| 1053 | const struct ethtool_link_ksettings *cmd); | 1147 | const struct ethtool_link_ksettings *cmd); |
| 1054 | int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); | 1148 | int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd); |
| 1055 | int phy_start_interrupts(struct phy_device *phydev); | 1149 | void phy_request_interrupt(struct phy_device *phydev); |
| 1056 | void phy_print_status(struct phy_device *phydev); | 1150 | void phy_print_status(struct phy_device *phydev); |
| 1057 | int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); | 1151 | int phy_set_max_speed(struct phy_device *phydev, u32 max_speed); |
| 1058 | void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); | 1152 | void phy_remove_link_mode(struct phy_device *phydev, u32 link_mode); |
| @@ -1183,4 +1277,7 @@ module_exit(phy_module_exit) | |||
| 1183 | #define module_phy_driver(__phy_drivers) \ | 1277 | #define module_phy_driver(__phy_drivers) \ |
| 1184 | phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) | 1278 | phy_module_driver(__phy_drivers, ARRAY_SIZE(__phy_drivers)) |
| 1185 | 1279 | ||
| 1280 | bool phy_driver_is_genphy(struct phy_device *phydev); | ||
| 1281 | bool phy_driver_is_genphy_10g(struct phy_device *phydev); | ||
| 1282 | |||
| 1186 | #endif /* __PHY_H */ | 1283 | #endif /* __PHY_H */ |
diff --git a/include/linux/phy/phy-mipi-dphy.h b/include/linux/phy/phy-mipi-dphy.h index c08aacc0ac35..a877ffee845d 100644 --- a/include/linux/phy/phy-mipi-dphy.h +++ b/include/linux/phy/phy-mipi-dphy.h | |||
| @@ -6,8 +6,6 @@ | |||
| 6 | #ifndef __PHY_MIPI_DPHY_H_ | 6 | #ifndef __PHY_MIPI_DPHY_H_ |
| 7 | #define __PHY_MIPI_DPHY_H_ | 7 | #define __PHY_MIPI_DPHY_H_ |
| 8 | 8 | ||
| 9 | #include <video/videomode.h> | ||
| 10 | |||
| 11 | /** | 9 | /** |
| 12 | * struct phy_configure_opts_mipi_dphy - MIPI D-PHY configuration set | 10 | * struct phy_configure_opts_mipi_dphy - MIPI D-PHY configuration set |
| 13 | * | 11 | * |
| @@ -192,10 +190,10 @@ struct phy_configure_opts_mipi_dphy { | |||
| 192 | /** | 190 | /** |
| 193 | * @init: | 191 | * @init: |
| 194 | * | 192 | * |
| 195 | * Time, in picoseconds for the initialization period to | 193 | * Time, in microseconds for the initialization period to |
| 196 | * complete. | 194 | * complete. |
| 197 | * | 195 | * |
| 198 | * Minimum value: 100000000 ps | 196 | * Minimum value: 100 us |
| 199 | */ | 197 | */ |
| 200 | unsigned int init; | 198 | unsigned int init; |
| 201 | 199 | ||
| @@ -246,11 +244,11 @@ struct phy_configure_opts_mipi_dphy { | |||
| 246 | /** | 244 | /** |
| 247 | * @wakeup: | 245 | * @wakeup: |
| 248 | * | 246 | * |
| 249 | * Time, in picoseconds, that a transmitter drives a Mark-1 | 247 | * Time, in microseconds, that a transmitter drives a Mark-1 |
| 250 | * state prior to a Stop state in order to initiate an exit | 248 | * state prior to a Stop state in order to initiate an exit |
| 251 | * from ULPS. | 249 | * from ULPS. |
| 252 | * | 250 | * |
| 253 | * Minimum value: 1000000000 ps | 251 | * Minimum value: 1000 us |
| 254 | */ | 252 | */ |
| 255 | unsigned int wakeup; | 253 | unsigned int wakeup; |
| 256 | 254 | ||
| @@ -271,7 +269,8 @@ struct phy_configure_opts_mipi_dphy { | |||
| 271 | /** | 269 | /** |
| 272 | * @lanes: | 270 | * @lanes: |
| 273 | * | 271 | * |
| 274 | * Number of active data lanes used for the transmissions. | 272 | * Number of active, consecutive, data lanes, starting from |
| 273 | * lane 0, used for the transmissions. | ||
| 275 | */ | 274 | */ |
| 276 | unsigned char lanes; | 275 | unsigned char lanes; |
| 277 | }; | 276 | }; |
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index e8e118d70fd7..3f350e2749fe 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h | |||
| @@ -42,6 +42,7 @@ enum phy_mode { | |||
| 42 | PHY_MODE_PCIE, | 42 | PHY_MODE_PCIE, |
| 43 | PHY_MODE_ETHERNET, | 43 | PHY_MODE_ETHERNET, |
| 44 | PHY_MODE_MIPI_DPHY, | 44 | PHY_MODE_MIPI_DPHY, |
| 45 | PHY_MODE_SATA | ||
| 45 | }; | 46 | }; |
| 46 | 47 | ||
| 47 | /** | 48 | /** |
diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h index 9525567b1951..1e5d86ebdaeb 100644 --- a/include/linux/phy_fixed.h +++ b/include/linux/phy_fixed.h | |||
| @@ -15,30 +15,41 @@ struct device_node; | |||
| 15 | #if IS_ENABLED(CONFIG_FIXED_PHY) | 15 | #if IS_ENABLED(CONFIG_FIXED_PHY) |
| 16 | extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); | 16 | extern int fixed_phy_change_carrier(struct net_device *dev, bool new_carrier); |
| 17 | extern int fixed_phy_add(unsigned int irq, int phy_id, | 17 | extern int fixed_phy_add(unsigned int irq, int phy_id, |
| 18 | struct fixed_phy_status *status, | 18 | struct fixed_phy_status *status); |
| 19 | int link_gpio); | ||
| 20 | extern struct phy_device *fixed_phy_register(unsigned int irq, | 19 | extern struct phy_device *fixed_phy_register(unsigned int irq, |
| 21 | struct fixed_phy_status *status, | 20 | struct fixed_phy_status *status, |
| 22 | int link_gpio, | ||
| 23 | struct device_node *np); | 21 | struct device_node *np); |
| 22 | |||
| 23 | extern struct phy_device * | ||
| 24 | fixed_phy_register_with_gpiod(unsigned int irq, | ||
| 25 | struct fixed_phy_status *status, | ||
| 26 | struct gpio_desc *gpiod); | ||
| 27 | |||
| 24 | extern void fixed_phy_unregister(struct phy_device *phydev); | 28 | extern void fixed_phy_unregister(struct phy_device *phydev); |
| 25 | extern int fixed_phy_set_link_update(struct phy_device *phydev, | 29 | extern int fixed_phy_set_link_update(struct phy_device *phydev, |
| 26 | int (*link_update)(struct net_device *, | 30 | int (*link_update)(struct net_device *, |
| 27 | struct fixed_phy_status *)); | 31 | struct fixed_phy_status *)); |
| 28 | #else | 32 | #else |
| 29 | static inline int fixed_phy_add(unsigned int irq, int phy_id, | 33 | static inline int fixed_phy_add(unsigned int irq, int phy_id, |
| 30 | struct fixed_phy_status *status, | 34 | struct fixed_phy_status *status) |
| 31 | int link_gpio) | ||
| 32 | { | 35 | { |
| 33 | return -ENODEV; | 36 | return -ENODEV; |
| 34 | } | 37 | } |
| 35 | static inline struct phy_device *fixed_phy_register(unsigned int irq, | 38 | static inline struct phy_device *fixed_phy_register(unsigned int irq, |
| 36 | struct fixed_phy_status *status, | 39 | struct fixed_phy_status *status, |
| 37 | int gpio_link, | ||
| 38 | struct device_node *np) | 40 | struct device_node *np) |
| 39 | { | 41 | { |
| 40 | return ERR_PTR(-ENODEV); | 42 | return ERR_PTR(-ENODEV); |
| 41 | } | 43 | } |
| 44 | |||
| 45 | static inline struct phy_device * | ||
| 46 | fixed_phy_register_with_gpiod(unsigned int irq, | ||
| 47 | struct fixed_phy_status *status, | ||
| 48 | struct gpio_desc *gpiod) | ||
| 49 | { | ||
| 50 | return ERR_PTR(-ENODEV); | ||
| 51 | } | ||
| 52 | |||
| 42 | static inline void fixed_phy_unregister(struct phy_device *phydev) | 53 | static inline void fixed_phy_unregister(struct phy_device *phydev) |
| 43 | { | 54 | { |
| 44 | } | 55 | } |
diff --git a/include/linux/phylink.h b/include/linux/phylink.h index 021fc6595856..6411c624f63a 100644 --- a/include/linux/phylink.h +++ b/include/linux/phylink.h | |||
| @@ -149,6 +149,13 @@ int mac_link_state(struct net_device *ndev, | |||
| 149 | * configuration word. Nothing is advertised by the MAC. The MAC is | 149 | * configuration word. Nothing is advertised by the MAC. The MAC is |
| 150 | * responsible for reading the configuration word and configuring | 150 | * responsible for reading the configuration word and configuring |
| 151 | * itself accordingly. | 151 | * itself accordingly. |
| 152 | * | ||
| 153 | * Implementations are expected to update the MAC to reflect the | ||
| 154 | * requested settings - i.o.w., if nothing has changed between two | ||
| 155 | * calls, no action is expected. If only flow control settings have | ||
| 156 | * changed, flow control should be updated *without* taking the link | ||
| 157 | * down. This "update" behaviour is critical to avoid bouncing the | ||
| 158 | * link up status. | ||
| 152 | */ | 159 | */ |
| 153 | void mac_config(struct net_device *ndev, unsigned int mode, | 160 | void mac_config(struct net_device *ndev, unsigned int mode, |
| 154 | const struct phylink_link_state *state); | 161 | const struct phylink_link_state *state); |
| @@ -220,6 +227,7 @@ void phylink_ethtool_get_pauseparam(struct phylink *, | |||
| 220 | int phylink_ethtool_set_pauseparam(struct phylink *, | 227 | int phylink_ethtool_set_pauseparam(struct phylink *, |
| 221 | struct ethtool_pauseparam *); | 228 | struct ethtool_pauseparam *); |
| 222 | int phylink_get_eee_err(struct phylink *); | 229 | int phylink_get_eee_err(struct phylink *); |
| 230 | int phylink_init_eee(struct phylink *, bool); | ||
| 223 | int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); | 231 | int phylink_ethtool_get_eee(struct phylink *, struct ethtool_eee *); |
| 224 | int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); | 232 | int phylink_ethtool_set_eee(struct phylink *, struct ethtool_eee *); |
| 225 | int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); | 233 | int phylink_mii_ioctl(struct phylink *, struct ifreq *, int); |
diff --git a/include/linux/pid.h b/include/linux/pid.h index 14a9a39da9c7..b6f4ba16065a 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h | |||
| @@ -109,7 +109,6 @@ extern struct pid *find_vpid(int nr); | |||
| 109 | */ | 109 | */ |
| 110 | extern struct pid *find_get_pid(int nr); | 110 | extern struct pid *find_get_pid(int nr); |
| 111 | extern struct pid *find_ge_pid(int nr, struct pid_namespace *); | 111 | extern struct pid *find_ge_pid(int nr, struct pid_namespace *); |
| 112 | int next_pidmap(struct pid_namespace *pid_ns, unsigned int last); | ||
| 113 | 112 | ||
| 114 | extern struct pid *alloc_pid(struct pid_namespace *ns); | 113 | extern struct pid *alloc_pid(struct pid_namespace *ns); |
| 115 | extern void free_pid(struct pid *pid); | 114 | extern void free_pid(struct pid *pid); |
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h index 8dd85d302b90..93c9dd133e9d 100644 --- a/include/linux/pinctrl/pinconf.h +++ b/include/linux/pinctrl/pinconf.h | |||
| @@ -14,8 +14,6 @@ | |||
| 14 | 14 | ||
| 15 | #ifdef CONFIG_PINCONF | 15 | #ifdef CONFIG_PINCONF |
| 16 | 16 | ||
| 17 | #include <linux/pinctrl/machine.h> | ||
| 18 | |||
| 19 | struct pinctrl_dev; | 17 | struct pinctrl_dev; |
| 20 | struct seq_file; | 18 | struct seq_file; |
| 21 | 19 | ||
| @@ -31,7 +29,6 @@ struct seq_file; | |||
| 31 | * @pin_config_group_get: get configurations for an entire pin group; should | 29 | * @pin_config_group_get: get configurations for an entire pin group; should |
| 32 | * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get. | 30 | * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get. |
| 33 | * @pin_config_group_set: configure all pins in a group | 31 | * @pin_config_group_set: configure all pins in a group |
| 34 | * @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration | ||
| 35 | * @pin_config_dbg_show: optional debugfs display hook that will provide | 32 | * @pin_config_dbg_show: optional debugfs display hook that will provide |
| 36 | * per-device info for a certain pin in debugfs | 33 | * per-device info for a certain pin in debugfs |
| 37 | * @pin_config_group_dbg_show: optional debugfs display hook that will provide | 34 | * @pin_config_group_dbg_show: optional debugfs display hook that will provide |
| @@ -57,9 +54,6 @@ struct pinconf_ops { | |||
| 57 | unsigned selector, | 54 | unsigned selector, |
| 58 | unsigned long *configs, | 55 | unsigned long *configs, |
| 59 | unsigned num_configs); | 56 | unsigned num_configs); |
| 60 | int (*pin_config_dbg_parse_modify) (struct pinctrl_dev *pctldev, | ||
| 61 | const char *arg, | ||
| 62 | unsigned long *config); | ||
| 63 | void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, | 57 | void (*pin_config_dbg_show) (struct pinctrl_dev *pctldev, |
| 64 | struct seq_file *s, | 58 | struct seq_file *s, |
| 65 | unsigned offset); | 59 | unsigned offset); |
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 5a3bb3b7c9ad..5c626fdc10db 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h | |||
| @@ -74,13 +74,6 @@ struct pipe_inode_info { | |||
| 74 | */ | 74 | */ |
| 75 | struct pipe_buf_operations { | 75 | struct pipe_buf_operations { |
| 76 | /* | 76 | /* |
| 77 | * This is set to 1, if the generic pipe read/write may coalesce | ||
| 78 | * data into an existing buffer. If this is set to 0, a new pipe | ||
| 79 | * page segment is always used for new data. | ||
| 80 | */ | ||
| 81 | int can_merge; | ||
| 82 | |||
| 83 | /* | ||
| 84 | * ->confirm() verifies that the data in the pipe buffer is there | 77 | * ->confirm() verifies that the data in the pipe buffer is there |
| 85 | * and that the contents are good. If the pages in the pipe belong | 78 | * and that the contents are good. If the pages in the pipe belong |
| 86 | * to a file system, we may need to wait for IO completion in this | 79 | * to a file system, we may need to wait for IO completion in this |
| @@ -108,18 +101,20 @@ struct pipe_buf_operations { | |||
| 108 | /* | 101 | /* |
| 109 | * Get a reference to the pipe buffer. | 102 | * Get a reference to the pipe buffer. |
| 110 | */ | 103 | */ |
| 111 | void (*get)(struct pipe_inode_info *, struct pipe_buffer *); | 104 | bool (*get)(struct pipe_inode_info *, struct pipe_buffer *); |
| 112 | }; | 105 | }; |
| 113 | 106 | ||
| 114 | /** | 107 | /** |
| 115 | * pipe_buf_get - get a reference to a pipe_buffer | 108 | * pipe_buf_get - get a reference to a pipe_buffer |
| 116 | * @pipe: the pipe that the buffer belongs to | 109 | * @pipe: the pipe that the buffer belongs to |
| 117 | * @buf: the buffer to get a reference to | 110 | * @buf: the buffer to get a reference to |
| 111 | * | ||
| 112 | * Return: %true if the reference was successfully obtained. | ||
| 118 | */ | 113 | */ |
| 119 | static inline void pipe_buf_get(struct pipe_inode_info *pipe, | 114 | static inline __must_check bool pipe_buf_get(struct pipe_inode_info *pipe, |
| 120 | struct pipe_buffer *buf) | 115 | struct pipe_buffer *buf) |
| 121 | { | 116 | { |
| 122 | buf->ops->get(pipe, buf); | 117 | return buf->ops->get(pipe, buf); |
| 123 | } | 118 | } |
| 124 | 119 | ||
| 125 | /** | 120 | /** |
| @@ -178,10 +173,12 @@ struct pipe_inode_info *alloc_pipe_info(void); | |||
| 178 | void free_pipe_info(struct pipe_inode_info *); | 173 | void free_pipe_info(struct pipe_inode_info *); |
| 179 | 174 | ||
| 180 | /* Generic pipe buffer ops functions */ | 175 | /* Generic pipe buffer ops functions */ |
| 181 | void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); | 176 | bool generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *); |
| 182 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); | 177 | int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *); |
| 183 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); | 178 | int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *); |
| 179 | int generic_pipe_buf_nosteal(struct pipe_inode_info *, struct pipe_buffer *); | ||
| 184 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); | 180 | void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *); |
| 181 | void pipe_buf_mark_unmergeable(struct pipe_buffer *buf); | ||
| 185 | 182 | ||
| 186 | extern const struct pipe_buf_operations nosteal_pipe_buf_ops; | 183 | extern const struct pipe_buf_operations nosteal_pipe_buf_ops; |
| 187 | 184 | ||
diff --git a/include/linux/platform_data/at24.h b/include/linux/platform_data/at24.h deleted file mode 100644 index 63507ff464ee..000000000000 --- a/include/linux/platform_data/at24.h +++ /dev/null | |||
| @@ -1,60 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * at24.h - platform_data for the at24 (generic eeprom) driver | ||
| 3 | * (C) Copyright 2008 by Pengutronix | ||
| 4 | * (C) Copyright 2012 by Wolfram Sang | ||
| 5 | * same license as the driver | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _LINUX_AT24_H | ||
| 9 | #define _LINUX_AT24_H | ||
| 10 | |||
| 11 | #include <linux/types.h> | ||
| 12 | #include <linux/nvmem-consumer.h> | ||
| 13 | #include <linux/bitops.h> | ||
| 14 | |||
| 15 | /** | ||
| 16 | * struct at24_platform_data - data to set up at24 (generic eeprom) driver | ||
| 17 | * @byte_len: size of eeprom in byte | ||
| 18 | * @page_size: number of byte which can be written in one go | ||
| 19 | * @flags: tunable options, check AT24_FLAG_* defines | ||
| 20 | * @setup: an optional callback invoked after eeprom is probed; enables kernel | ||
| 21 | code to access eeprom via nvmem, see example | ||
| 22 | * @context: optional parameter passed to setup() | ||
| 23 | * | ||
| 24 | * If you set up a custom eeprom type, please double-check the parameters. | ||
| 25 | * Especially page_size needs extra care, as you risk data loss if your value | ||
| 26 | * is bigger than what the chip actually supports! | ||
| 27 | * | ||
| 28 | * An example in pseudo code for a setup() callback: | ||
| 29 | * | ||
| 30 | * void get_mac_addr(struct nvmem_device *nvmem, void *context) | ||
| 31 | * { | ||
| 32 | * u8 *mac_addr = ethernet_pdata->mac_addr; | ||
| 33 | * off_t offset = context; | ||
| 34 | * | ||
| 35 | * // Read MAC addr from EEPROM | ||
| 36 | * if (nvmem_device_read(nvmem, offset, ETH_ALEN, mac_addr) == ETH_ALEN) | ||
| 37 | * pr_info("Read MAC addr from EEPROM: %pM\n", mac_addr); | ||
| 38 | * } | ||
| 39 | * | ||
| 40 | * This function pointer and context can now be set up in at24_platform_data. | ||
| 41 | */ | ||
| 42 | |||
| 43 | struct at24_platform_data { | ||
| 44 | u32 byte_len; /* size (sum of all addr) */ | ||
| 45 | u16 page_size; /* for writes */ | ||
| 46 | u8 flags; | ||
| 47 | #define AT24_FLAG_ADDR16 BIT(7) /* address pointer is 16 bit */ | ||
| 48 | #define AT24_FLAG_READONLY BIT(6) /* sysfs-entry will be read-only */ | ||
| 49 | #define AT24_FLAG_IRUGO BIT(5) /* sysfs-entry will be world-readable */ | ||
| 50 | #define AT24_FLAG_TAKE8ADDR BIT(4) /* take always 8 addresses (24c00) */ | ||
| 51 | #define AT24_FLAG_SERIAL BIT(3) /* factory-programmed serial number */ | ||
| 52 | #define AT24_FLAG_MAC BIT(2) /* factory-programmed mac address */ | ||
| 53 | #define AT24_FLAG_NO_RDROL BIT(1) /* does not auto-rollover reads to */ | ||
| 54 | /* the next slave address */ | ||
| 55 | |||
| 56 | void (*setup)(struct nvmem_device *nvmem, void *context); | ||
| 57 | void *context; | ||
| 58 | }; | ||
| 59 | |||
| 60 | #endif /* _LINUX_AT24_H */ | ||
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h index 8eaef2f2b691..c3b61ead41f2 100644 --- a/include/linux/platform_data/b53.h +++ b/include/linux/platform_data/b53.h | |||
| @@ -20,7 +20,7 @@ | |||
| 20 | #define __B53_H | 20 | #define __B53_H |
| 21 | 21 | ||
| 22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
| 23 | #include <net/dsa.h> | 23 | #include <linux/platform_data/dsa.h> |
| 24 | 24 | ||
| 25 | struct b53_platform_data { | 25 | struct b53_platform_data { |
| 26 | /* Must be first such that dsa_register_switch() can access it */ | 26 | /* Must be first such that dsa_register_switch() can access it */ |
diff --git a/include/linux/platform_data/davinci-cpufreq.h b/include/linux/platform_data/davinci-cpufreq.h new file mode 100644 index 000000000000..3fbf9f2793b5 --- /dev/null +++ b/include/linux/platform_data/davinci-cpufreq.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * TI DaVinci CPUFreq platform support. | ||
| 4 | * | ||
| 5 | * Copyright (C) 2009 Texas Instruments, Inc. http://www.ti.com/ | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef _MACH_DAVINCI_CPUFREQ_H | ||
| 9 | #define _MACH_DAVINCI_CPUFREQ_H | ||
| 10 | |||
| 11 | #include <linux/cpufreq.h> | ||
| 12 | |||
| 13 | struct davinci_cpufreq_config { | ||
| 14 | struct cpufreq_frequency_table *freq_table; | ||
| 15 | int (*set_voltage)(unsigned int index); | ||
| 16 | int (*init)(void); | ||
| 17 | }; | ||
| 18 | |||
| 19 | #endif /* _MACH_DAVINCI_CPUFREQ_H */ | ||
diff --git a/include/linux/platform_data/dma-dw.h b/include/linux/platform_data/dma-dw.h index 1a1d58ebffbf..f3eaf9ec00a1 100644 --- a/include/linux/platform_data/dma-dw.h +++ b/include/linux/platform_data/dma-dw.h | |||
| @@ -1,12 +1,9 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Driver for the Synopsys DesignWare DMA Controller | 3 | * Driver for the Synopsys DesignWare DMA Controller |
| 3 | * | 4 | * |
| 4 | * Copyright (C) 2007 Atmel Corporation | 5 | * Copyright (C) 2007 Atmel Corporation |
| 5 | * Copyright (C) 2010-2011 ST Microelectronics | 6 | * Copyright (C) 2010-2011 ST Microelectronics |
| 6 | * | ||
| 7 | * This program is free software; you can redistribute it and/or modify | ||
| 8 | * it under the terms of the GNU General Public License version 2 as | ||
| 9 | * published by the Free Software Foundation. | ||
| 10 | */ | 7 | */ |
| 11 | #ifndef _PLATFORM_DATA_DMA_DW_H | 8 | #ifndef _PLATFORM_DATA_DMA_DW_H |
| 12 | #define _PLATFORM_DATA_DMA_DW_H | 9 | #define _PLATFORM_DATA_DMA_DW_H |
| @@ -38,10 +35,6 @@ struct dw_dma_slave { | |||
| 38 | /** | 35 | /** |
| 39 | * struct dw_dma_platform_data - Controller configuration parameters | 36 | * struct dw_dma_platform_data - Controller configuration parameters |
| 40 | * @nr_channels: Number of channels supported by hardware (max 8) | 37 | * @nr_channels: Number of channels supported by hardware (max 8) |
| 41 | * @is_private: The device channels should be marked as private and not for | ||
| 42 | * by the general purpose DMA channel allocator. | ||
| 43 | * @is_memcpy: The device channels do support memory-to-memory transfers. | ||
| 44 | * @is_idma32: The type of the DMA controller is iDMA32 | ||
| 45 | * @chan_allocation_order: Allocate channels starting from 0 or 7 | 38 | * @chan_allocation_order: Allocate channels starting from 0 or 7 |
| 46 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. | 39 | * @chan_priority: Set channel priority increasing from 0 to 7 or 7 to 0. |
| 47 | * @block_size: Maximum block size supported by the controller | 40 | * @block_size: Maximum block size supported by the controller |
| @@ -53,9 +46,6 @@ struct dw_dma_slave { | |||
| 53 | */ | 46 | */ |
| 54 | struct dw_dma_platform_data { | 47 | struct dw_dma_platform_data { |
| 55 | unsigned int nr_channels; | 48 | unsigned int nr_channels; |
| 56 | bool is_private; | ||
| 57 | bool is_memcpy; | ||
| 58 | bool is_idma32; | ||
| 59 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ | 49 | #define CHAN_ALLOCATION_ASCENDING 0 /* zero to seven */ |
| 60 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ | 50 | #define CHAN_ALLOCATION_DESCENDING 1 /* seven to zero */ |
| 61 | unsigned char chan_allocation_order; | 51 | unsigned char chan_allocation_order; |
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h index 7d964e787299..9daea8d42a10 100644 --- a/include/linux/platform_data/dma-imx.h +++ b/include/linux/platform_data/dma-imx.h | |||
| @@ -55,6 +55,7 @@ struct imx_dma_data { | |||
| 55 | int dma_request2; /* secondary DMA request line */ | 55 | int dma_request2; /* secondary DMA request line */ |
| 56 | enum sdma_peripheral_type peripheral_type; | 56 | enum sdma_peripheral_type peripheral_type; |
| 57 | int priority; | 57 | int priority; |
| 58 | struct device_node *of_node; | ||
| 58 | }; | 59 | }; |
| 59 | 60 | ||
| 60 | static inline int imx_dma_is_ipu(struct dma_chan *chan) | 61 | static inline int imx_dma_is_ipu(struct dma_chan *chan) |
diff --git a/include/linux/platform_data/dsa.h b/include/linux/platform_data/dsa.h new file mode 100644 index 000000000000..d4d9bf2060a6 --- /dev/null +++ b/include/linux/platform_data/dsa.h | |||
| @@ -0,0 +1,68 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef __DSA_PDATA_H | ||
| 3 | #define __DSA_PDATA_H | ||
| 4 | |||
| 5 | struct device; | ||
| 6 | struct net_device; | ||
| 7 | |||
| 8 | #define DSA_MAX_SWITCHES 4 | ||
| 9 | #define DSA_MAX_PORTS 12 | ||
| 10 | #define DSA_RTABLE_NONE -1 | ||
| 11 | |||
| 12 | struct dsa_chip_data { | ||
| 13 | /* | ||
| 14 | * How to access the switch configuration registers. | ||
| 15 | */ | ||
| 16 | struct device *host_dev; | ||
| 17 | int sw_addr; | ||
| 18 | |||
| 19 | /* | ||
| 20 | * Reference to network devices | ||
| 21 | */ | ||
| 22 | struct device *netdev[DSA_MAX_PORTS]; | ||
| 23 | |||
| 24 | /* set to size of eeprom if supported by the switch */ | ||
| 25 | int eeprom_len; | ||
| 26 | |||
| 27 | /* Device tree node pointer for this specific switch chip | ||
| 28 | * used during switch setup in case additional properties | ||
| 29 | * and resources needs to be used | ||
| 30 | */ | ||
| 31 | struct device_node *of_node; | ||
| 32 | |||
| 33 | /* | ||
| 34 | * The names of the switch's ports. Use "cpu" to | ||
| 35 | * designate the switch port that the cpu is connected to, | ||
| 36 | * "dsa" to indicate that this port is a DSA link to | ||
| 37 | * another switch, NULL to indicate the port is unused, | ||
| 38 | * or any other string to indicate this is a physical port. | ||
| 39 | */ | ||
| 40 | char *port_names[DSA_MAX_PORTS]; | ||
| 41 | struct device_node *port_dn[DSA_MAX_PORTS]; | ||
| 42 | |||
| 43 | /* | ||
| 44 | * An array of which element [a] indicates which port on this | ||
| 45 | * switch should be used to send packets to that are destined | ||
| 46 | * for switch a. Can be NULL if there is only one switch chip. | ||
| 47 | */ | ||
| 48 | s8 rtable[DSA_MAX_SWITCHES]; | ||
| 49 | }; | ||
| 50 | |||
| 51 | struct dsa_platform_data { | ||
| 52 | /* | ||
| 53 | * Reference to a Linux network interface that connects | ||
| 54 | * to the root switch chip of the tree. | ||
| 55 | */ | ||
| 56 | struct device *netdev; | ||
| 57 | struct net_device *of_netdev; | ||
| 58 | |||
| 59 | /* | ||
| 60 | * Info structs describing each of the switch chips | ||
| 61 | * connected via this network interface. | ||
| 62 | */ | ||
| 63 | int nr_chips; | ||
| 64 | struct dsa_chip_data *chip; | ||
| 65 | }; | ||
| 66 | |||
| 67 | |||
| 68 | #endif /* __DSA_PDATA_H */ | ||
diff --git a/include/linux/platform_data/gpio/gpio-amd-fch.h b/include/linux/platform_data/gpio/gpio-amd-fch.h new file mode 100644 index 000000000000..9e46678edb2a --- /dev/null +++ b/include/linux/platform_data/gpio/gpio-amd-fch.h | |||
| @@ -0,0 +1,46 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 2 | |||
| 3 | /* | ||
| 4 | * AMD FCH gpio driver platform-data | ||
| 5 | * | ||
| 6 | * Copyright (C) 2018 metux IT consult | ||
| 7 | * Author: Enrico Weigelt <info@metux.net> | ||
| 8 | * | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H | ||
| 12 | #define __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H | ||
| 13 | |||
| 14 | #define AMD_FCH_GPIO_DRIVER_NAME "gpio_amd_fch" | ||
| 15 | |||
| 16 | /* | ||
| 17 | * gpio register index definitions | ||
| 18 | */ | ||
| 19 | #define AMD_FCH_GPIO_REG_GPIO49 0x40 | ||
| 20 | #define AMD_FCH_GPIO_REG_GPIO50 0x41 | ||
| 21 | #define AMD_FCH_GPIO_REG_GPIO51 0x42 | ||
| 22 | #define AMD_FCH_GPIO_REG_GPIO59_DEVSLP0 0x43 | ||
| 23 | #define AMD_FCH_GPIO_REG_GPIO57 0x44 | ||
| 24 | #define AMD_FCH_GPIO_REG_GPIO58 0x45 | ||
| 25 | #define AMD_FCH_GPIO_REG_GPIO59_DEVSLP1 0x46 | ||
| 26 | #define AMD_FCH_GPIO_REG_GPIO64 0x47 | ||
| 27 | #define AMD_FCH_GPIO_REG_GPIO68 0x48 | ||
| 28 | #define AMD_FCH_GPIO_REG_GPIO66_SPKR 0x5B | ||
| 29 | #define AMD_FCH_GPIO_REG_GPIO71 0x4D | ||
| 30 | #define AMD_FCH_GPIO_REG_GPIO32_GE1 0x59 | ||
| 31 | #define AMD_FCH_GPIO_REG_GPIO33_GE2 0x5A | ||
| 32 | #define AMT_FCH_GPIO_REG_GEVT22 0x09 | ||
| 33 | |||
| 34 | /* | ||
| 35 | * struct amd_fch_gpio_pdata - GPIO chip platform data | ||
| 36 | * @gpio_num: number of entries | ||
| 37 | * @gpio_reg: array of gpio registers | ||
| 38 | * @gpio_names: array of gpio names | ||
| 39 | */ | ||
| 40 | struct amd_fch_gpio_pdata { | ||
| 41 | int gpio_num; | ||
| 42 | int *gpio_reg; | ||
| 43 | const char * const *gpio_names; | ||
| 44 | }; | ||
| 45 | |||
| 46 | #endif /* __LINUX_PLATFORM_DATA_GPIO_AMD_FCH_H */ | ||
diff --git a/include/linux/platform_data/i2c-cbus-gpio.h b/include/linux/platform_data/i2c-cbus-gpio.h deleted file mode 100644 index 6faa992a9502..000000000000 --- a/include/linux/platform_data/i2c-cbus-gpio.h +++ /dev/null | |||
| @@ -1,27 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * i2c-cbus-gpio.h - CBUS I2C platform_data definition | ||
| 3 | * | ||
| 4 | * Copyright (C) 2004-2009 Nokia Corporation | ||
| 5 | * | ||
| 6 | * Written by Felipe Balbi and Aaro Koskinen. | ||
| 7 | * | ||
| 8 | * This file is subject to the terms and conditions of the GNU General | ||
| 9 | * Public License. See the file "COPYING" in the main directory of this | ||
| 10 | * archive for more details. | ||
| 11 | * | ||
| 12 | * This program is distributed in the hope that it will be useful, | ||
| 13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 15 | * GNU General Public License for more details. | ||
| 16 | */ | ||
| 17 | |||
| 18 | #ifndef __INCLUDE_LINUX_I2C_CBUS_GPIO_H | ||
| 19 | #define __INCLUDE_LINUX_I2C_CBUS_GPIO_H | ||
| 20 | |||
| 21 | struct i2c_cbus_platform_data { | ||
| 22 | int dat_gpio; | ||
| 23 | int clk_gpio; | ||
| 24 | int sel_gpio; | ||
| 25 | }; | ||
| 26 | |||
| 27 | #endif /* __INCLUDE_LINUX_I2C_CBUS_GPIO_H */ | ||
diff --git a/include/linux/platform_data/i2c-ocores.h b/include/linux/platform_data/i2c-ocores.h index 113d6b12f650..e6326cbafe59 100644 --- a/include/linux/platform_data/i2c-ocores.h +++ b/include/linux/platform_data/i2c-ocores.h | |||
| @@ -1,11 +1,8 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * i2c-ocores.h - definitions for the i2c-ocores interface | 3 | * i2c-ocores.h - definitions for the i2c-ocores interface |
| 3 | * | 4 | * |
| 4 | * Peter Korsgaard <peter@korsgaard.com> | 5 | * Peter Korsgaard <peter@korsgaard.com> |
| 5 | * | ||
| 6 | * This file is licensed under the terms of the GNU General Public License | ||
| 7 | * version 2. This program is licensed "as is" without any warranty of any | ||
| 8 | * kind, whether express or implied. | ||
| 9 | */ | 6 | */ |
| 10 | 7 | ||
| 11 | #ifndef _LINUX_I2C_OCORES_H | 8 | #ifndef _LINUX_I2C_OCORES_H |
| @@ -15,6 +12,7 @@ struct ocores_i2c_platform_data { | |||
| 15 | u32 reg_shift; /* register offset shift value */ | 12 | u32 reg_shift; /* register offset shift value */ |
| 16 | u32 reg_io_width; /* register io read/write width */ | 13 | u32 reg_io_width; /* register io read/write width */ |
| 17 | u32 clock_khz; /* input clock in kHz */ | 14 | u32 clock_khz; /* input clock in kHz */ |
| 15 | u32 bus_khz; /* bus clock in kHz */ | ||
| 18 | bool big_endian; /* registers are big endian */ | 16 | bool big_endian; /* registers are big endian */ |
| 19 | u8 num_devices; /* number of devices in the devices list */ | 17 | u8 num_devices; /* number of devices in the devices list */ |
| 20 | struct i2c_board_info const *devices; /* devices connected to the bus */ | 18 | struct i2c_board_info const *devices; /* devices connected to the bus */ |
diff --git a/include/linux/platform_data/media/si4713.h b/include/linux/platform_data/media/si4713.h index 932668ad54f7..13b3eb7a9059 100644 --- a/include/linux/platform_data/media/si4713.h +++ b/include/linux/platform_data/media/si4713.h | |||
| @@ -31,7 +31,7 @@ struct si4713_platform_data { | |||
| 31 | */ | 31 | */ |
| 32 | struct si4713_rnl { | 32 | struct si4713_rnl { |
| 33 | __u32 index; /* modulator index */ | 33 | __u32 index; /* modulator index */ |
| 34 | __u32 frequency; /* frequency to peform rnl measurement */ | 34 | __u32 frequency; /* frequency to perform rnl measurement */ |
| 35 | __s32 rnl; /* result of measurement in dBuV */ | 35 | __s32 rnl; /* result of measurement in dBuV */ |
| 36 | __u32 reserved[4]; /* drivers and apps must init this to 0 */ | 36 | __u32 reserved[4]; /* drivers and apps must init this to 0 */ |
| 37 | }; | 37 | }; |
| @@ -40,7 +40,7 @@ struct si4713_rnl { | |||
| 40 | * This is the ioctl number to query for rnl. Users must pass a | 40 | * This is the ioctl number to query for rnl. Users must pass a |
| 41 | * struct si4713_rnl pointer specifying desired frequency in 'frequency' field | 41 | * struct si4713_rnl pointer specifying desired frequency in 'frequency' field |
| 42 | * following driver capabilities (i.e V4L2_TUNER_CAP_LOW). | 42 | * following driver capabilities (i.e V4L2_TUNER_CAP_LOW). |
| 43 | * Driver must return measured value in the same struture, filling 'rnl' field. | 43 | * Driver must return measured value in the same structure, filling 'rnl' field. |
| 44 | */ | 44 | */ |
| 45 | #define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \ | 45 | #define SI4713_IOC_MEASURE_RNL _IOWR('V', BASE_VIDIOC_PRIVATE + 0, \ |
| 46 | struct si4713_rnl) | 46 | struct si4713_rnl) |
diff --git a/include/linux/platform_data/media/soc_camera_platform.h b/include/linux/platform_data/media/soc_camera_platform.h deleted file mode 100644 index 1e5065dab430..000000000000 --- a/include/linux/platform_data/media/soc_camera_platform.h +++ /dev/null | |||
| @@ -1,83 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Generic Platform Camera Driver Header | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008 Magnus Damm | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef __SOC_CAMERA_H__ | ||
| 12 | #define __SOC_CAMERA_H__ | ||
| 13 | |||
| 14 | #include <linux/videodev2.h> | ||
| 15 | #include <media/soc_camera.h> | ||
| 16 | #include <media/v4l2-mediabus.h> | ||
| 17 | |||
| 18 | struct device; | ||
| 19 | |||
| 20 | struct soc_camera_platform_info { | ||
| 21 | const char *format_name; | ||
| 22 | unsigned long format_depth; | ||
| 23 | struct v4l2_mbus_framefmt format; | ||
| 24 | unsigned long mbus_param; | ||
| 25 | enum v4l2_mbus_type mbus_type; | ||
| 26 | struct soc_camera_device *icd; | ||
| 27 | int (*set_capture)(struct soc_camera_platform_info *info, int enable); | ||
| 28 | }; | ||
| 29 | |||
| 30 | static inline void soc_camera_platform_release(struct platform_device **pdev) | ||
| 31 | { | ||
| 32 | *pdev = NULL; | ||
| 33 | } | ||
| 34 | |||
| 35 | static inline int soc_camera_platform_add(struct soc_camera_device *icd, | ||
| 36 | struct platform_device **pdev, | ||
| 37 | struct soc_camera_link *plink, | ||
| 38 | void (*release)(struct device *dev), | ||
| 39 | int id) | ||
| 40 | { | ||
| 41 | struct soc_camera_subdev_desc *ssdd = | ||
| 42 | (struct soc_camera_subdev_desc *)plink; | ||
| 43 | struct soc_camera_platform_info *info = ssdd->drv_priv; | ||
| 44 | int ret; | ||
| 45 | |||
| 46 | if (&icd->sdesc->subdev_desc != ssdd) | ||
| 47 | return -ENODEV; | ||
| 48 | |||
| 49 | if (*pdev) | ||
| 50 | return -EBUSY; | ||
| 51 | |||
| 52 | *pdev = platform_device_alloc("soc_camera_platform", id); | ||
| 53 | if (!*pdev) | ||
| 54 | return -ENOMEM; | ||
| 55 | |||
| 56 | info->icd = icd; | ||
| 57 | |||
| 58 | (*pdev)->dev.platform_data = info; | ||
| 59 | (*pdev)->dev.release = release; | ||
| 60 | |||
| 61 | ret = platform_device_add(*pdev); | ||
| 62 | if (ret < 0) { | ||
| 63 | platform_device_put(*pdev); | ||
| 64 | *pdev = NULL; | ||
| 65 | info->icd = NULL; | ||
| 66 | } | ||
| 67 | |||
| 68 | return ret; | ||
| 69 | } | ||
| 70 | |||
| 71 | static inline void soc_camera_platform_del(const struct soc_camera_device *icd, | ||
| 72 | struct platform_device *pdev, | ||
| 73 | const struct soc_camera_link *plink) | ||
| 74 | { | ||
| 75 | const struct soc_camera_subdev_desc *ssdd = | ||
| 76 | (const struct soc_camera_subdev_desc *)plink; | ||
| 77 | if (&icd->sdesc->subdev_desc != ssdd || !pdev) | ||
| 78 | return; | ||
| 79 | |||
| 80 | platform_device_unregister(pdev); | ||
| 81 | } | ||
| 82 | |||
| 83 | #endif /* __SOC_CAMERA_H__ */ | ||
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h index 19f5cb618c55..6d54fe3bcac9 100644 --- a/include/linux/platform_data/mlxreg.h +++ b/include/linux/platform_data/mlxreg.h | |||
| @@ -35,6 +35,19 @@ | |||
| 35 | #define __LINUX_PLATFORM_DATA_MLXREG_H | 35 | #define __LINUX_PLATFORM_DATA_MLXREG_H |
| 36 | 36 | ||
| 37 | #define MLXREG_CORE_LABEL_MAX_SIZE 32 | 37 | #define MLXREG_CORE_LABEL_MAX_SIZE 32 |
| 38 | #define MLXREG_CORE_WD_FEATURE_NOWAYOUT BIT(0) | ||
| 39 | #define MLXREG_CORE_WD_FEATURE_START_AT_BOOT BIT(1) | ||
| 40 | |||
| 41 | /** | ||
| 42 | * enum mlxreg_wdt_type - type of HW watchdog | ||
| 43 | * | ||
| 44 | * TYPE1 HW watchdog implementation exist in old systems. | ||
| 45 | * All new systems have TYPE2 HW watchdog. | ||
| 46 | */ | ||
| 47 | enum mlxreg_wdt_type { | ||
| 48 | MLX_WDT_TYPE1, | ||
| 49 | MLX_WDT_TYPE2, | ||
| 50 | }; | ||
| 38 | 51 | ||
| 39 | /** | 52 | /** |
| 40 | * struct mlxreg_hotplug_device - I2C device data: | 53 | * struct mlxreg_hotplug_device - I2C device data: |
| @@ -61,6 +74,7 @@ struct mlxreg_hotplug_device { | |||
| 61 | * @reg: attribute register; | 74 | * @reg: attribute register; |
| 62 | * @mask: attribute access mask; | 75 | * @mask: attribute access mask; |
| 63 | * @bit: attribute effective bit; | 76 | * @bit: attribute effective bit; |
| 77 | * @capability: attribute capability register; | ||
| 64 | * @mode: access mode; | 78 | * @mode: access mode; |
| 65 | * @np - pointer to node platform associated with attribute; | 79 | * @np - pointer to node platform associated with attribute; |
| 66 | * @hpdev - hotplug device data; | 80 | * @hpdev - hotplug device data; |
| @@ -72,6 +86,7 @@ struct mlxreg_core_data { | |||
| 72 | u32 reg; | 86 | u32 reg; |
| 73 | u32 mask; | 87 | u32 mask; |
| 74 | u32 bit; | 88 | u32 bit; |
| 89 | u32 capability; | ||
| 75 | umode_t mode; | 90 | umode_t mode; |
| 76 | struct device_node *np; | 91 | struct device_node *np; |
| 77 | struct mlxreg_hotplug_device hpdev; | 92 | struct mlxreg_hotplug_device hpdev; |
| @@ -107,14 +122,20 @@ struct mlxreg_core_item { | |||
| 107 | /** | 122 | /** |
| 108 | * struct mlxreg_core_platform_data - platform data: | 123 | * struct mlxreg_core_platform_data - platform data: |
| 109 | * | 124 | * |
| 110 | * @led_data: led private data; | 125 | * @data: instance private data; |
| 111 | * @regmap: register map of parent device; | 126 | * @regmap: register map of parent device; |
| 112 | * @counter: number of led instances; | 127 | * @counter: number of instances; |
| 128 | * @features: supported features of device; | ||
| 129 | * @version: implementation version; | ||
| 130 | * @identity: device identity name; | ||
| 113 | */ | 131 | */ |
| 114 | struct mlxreg_core_platform_data { | 132 | struct mlxreg_core_platform_data { |
| 115 | struct mlxreg_core_data *data; | 133 | struct mlxreg_core_data *data; |
| 116 | void *regmap; | 134 | void *regmap; |
| 117 | int counter; | 135 | int counter; |
| 136 | u32 features; | ||
| 137 | u32 version; | ||
| 138 | char identity[MLXREG_CORE_LABEL_MAX_SIZE]; | ||
| 118 | }; | 139 | }; |
| 119 | 140 | ||
| 120 | /** | 141 | /** |
diff --git a/include/linux/platform_data/mv88e6xxx.h b/include/linux/platform_data/mv88e6xxx.h index f63af2955ea0..963730b44aea 100644 --- a/include/linux/platform_data/mv88e6xxx.h +++ b/include/linux/platform_data/mv88e6xxx.h | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | #ifndef __DSA_MV88E6XXX_H | 2 | #ifndef __DSA_MV88E6XXX_H |
| 3 | #define __DSA_MV88E6XXX_H | 3 | #define __DSA_MV88E6XXX_H |
| 4 | 4 | ||
| 5 | #include <net/dsa.h> | 5 | #include <linux/platform_data/dsa.h> |
| 6 | 6 | ||
| 7 | struct dsa_mv88e6xxx_pdata { | 7 | struct dsa_mv88e6xxx_pdata { |
| 8 | /* Must be first, such that dsa_register_switch() can access this | 8 | /* Must be first, such that dsa_register_switch() can access this |
diff --git a/include/linux/platform_data/spi-ath79.h b/include/linux/platform_data/spi-ath79.h new file mode 100644 index 000000000000..aa71216edf99 --- /dev/null +++ b/include/linux/platform_data/spi-ath79.h | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | /* | ||
| 2 | * Platform data definition for Atheros AR71XX/AR724X/AR913X SPI controller | ||
| 3 | * | ||
| 4 | * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify it | ||
| 7 | * under the terms of the GNU General Public License version 2 as published | ||
| 8 | * by the Free Software Foundation. | ||
| 9 | */ | ||
| 10 | |||
| 11 | #ifndef _ATH79_SPI_PLATFORM_H | ||
| 12 | #define _ATH79_SPI_PLATFORM_H | ||
| 13 | |||
| 14 | struct ath79_spi_platform_data { | ||
| 15 | unsigned bus_num; | ||
| 16 | unsigned num_chipselect; | ||
| 17 | }; | ||
| 18 | |||
| 19 | #endif /* _ATH79_SPI_PLATFORM_H */ | ||
diff --git a/include/linux/platform_data/usb-davinci.h b/include/linux/platform_data/usb-davinci.h index 0926e99f2e8f..879f5c78b91a 100644 --- a/include/linux/platform_data/usb-davinci.h +++ b/include/linux/platform_data/usb-davinci.h | |||
| @@ -11,22 +11,8 @@ | |||
| 11 | #ifndef __ASM_ARCH_USB_H | 11 | #ifndef __ASM_ARCH_USB_H |
| 12 | #define __ASM_ARCH_USB_H | 12 | #define __ASM_ARCH_USB_H |
| 13 | 13 | ||
| 14 | struct da8xx_ohci_root_hub; | ||
| 15 | |||
| 16 | typedef void (*da8xx_ocic_handler_t)(struct da8xx_ohci_root_hub *hub, | ||
| 17 | unsigned port); | ||
| 18 | |||
| 19 | /* Passed as the platform data to the OHCI driver */ | 14 | /* Passed as the platform data to the OHCI driver */ |
| 20 | struct da8xx_ohci_root_hub { | 15 | struct da8xx_ohci_root_hub { |
| 21 | /* Switch the port power on/off */ | ||
| 22 | int (*set_power)(unsigned port, int on); | ||
| 23 | /* Read the port power status */ | ||
| 24 | int (*get_power)(unsigned port); | ||
| 25 | /* Read the port over-current indicator */ | ||
| 26 | int (*get_oci)(unsigned port); | ||
| 27 | /* Over-current indicator change notification (pass NULL to disable) */ | ||
| 28 | int (*ocic_notify)(da8xx_ocic_handler_t handler); | ||
| 29 | |||
| 30 | /* Time from power on to power good (in 2 ms units) */ | 16 | /* Time from power on to power good (in 2 ms units) */ |
| 31 | u8 potpgt; | 17 | u8 potpgt; |
| 32 | }; | 18 | }; |
diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h new file mode 100644 index 000000000000..446473a46b88 --- /dev/null +++ b/include/linux/platform_data/wilco-ec.h | |||
| @@ -0,0 +1,144 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | /* | ||
| 3 | * ChromeOS Wilco Embedded Controller | ||
| 4 | * | ||
| 5 | * Copyright 2018 Google LLC | ||
| 6 | */ | ||
| 7 | |||
| 8 | #ifndef WILCO_EC_H | ||
| 9 | #define WILCO_EC_H | ||
| 10 | |||
| 11 | #include <linux/device.h> | ||
| 12 | #include <linux/kernel.h> | ||
| 13 | |||
| 14 | /* Message flags for using the mailbox() interface */ | ||
| 15 | #define WILCO_EC_FLAG_NO_RESPONSE BIT(0) /* EC does not respond */ | ||
| 16 | #define WILCO_EC_FLAG_EXTENDED_DATA BIT(1) /* EC returns 256 data bytes */ | ||
| 17 | #define WILCO_EC_FLAG_RAW_REQUEST BIT(2) /* Do not trim request data */ | ||
| 18 | #define WILCO_EC_FLAG_RAW_RESPONSE BIT(3) /* Do not trim response data */ | ||
| 19 | #define WILCO_EC_FLAG_RAW (WILCO_EC_FLAG_RAW_REQUEST | \ | ||
| 20 | WILCO_EC_FLAG_RAW_RESPONSE) | ||
| 21 | |||
| 22 | /* Normal commands have a maximum 32 bytes of data */ | ||
| 23 | #define EC_MAILBOX_DATA_SIZE 32 | ||
| 24 | /* Extended commands have 256 bytes of response data */ | ||
| 25 | #define EC_MAILBOX_DATA_SIZE_EXTENDED 256 | ||
| 26 | |||
| 27 | /** | ||
| 28 | * struct wilco_ec_device - Wilco Embedded Controller handle. | ||
| 29 | * @dev: Device handle. | ||
| 30 | * @mailbox_lock: Mutex to ensure one mailbox command at a time. | ||
| 31 | * @io_command: I/O port for mailbox command. Provided by ACPI. | ||
| 32 | * @io_data: I/O port for mailbox data. Provided by ACPI. | ||
| 33 | * @io_packet: I/O port for mailbox packet data. Provided by ACPI. | ||
| 34 | * @data_buffer: Buffer used for EC communication. The same buffer | ||
| 35 | * is used to hold the request and the response. | ||
| 36 | * @data_size: Size of the data buffer used for EC communication. | ||
| 37 | * @debugfs_pdev: The child platform_device used by the debugfs sub-driver. | ||
| 38 | * @rtc_pdev: The child platform_device used by the RTC sub-driver. | ||
| 39 | */ | ||
| 40 | struct wilco_ec_device { | ||
| 41 | struct device *dev; | ||
| 42 | struct mutex mailbox_lock; | ||
| 43 | struct resource *io_command; | ||
| 44 | struct resource *io_data; | ||
| 45 | struct resource *io_packet; | ||
| 46 | void *data_buffer; | ||
| 47 | size_t data_size; | ||
| 48 | struct platform_device *debugfs_pdev; | ||
| 49 | struct platform_device *rtc_pdev; | ||
| 50 | }; | ||
| 51 | |||
| 52 | /** | ||
| 53 | * struct wilco_ec_request - Mailbox request message format. | ||
| 54 | * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION | ||
| 55 | * @checksum: Sum of all bytes must be 0. | ||
| 56 | * @mailbox_id: Mailbox identifier, specifies the command set. | ||
| 57 | * @mailbox_version: Mailbox interface version %EC_MAILBOX_VERSION | ||
| 58 | * @reserved: Set to zero. | ||
| 59 | * @data_size: Length of request, data + last 2 bytes of the header. | ||
| 60 | * @command: Mailbox command code, unique for each mailbox_id set. | ||
| 61 | * @reserved_raw: Set to zero for most commands, but is used by | ||
| 62 | * some command types and for raw commands. | ||
| 63 | */ | ||
| 64 | struct wilco_ec_request { | ||
| 65 | u8 struct_version; | ||
| 66 | u8 checksum; | ||
| 67 | u16 mailbox_id; | ||
| 68 | u8 mailbox_version; | ||
| 69 | u8 reserved; | ||
| 70 | u16 data_size; | ||
| 71 | u8 command; | ||
| 72 | u8 reserved_raw; | ||
| 73 | } __packed; | ||
| 74 | |||
| 75 | /** | ||
| 76 | * struct wilco_ec_response - Mailbox response message format. | ||
| 77 | * @struct_version: Should be %EC_MAILBOX_PROTO_VERSION | ||
| 78 | * @checksum: Sum of all bytes must be 0. | ||
| 79 | * @result: Result code from the EC. Non-zero indicates an error. | ||
| 80 | * @data_size: Length of the response data buffer. | ||
| 81 | * @reserved: Set to zero. | ||
| 82 | * @mbox0: EC returned data at offset 0 is unused (always 0) so this byte | ||
| 83 | * is treated as part of the header instead of the data. | ||
| 84 | * @data: Response data buffer. Max size is %EC_MAILBOX_DATA_SIZE_EXTENDED. | ||
| 85 | */ | ||
| 86 | struct wilco_ec_response { | ||
| 87 | u8 struct_version; | ||
| 88 | u8 checksum; | ||
| 89 | u16 result; | ||
| 90 | u16 data_size; | ||
| 91 | u8 reserved[2]; | ||
| 92 | u8 mbox0; | ||
| 93 | u8 data[0]; | ||
| 94 | } __packed; | ||
| 95 | |||
| 96 | /** | ||
| 97 | * enum wilco_ec_msg_type - Message type to select a set of command codes. | ||
| 98 | * @WILCO_EC_MSG_LEGACY: Legacy EC messages for standard EC behavior. | ||
| 99 | * @WILCO_EC_MSG_PROPERTY: Get/Set/Sync EC controlled NVRAM property. | ||
| 100 | * @WILCO_EC_MSG_TELEMETRY_SHORT: 32 bytes of telemetry data provided by the EC. | ||
| 101 | * @WILCO_EC_MSG_TELEMETRY_LONG: 256 bytes of telemetry data provided by the EC. | ||
| 102 | */ | ||
| 103 | enum wilco_ec_msg_type { | ||
| 104 | WILCO_EC_MSG_LEGACY = 0x00f0, | ||
| 105 | WILCO_EC_MSG_PROPERTY = 0x00f2, | ||
| 106 | WILCO_EC_MSG_TELEMETRY_SHORT = 0x00f5, | ||
| 107 | WILCO_EC_MSG_TELEMETRY_LONG = 0x00f6, | ||
| 108 | }; | ||
| 109 | |||
| 110 | /** | ||
| 111 | * struct wilco_ec_message - Request and response message. | ||
| 112 | * @type: Mailbox message type. | ||
| 113 | * @flags: Message flags, e.g. %WILCO_EC_FLAG_NO_RESPONSE. | ||
| 114 | * @command: Mailbox command code. | ||
| 115 | * @result: Result code from the EC. Non-zero indicates an error. | ||
| 116 | * @request_size: Number of bytes to send to the EC. | ||
| 117 | * @request_data: Buffer containing the request data. | ||
| 118 | * @response_size: Number of bytes expected from the EC. | ||
| 119 | * This is 32 by default and 256 if the flag | ||
| 120 | * is set for %WILCO_EC_FLAG_EXTENDED_DATA | ||
| 121 | * @response_data: Buffer containing the response data, should be | ||
| 122 | * response_size bytes and allocated by caller. | ||
| 123 | */ | ||
| 124 | struct wilco_ec_message { | ||
| 125 | enum wilco_ec_msg_type type; | ||
| 126 | u8 flags; | ||
| 127 | u8 command; | ||
| 128 | u8 result; | ||
| 129 | size_t request_size; | ||
| 130 | void *request_data; | ||
| 131 | size_t response_size; | ||
| 132 | void *response_data; | ||
| 133 | }; | ||
| 134 | |||
| 135 | /** | ||
| 136 | * wilco_ec_mailbox() - Send request to the EC and receive the response. | ||
| 137 | * @ec: Wilco EC device. | ||
| 138 | * @msg: Wilco EC message. | ||
| 139 | * | ||
| 140 | * Return: Number of bytes received or negative error code on failure. | ||
| 141 | */ | ||
| 142 | int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg); | ||
| 143 | |||
| 144 | #endif /* WILCO_EC_H */ | ||
diff --git a/include/linux/platform_data/clk-lpss.h b/include/linux/platform_data/x86/clk-lpss.h index 23901992b9dd..23901992b9dd 100644 --- a/include/linux/platform_data/clk-lpss.h +++ b/include/linux/platform_data/x86/clk-lpss.h | |||
diff --git a/include/linux/platform_data/x86/clk-pmc-atom.h b/include/linux/platform_data/x86/clk-pmc-atom.h index 3ab892208343..7a37ac27d0fb 100644 --- a/include/linux/platform_data/x86/clk-pmc-atom.h +++ b/include/linux/platform_data/x86/clk-pmc-atom.h | |||
| @@ -35,10 +35,13 @@ struct pmc_clk { | |||
| 35 | * | 35 | * |
| 36 | * @base: PMC clock register base offset | 36 | * @base: PMC clock register base offset |
| 37 | * @clks: pointer to set of registered clocks, typically 0..5 | 37 | * @clks: pointer to set of registered clocks, typically 0..5 |
| 38 | * @critical: flag to indicate if firmware enabled pmc_plt_clks | ||
| 39 | * should be marked as critial or not | ||
| 38 | */ | 40 | */ |
| 39 | struct pmc_clk_data { | 41 | struct pmc_clk_data { |
| 40 | void __iomem *base; | 42 | void __iomem *base; |
| 41 | const struct pmc_clk *clks; | 43 | const struct pmc_clk *clks; |
| 44 | bool critical; | ||
| 42 | }; | 45 | }; |
| 43 | 46 | ||
| 44 | #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ | 47 | #endif /* __PLATFORM_DATA_X86_CLK_PMC_ATOM_H */ |
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h index c7c081dc6034..cc464850b71e 100644 --- a/include/linux/platform_device.h +++ b/include/linux/platform_device.h | |||
| @@ -52,6 +52,9 @@ extern struct device platform_bus; | |||
| 52 | extern void arch_setup_pdev_archdata(struct platform_device *); | 52 | extern void arch_setup_pdev_archdata(struct platform_device *); |
| 53 | extern struct resource *platform_get_resource(struct platform_device *, | 53 | extern struct resource *platform_get_resource(struct platform_device *, |
| 54 | unsigned int, unsigned int); | 54 | unsigned int, unsigned int); |
| 55 | extern void __iomem * | ||
| 56 | devm_platform_ioremap_resource(struct platform_device *pdev, | ||
| 57 | unsigned int index); | ||
| 55 | extern int platform_get_irq(struct platform_device *, unsigned int); | 58 | extern int platform_get_irq(struct platform_device *, unsigned int); |
| 56 | extern int platform_irq_count(struct platform_device *); | 59 | extern int platform_irq_count(struct platform_device *); |
| 57 | extern struct resource *platform_get_resource_byname(struct platform_device *, | 60 | extern struct resource *platform_get_resource_byname(struct platform_device *, |
| @@ -63,6 +66,7 @@ extern int platform_add_devices(struct platform_device **, int); | |||
| 63 | struct platform_device_info { | 66 | struct platform_device_info { |
| 64 | struct device *parent; | 67 | struct device *parent; |
| 65 | struct fwnode_handle *fwnode; | 68 | struct fwnode_handle *fwnode; |
| 69 | bool of_node_reused; | ||
| 66 | 70 | ||
| 67 | const char *name; | 71 | const char *name; |
| 68 | int id; | 72 | int id; |
diff --git a/include/linux/pm.h b/include/linux/pm.h index 0bd9de116826..66c19a65a514 100644 --- a/include/linux/pm.h +++ b/include/linux/pm.h | |||
| @@ -592,6 +592,7 @@ struct dev_pm_info { | |||
| 592 | bool is_suspended:1; /* Ditto */ | 592 | bool is_suspended:1; /* Ditto */ |
| 593 | bool is_noirq_suspended:1; | 593 | bool is_noirq_suspended:1; |
| 594 | bool is_late_suspended:1; | 594 | bool is_late_suspended:1; |
| 595 | bool no_pm:1; | ||
| 595 | bool early_init:1; /* Owned by the PM core */ | 596 | bool early_init:1; /* Owned by the PM core */ |
| 596 | bool direct_complete:1; /* Owned by the PM core */ | 597 | bool direct_complete:1; /* Owned by the PM core */ |
| 597 | u32 driver_flags; | 598 | u32 driver_flags; |
| @@ -633,16 +634,15 @@ struct dev_pm_info { | |||
| 633 | int runtime_error; | 634 | int runtime_error; |
| 634 | int autosuspend_delay; | 635 | int autosuspend_delay; |
| 635 | u64 last_busy; | 636 | u64 last_busy; |
| 636 | unsigned long active_jiffies; | 637 | u64 active_time; |
| 637 | unsigned long suspended_jiffies; | 638 | u64 suspended_time; |
| 638 | unsigned long accounting_timestamp; | 639 | u64 accounting_timestamp; |
| 639 | #endif | 640 | #endif |
| 640 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ | 641 | struct pm_subsys_data *subsys_data; /* Owned by the subsystem. */ |
| 641 | void (*set_latency_tolerance)(struct device *, s32); | 642 | void (*set_latency_tolerance)(struct device *, s32); |
| 642 | struct dev_pm_qos *qos; | 643 | struct dev_pm_qos *qos; |
| 643 | }; | 644 | }; |
| 644 | 645 | ||
| 645 | extern void update_pm_runtime_accounting(struct device *dev); | ||
| 646 | extern int dev_pm_get_subsys_data(struct device *dev); | 646 | extern int dev_pm_get_subsys_data(struct device *dev); |
| 647 | extern void dev_pm_put_subsys_data(struct device *dev); | 647 | extern void dev_pm_put_subsys_data(struct device *dev); |
| 648 | 648 | ||
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h index dd364abb649a..1ed5874bcee0 100644 --- a/include/linux/pm_domain.h +++ b/include/linux/pm_domain.h | |||
| @@ -271,7 +271,7 @@ int genpd_dev_pm_attach(struct device *dev); | |||
| 271 | struct device *genpd_dev_pm_attach_by_id(struct device *dev, | 271 | struct device *genpd_dev_pm_attach_by_id(struct device *dev, |
| 272 | unsigned int index); | 272 | unsigned int index); |
| 273 | struct device *genpd_dev_pm_attach_by_name(struct device *dev, | 273 | struct device *genpd_dev_pm_attach_by_name(struct device *dev, |
| 274 | char *name); | 274 | const char *name); |
| 275 | #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ | 275 | #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */ |
| 276 | static inline int of_genpd_add_provider_simple(struct device_node *np, | 276 | static inline int of_genpd_add_provider_simple(struct device_node *np, |
| 277 | struct generic_pm_domain *genpd) | 277 | struct generic_pm_domain *genpd) |
| @@ -324,7 +324,7 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev, | |||
| 324 | } | 324 | } |
| 325 | 325 | ||
| 326 | static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev, | 326 | static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev, |
| 327 | char *name) | 327 | const char *name) |
| 328 | { | 328 | { |
| 329 | return NULL; | 329 | return NULL; |
| 330 | } | 330 | } |
| @@ -341,7 +341,7 @@ int dev_pm_domain_attach(struct device *dev, bool power_on); | |||
| 341 | struct device *dev_pm_domain_attach_by_id(struct device *dev, | 341 | struct device *dev_pm_domain_attach_by_id(struct device *dev, |
| 342 | unsigned int index); | 342 | unsigned int index); |
| 343 | struct device *dev_pm_domain_attach_by_name(struct device *dev, | 343 | struct device *dev_pm_domain_attach_by_name(struct device *dev, |
| 344 | char *name); | 344 | const char *name); |
| 345 | void dev_pm_domain_detach(struct device *dev, bool power_off); | 345 | void dev_pm_domain_detach(struct device *dev, bool power_off); |
| 346 | void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); | 346 | void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd); |
| 347 | #else | 347 | #else |
| @@ -355,7 +355,7 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev, | |||
| 355 | return NULL; | 355 | return NULL; |
| 356 | } | 356 | } |
| 357 | static inline struct device *dev_pm_domain_attach_by_name(struct device *dev, | 357 | static inline struct device *dev_pm_domain_attach_by_name(struct device *dev, |
| 358 | char *name) | 358 | const char *name) |
| 359 | { | 359 | { |
| 360 | return NULL; | 360 | return NULL; |
| 361 | } | 361 | } |
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h index 0a2a88e5a383..24c757a32a7b 100644 --- a/include/linux/pm_opp.h +++ b/include/linux/pm_opp.h | |||
| @@ -86,6 +86,8 @@ unsigned long dev_pm_opp_get_voltage(struct dev_pm_opp *opp); | |||
| 86 | 86 | ||
| 87 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); | 87 | unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp); |
| 88 | 88 | ||
| 89 | unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp); | ||
| 90 | |||
| 89 | bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); | 91 | bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp); |
| 90 | 92 | ||
| 91 | int dev_pm_opp_get_opp_count(struct device *dev); | 93 | int dev_pm_opp_get_opp_count(struct device *dev); |
| @@ -108,6 +110,7 @@ void dev_pm_opp_put(struct dev_pm_opp *opp); | |||
| 108 | int dev_pm_opp_add(struct device *dev, unsigned long freq, | 110 | int dev_pm_opp_add(struct device *dev, unsigned long freq, |
| 109 | unsigned long u_volt); | 111 | unsigned long u_volt); |
| 110 | void dev_pm_opp_remove(struct device *dev, unsigned long freq); | 112 | void dev_pm_opp_remove(struct device *dev, unsigned long freq); |
| 113 | void dev_pm_opp_remove_all_dynamic(struct device *dev); | ||
| 111 | 114 | ||
| 112 | int dev_pm_opp_enable(struct device *dev, unsigned long freq); | 115 | int dev_pm_opp_enable(struct device *dev, unsigned long freq); |
| 113 | 116 | ||
| @@ -157,6 +160,11 @@ static inline unsigned long dev_pm_opp_get_freq(struct dev_pm_opp *opp) | |||
| 157 | return 0; | 160 | return 0; |
| 158 | } | 161 | } |
| 159 | 162 | ||
| 163 | static inline unsigned int dev_pm_opp_get_level(struct dev_pm_opp *opp) | ||
| 164 | { | ||
| 165 | return 0; | ||
| 166 | } | ||
| 167 | |||
| 160 | static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) | 168 | static inline bool dev_pm_opp_is_turbo(struct dev_pm_opp *opp) |
| 161 | { | 169 | { |
| 162 | return false; | 170 | return false; |
| @@ -217,6 +225,10 @@ static inline void dev_pm_opp_remove(struct device *dev, unsigned long freq) | |||
| 217 | { | 225 | { |
| 218 | } | 226 | } |
| 219 | 227 | ||
| 228 | static inline void dev_pm_opp_remove_all_dynamic(struct device *dev) | ||
| 229 | { | ||
| 230 | } | ||
| 231 | |||
| 220 | static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) | 232 | static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq) |
| 221 | { | 233 | { |
| 222 | return 0; | 234 | return 0; |
| @@ -322,6 +334,7 @@ int dev_pm_opp_of_get_sharing_cpus(struct device *cpu_dev, struct cpumask *cpuma | |||
| 322 | struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); | 334 | struct device_node *dev_pm_opp_of_get_opp_desc_node(struct device *dev); |
| 323 | struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); | 335 | struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp); |
| 324 | int of_get_required_opp_performance_state(struct device_node *np, int index); | 336 | int of_get_required_opp_performance_state(struct device_node *np, int index); |
| 337 | void dev_pm_opp_of_register_em(struct cpumask *cpus); | ||
| 325 | #else | 338 | #else |
| 326 | static inline int dev_pm_opp_of_add_table(struct device *dev) | 339 | static inline int dev_pm_opp_of_add_table(struct device *dev) |
| 327 | { | 340 | { |
| @@ -360,6 +373,11 @@ static inline struct device_node *dev_pm_opp_get_of_node(struct dev_pm_opp *opp) | |||
| 360 | { | 373 | { |
| 361 | return NULL; | 374 | return NULL; |
| 362 | } | 375 | } |
| 376 | |||
| 377 | static inline void dev_pm_opp_of_register_em(struct cpumask *cpus) | ||
| 378 | { | ||
| 379 | } | ||
| 380 | |||
| 363 | static inline int of_get_required_opp_performance_state(struct device_node *np, int index) | 381 | static inline int of_get_required_opp_performance_state(struct device_node *np, int index) |
| 364 | { | 382 | { |
| 365 | return -ENOTSUPP; | 383 | return -ENOTSUPP; |
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h index 54af4eef169f..9dc6eebf62d2 100644 --- a/include/linux/pm_runtime.h +++ b/include/linux/pm_runtime.h | |||
| @@ -105,7 +105,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev) | |||
| 105 | 105 | ||
| 106 | static inline void pm_runtime_mark_last_busy(struct device *dev) | 106 | static inline void pm_runtime_mark_last_busy(struct device *dev) |
| 107 | { | 107 | { |
| 108 | WRITE_ONCE(dev->power.last_busy, ktime_to_ns(ktime_get())); | 108 | WRITE_ONCE(dev->power.last_busy, ktime_get_mono_fast_ns()); |
| 109 | } | 109 | } |
| 110 | 110 | ||
| 111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) | 111 | static inline bool pm_runtime_is_irq_safe(struct device *dev) |
| @@ -113,6 +113,8 @@ static inline bool pm_runtime_is_irq_safe(struct device *dev) | |||
| 113 | return dev->power.irq_safe; | 113 | return dev->power.irq_safe; |
| 114 | } | 114 | } |
| 115 | 115 | ||
| 116 | extern u64 pm_runtime_suspended_time(struct device *dev); | ||
| 117 | |||
| 116 | #else /* !CONFIG_PM */ | 118 | #else /* !CONFIG_PM */ |
| 117 | 119 | ||
| 118 | static inline bool queue_pm_work(struct work_struct *work) { return false; } | 120 | static inline bool queue_pm_work(struct work_struct *work) { return false; } |
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h index 4238dde0aaf0..0ff134d6575a 100644 --- a/include/linux/pm_wakeup.h +++ b/include/linux/pm_wakeup.h | |||
| @@ -96,7 +96,6 @@ static inline void device_set_wakeup_path(struct device *dev) | |||
| 96 | /* drivers/base/power/wakeup.c */ | 96 | /* drivers/base/power/wakeup.c */ |
| 97 | extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); | 97 | extern void wakeup_source_prepare(struct wakeup_source *ws, const char *name); |
| 98 | extern struct wakeup_source *wakeup_source_create(const char *name); | 98 | extern struct wakeup_source *wakeup_source_create(const char *name); |
| 99 | extern void wakeup_source_drop(struct wakeup_source *ws); | ||
| 100 | extern void wakeup_source_destroy(struct wakeup_source *ws); | 99 | extern void wakeup_source_destroy(struct wakeup_source *ws); |
| 101 | extern void wakeup_source_add(struct wakeup_source *ws); | 100 | extern void wakeup_source_add(struct wakeup_source *ws); |
| 102 | extern void wakeup_source_remove(struct wakeup_source *ws); | 101 | extern void wakeup_source_remove(struct wakeup_source *ws); |
| @@ -134,8 +133,6 @@ static inline struct wakeup_source *wakeup_source_create(const char *name) | |||
| 134 | return NULL; | 133 | return NULL; |
| 135 | } | 134 | } |
| 136 | 135 | ||
| 137 | static inline void wakeup_source_drop(struct wakeup_source *ws) {} | ||
| 138 | |||
| 139 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} | 136 | static inline void wakeup_source_destroy(struct wakeup_source *ws) {} |
| 140 | 137 | ||
| 141 | static inline void wakeup_source_add(struct wakeup_source *ws) {} | 138 | static inline void wakeup_source_add(struct wakeup_source *ws) {} |
| @@ -204,12 +201,6 @@ static inline void wakeup_source_init(struct wakeup_source *ws, | |||
| 204 | wakeup_source_add(ws); | 201 | wakeup_source_add(ws); |
| 205 | } | 202 | } |
| 206 | 203 | ||
| 207 | static inline void wakeup_source_trash(struct wakeup_source *ws) | ||
| 208 | { | ||
| 209 | wakeup_source_remove(ws); | ||
| 210 | wakeup_source_drop(ws); | ||
| 211 | } | ||
| 212 | |||
| 213 | static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) | 204 | static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) |
| 214 | { | 205 | { |
| 215 | return pm_wakeup_ws_event(ws, msec, false); | 206 | return pm_wakeup_ws_event(ws, msec, false); |
diff --git a/include/linux/poison.h b/include/linux/poison.h index 15927ebc22f2..d6d980a681c7 100644 --- a/include/linux/poison.h +++ b/include/linux/poison.h | |||
| @@ -30,7 +30,7 @@ | |||
| 30 | */ | 30 | */ |
| 31 | #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) | 31 | #define TIMER_ENTRY_STATIC ((void *) 0x300 + POISON_POINTER_DELTA) |
| 32 | 32 | ||
| 33 | /********** mm/debug-pagealloc.c **********/ | 33 | /********** mm/page_poison.c **********/ |
| 34 | #ifdef CONFIG_PAGE_POISONING_ZERO | 34 | #ifdef CONFIG_PAGE_POISONING_ZERO |
| 35 | #define PAGE_POISON 0x00 | 35 | #define PAGE_POISON 0x00 |
| 36 | #else | 36 | #else |
| @@ -83,9 +83,6 @@ | |||
| 83 | #define MUTEX_DEBUG_FREE 0x22 | 83 | #define MUTEX_DEBUG_FREE 0x22 |
| 84 | #define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA) | 84 | #define MUTEX_POISON_WW_CTX ((void *) 0x500 + POISON_POINTER_DELTA) |
| 85 | 85 | ||
| 86 | /********** lib/flex_array.c **********/ | ||
| 87 | #define FLEX_ARRAY_FREE 0x6c /* for use-after-free poisoning */ | ||
| 88 | |||
| 89 | /********** security/ **********/ | 86 | /********** security/ **********/ |
| 90 | #define KEY_DESTROY 0xbd | 87 | #define KEY_DESTROY 0xbd |
| 91 | 88 | ||
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h index 3a3bc71017d5..18674d7d5b1c 100644 --- a/include/linux/posix-clock.h +++ b/include/linux/posix-clock.h | |||
| @@ -51,7 +51,7 @@ struct posix_clock; | |||
| 51 | struct posix_clock_operations { | 51 | struct posix_clock_operations { |
| 52 | struct module *owner; | 52 | struct module *owner; |
| 53 | 53 | ||
| 54 | int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx); | 54 | int (*clock_adjtime)(struct posix_clock *pc, struct __kernel_timex *tx); |
| 55 | 55 | ||
| 56 | int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); | 56 | int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts); |
| 57 | 57 | ||
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h index e96581ca7c9d..b20798fc5191 100644 --- a/include/linux/posix-timers.h +++ b/include/linux/posix-timers.h | |||
| @@ -12,7 +12,7 @@ struct siginfo; | |||
| 12 | 12 | ||
| 13 | struct cpu_timer_list { | 13 | struct cpu_timer_list { |
| 14 | struct list_head entry; | 14 | struct list_head entry; |
| 15 | u64 expires, incr; | 15 | u64 expires; |
| 16 | struct task_struct *task; | 16 | struct task_struct *task; |
| 17 | int firing; | 17 | int firing; |
| 18 | }; | 18 | }; |
diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h deleted file mode 100644 index 0105d9e7af85..000000000000 --- a/include/linux/power/isp1704_charger.h +++ /dev/null | |||
| @@ -1,30 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * ISP1704 USB Charger Detection driver | ||
| 3 | * | ||
| 4 | * Copyright (C) 2011 Nokia Corporation | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License as published by | ||
| 8 | * the Free Software Foundation; either version 2 of the License, or | ||
| 9 | * (at your option) any later version. | ||
| 10 | * | ||
| 11 | * This program is distributed in the hope that it will be useful, | ||
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 14 | * GNU General Public License for more details. | ||
| 15 | * | ||
| 16 | * You should have received a copy of the GNU General Public License | ||
| 17 | * along with this program; if not, write to the Free Software | ||
| 18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
| 19 | */ | ||
| 20 | |||
| 21 | |||
| 22 | #ifndef __ISP1704_CHARGER_H | ||
| 23 | #define __ISP1704_CHARGER_H | ||
| 24 | |||
| 25 | struct isp1704_charger_data { | ||
| 26 | void (*set_power)(bool on); | ||
| 27 | int enable_gpio; | ||
| 28 | }; | ||
| 29 | |||
| 30 | #endif | ||
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h index 57b2ab82b951..2f9c201a54d1 100644 --- a/include/linux/power_supply.h +++ b/include/linux/power_supply.h | |||
| @@ -332,6 +332,7 @@ struct power_supply_battery_info { | |||
| 332 | int energy_full_design_uwh; /* microWatt-hours */ | 332 | int energy_full_design_uwh; /* microWatt-hours */ |
| 333 | int charge_full_design_uah; /* microAmp-hours */ | 333 | int charge_full_design_uah; /* microAmp-hours */ |
| 334 | int voltage_min_design_uv; /* microVolts */ | 334 | int voltage_min_design_uv; /* microVolts */ |
| 335 | int voltage_max_design_uv; /* microVolts */ | ||
| 335 | int precharge_current_ua; /* microAmps */ | 336 | int precharge_current_ua; /* microAmps */ |
| 336 | int charge_term_current_ua; /* microAmps */ | 337 | int charge_term_current_ua; /* microAmps */ |
| 337 | int constant_charge_current_max_ua; /* microAmps */ | 338 | int constant_charge_current_max_ua; /* microAmps */ |
diff --git a/include/linux/printk.h b/include/linux/printk.h index 77740a506ebb..d7c77ed1a4cb 100644 --- a/include/linux/printk.h +++ b/include/linux/printk.h | |||
| @@ -18,7 +18,6 @@ static inline int printk_get_level(const char *buffer) | |||
| 18 | if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { | 18 | if (buffer[0] == KERN_SOH_ASCII && buffer[1]) { |
| 19 | switch (buffer[1]) { | 19 | switch (buffer[1]) { |
| 20 | case '0' ... '7': | 20 | case '0' ... '7': |
| 21 | case 'd': /* KERN_DEFAULT */ | ||
| 22 | case 'c': /* KERN_CONT */ | 21 | case 'c': /* KERN_CONT */ |
| 23 | return buffer[1]; | 22 | return buffer[1]; |
| 24 | } | 23 | } |
| @@ -461,7 +460,7 @@ do { \ | |||
| 461 | DEFAULT_RATELIMIT_INTERVAL, \ | 460 | DEFAULT_RATELIMIT_INTERVAL, \ |
| 462 | DEFAULT_RATELIMIT_BURST); \ | 461 | DEFAULT_RATELIMIT_BURST); \ |
| 463 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \ | 462 | DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, pr_fmt(fmt)); \ |
| 464 | if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \ | 463 | if (DYNAMIC_DEBUG_BRANCH(descriptor) && \ |
| 465 | __ratelimit(&_rs)) \ | 464 | __ratelimit(&_rs)) \ |
| 466 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \ | 465 | __dynamic_pr_debug(&descriptor, pr_fmt(fmt), ##__VA_ARGS__); \ |
| 467 | } while (0) | 466 | } while (0) |
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index d0e1f1522a78..52a283ba0465 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
| @@ -73,6 +73,7 @@ struct proc_dir_entry *proc_create_net_single_write(const char *name, umode_t mo | |||
| 73 | int (*show)(struct seq_file *, void *), | 73 | int (*show)(struct seq_file *, void *), |
| 74 | proc_write_t write, | 74 | proc_write_t write, |
| 75 | void *data); | 75 | void *data); |
| 76 | extern struct pid *tgid_pidfd_to_pid(const struct file *file); | ||
| 76 | 77 | ||
| 77 | #else /* CONFIG_PROC_FS */ | 78 | #else /* CONFIG_PROC_FS */ |
| 78 | 79 | ||
| @@ -114,6 +115,11 @@ static inline int remove_proc_subtree(const char *name, struct proc_dir_entry *p | |||
| 114 | #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) | 115 | #define proc_create_net(name, mode, parent, state_size, ops) ({NULL;}) |
| 115 | #define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) | 116 | #define proc_create_net_single(name, mode, parent, show, data) ({NULL;}) |
| 116 | 117 | ||
| 118 | static inline struct pid *tgid_pidfd_to_pid(const struct file *file) | ||
| 119 | { | ||
| 120 | return ERR_PTR(-EBADF); | ||
| 121 | } | ||
| 122 | |||
| 117 | #endif /* CONFIG_PROC_FS */ | 123 | #endif /* CONFIG_PROC_FS */ |
| 118 | 124 | ||
| 119 | struct net; | 125 | struct net; |
diff --git a/include/linux/property.h b/include/linux/property.h index 3789ec755fb6..65d3420dd5d1 100644 --- a/include/linux/property.h +++ b/include/linux/property.h | |||
| @@ -258,7 +258,7 @@ struct property_entry { | |||
| 258 | #define PROPERTY_ENTRY_STRING(_name_, _val_) \ | 258 | #define PROPERTY_ENTRY_STRING(_name_, _val_) \ |
| 259 | (struct property_entry) { \ | 259 | (struct property_entry) { \ |
| 260 | .name = _name_, \ | 260 | .name = _name_, \ |
| 261 | .length = sizeof(_val_), \ | 261 | .length = sizeof(const char *), \ |
| 262 | .type = DEV_PROP_STRING, \ | 262 | .type = DEV_PROP_STRING, \ |
| 263 | { .value = { .str = _val_ } }, \ | 263 | { .value = { .str = _val_ } }, \ |
| 264 | } | 264 | } |
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h index 186cd8e970c7..8da46ac44a2e 100644 --- a/include/linux/ptr_ring.h +++ b/include/linux/ptr_ring.h | |||
| @@ -26,7 +26,6 @@ | |||
| 26 | #include <linux/cache.h> | 26 | #include <linux/cache.h> |
| 27 | #include <linux/types.h> | 27 | #include <linux/types.h> |
| 28 | #include <linux/compiler.h> | 28 | #include <linux/compiler.h> |
| 29 | #include <linux/cache.h> | ||
| 30 | #include <linux/slab.h> | 29 | #include <linux/slab.h> |
| 31 | #include <asm/errno.h> | 30 | #include <asm/errno.h> |
| 32 | #endif | 31 | #endif |
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h index edb9b040c94c..d5084ebd9f03 100644 --- a/include/linux/ptrace.h +++ b/include/linux/ptrace.h | |||
| @@ -9,6 +9,13 @@ | |||
| 9 | #include <linux/bug.h> /* For BUG_ON. */ | 9 | #include <linux/bug.h> /* For BUG_ON. */ |
| 10 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ | 10 | #include <linux/pid_namespace.h> /* For task_active_pid_ns. */ |
| 11 | #include <uapi/linux/ptrace.h> | 11 | #include <uapi/linux/ptrace.h> |
| 12 | #include <linux/seccomp.h> | ||
| 13 | |||
| 14 | /* Add sp to seccomp_data, as seccomp is user API, we don't want to modify it */ | ||
| 15 | struct syscall_info { | ||
| 16 | __u64 sp; | ||
| 17 | struct seccomp_data data; | ||
| 18 | }; | ||
| 12 | 19 | ||
| 13 | extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, | 20 | extern int ptrace_access_vm(struct task_struct *tsk, unsigned long addr, |
| 14 | void *buf, int len, unsigned int gup_flags); | 21 | void *buf, int len, unsigned int gup_flags); |
| @@ -407,9 +414,7 @@ static inline void user_single_step_report(struct pt_regs *regs) | |||
| 407 | #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) | 414 | #define current_user_stack_pointer() user_stack_pointer(current_pt_regs()) |
| 408 | #endif | 415 | #endif |
| 409 | 416 | ||
| 410 | extern int task_current_syscall(struct task_struct *target, long *callno, | 417 | extern int task_current_syscall(struct task_struct *target, struct syscall_info *info); |
| 411 | unsigned long args[6], unsigned int maxargs, | ||
| 412 | unsigned long *sp, unsigned long *pc); | ||
| 413 | 418 | ||
| 414 | extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); | 419 | extern void sigaction_compat_abi(struct k_sigaction *act, struct k_sigaction *oact); |
| 415 | #endif | 420 | #endif |
diff --git a/include/linux/pwm.h b/include/linux/pwm.h index d5199b507d79..b628abfffacc 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h | |||
| @@ -242,11 +242,7 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, | |||
| 242 | * struct pwm_ops - PWM controller operations | 242 | * struct pwm_ops - PWM controller operations |
| 243 | * @request: optional hook for requesting a PWM | 243 | * @request: optional hook for requesting a PWM |
| 244 | * @free: optional hook for freeing a PWM | 244 | * @free: optional hook for freeing a PWM |
| 245 | * @config: configure duty cycles and period length for this PWM | ||
| 246 | * @set_polarity: configure the polarity of this PWM | ||
| 247 | * @capture: capture and report PWM signal | 245 | * @capture: capture and report PWM signal |
| 248 | * @enable: enable PWM output toggling | ||
| 249 | * @disable: disable PWM output toggling | ||
| 250 | * @apply: atomically apply a new PWM config. The state argument | 246 | * @apply: atomically apply a new PWM config. The state argument |
| 251 | * should be adjusted with the real hardware config (if the | 247 | * should be adjusted with the real hardware config (if the |
| 252 | * approximate the period or duty_cycle value, state should | 248 | * approximate the period or duty_cycle value, state should |
| @@ -254,53 +250,56 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle, | |||
| 254 | * @get_state: get the current PWM state. This function is only | 250 | * @get_state: get the current PWM state. This function is only |
| 255 | * called once per PWM device when the PWM chip is | 251 | * called once per PWM device when the PWM chip is |
| 256 | * registered. | 252 | * registered. |
| 257 | * @dbg_show: optional routine to show contents in debugfs | ||
| 258 | * @owner: helps prevent removal of modules exporting active PWMs | 253 | * @owner: helps prevent removal of modules exporting active PWMs |
| 254 | * @config: configure duty cycles and period length for this PWM | ||
| 255 | * @set_polarity: configure the polarity of this PWM | ||
| 256 | * @enable: enable PWM output toggling | ||
| 257 | * @disable: disable PWM output toggling | ||
| 259 | */ | 258 | */ |
| 260 | struct pwm_ops { | 259 | struct pwm_ops { |
| 261 | int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); | 260 | int (*request)(struct pwm_chip *chip, struct pwm_device *pwm); |
| 262 | void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); | 261 | void (*free)(struct pwm_chip *chip, struct pwm_device *pwm); |
| 263 | int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, | ||
| 264 | int duty_ns, int period_ns); | ||
| 265 | int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, | ||
| 266 | enum pwm_polarity polarity); | ||
| 267 | int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, | 262 | int (*capture)(struct pwm_chip *chip, struct pwm_device *pwm, |
| 268 | struct pwm_capture *result, unsigned long timeout); | 263 | struct pwm_capture *result, unsigned long timeout); |
| 269 | int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); | ||
| 270 | void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); | ||
| 271 | int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, | 264 | int (*apply)(struct pwm_chip *chip, struct pwm_device *pwm, |
| 272 | struct pwm_state *state); | 265 | struct pwm_state *state); |
| 273 | void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, | 266 | void (*get_state)(struct pwm_chip *chip, struct pwm_device *pwm, |
| 274 | struct pwm_state *state); | 267 | struct pwm_state *state); |
| 275 | #ifdef CONFIG_DEBUG_FS | ||
| 276 | void (*dbg_show)(struct pwm_chip *chip, struct seq_file *s); | ||
| 277 | #endif | ||
| 278 | struct module *owner; | 268 | struct module *owner; |
| 269 | |||
| 270 | /* Only used by legacy drivers */ | ||
| 271 | int (*config)(struct pwm_chip *chip, struct pwm_device *pwm, | ||
| 272 | int duty_ns, int period_ns); | ||
| 273 | int (*set_polarity)(struct pwm_chip *chip, struct pwm_device *pwm, | ||
| 274 | enum pwm_polarity polarity); | ||
| 275 | int (*enable)(struct pwm_chip *chip, struct pwm_device *pwm); | ||
| 276 | void (*disable)(struct pwm_chip *chip, struct pwm_device *pwm); | ||
| 279 | }; | 277 | }; |
| 280 | 278 | ||
| 281 | /** | 279 | /** |
| 282 | * struct pwm_chip - abstract a PWM controller | 280 | * struct pwm_chip - abstract a PWM controller |
| 283 | * @dev: device providing the PWMs | 281 | * @dev: device providing the PWMs |
| 284 | * @list: list node for internal use | ||
| 285 | * @ops: callbacks for this PWM controller | 282 | * @ops: callbacks for this PWM controller |
| 286 | * @base: number of first PWM controlled by this chip | 283 | * @base: number of first PWM controlled by this chip |
| 287 | * @npwm: number of PWMs controlled by this chip | 284 | * @npwm: number of PWMs controlled by this chip |
| 288 | * @pwms: array of PWM devices allocated by the framework | ||
| 289 | * @of_xlate: request a PWM device given a device tree PWM specifier | 285 | * @of_xlate: request a PWM device given a device tree PWM specifier |
| 290 | * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier | 286 | * @of_pwm_n_cells: number of cells expected in the device tree PWM specifier |
| 287 | * @list: list node for internal use | ||
| 288 | * @pwms: array of PWM devices allocated by the framework | ||
| 291 | */ | 289 | */ |
| 292 | struct pwm_chip { | 290 | struct pwm_chip { |
| 293 | struct device *dev; | 291 | struct device *dev; |
| 294 | struct list_head list; | ||
| 295 | const struct pwm_ops *ops; | 292 | const struct pwm_ops *ops; |
| 296 | int base; | 293 | int base; |
| 297 | unsigned int npwm; | 294 | unsigned int npwm; |
| 298 | 295 | ||
| 299 | struct pwm_device *pwms; | ||
| 300 | |||
| 301 | struct pwm_device * (*of_xlate)(struct pwm_chip *pc, | 296 | struct pwm_device * (*of_xlate)(struct pwm_chip *pc, |
| 302 | const struct of_phandle_args *args); | 297 | const struct of_phandle_args *args); |
| 303 | unsigned int of_pwm_n_cells; | 298 | unsigned int of_pwm_n_cells; |
| 299 | |||
| 300 | /* only used internally by the PWM framework */ | ||
| 301 | struct list_head list; | ||
| 302 | struct pwm_device *pwms; | ||
| 304 | }; | 303 | }; |
| 305 | 304 | ||
| 306 | /** | 305 | /** |
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h index 1637385bcc17..d0aecc04c54b 100644 --- a/include/linux/qcom_scm.h +++ b/include/linux/qcom_scm.h | |||
| @@ -13,6 +13,7 @@ | |||
| 13 | #ifndef __QCOM_SCM_H | 13 | #ifndef __QCOM_SCM_H |
| 14 | #define __QCOM_SCM_H | 14 | #define __QCOM_SCM_H |
| 15 | 15 | ||
| 16 | #include <linux/err.h> | ||
| 16 | #include <linux/types.h> | 17 | #include <linux/types.h> |
| 17 | #include <linux/cpumask.h> | 18 | #include <linux/cpumask.h> |
| 18 | 19 | ||
diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index 59ddf9af909e..2dd0a9ed5b36 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h | |||
| @@ -663,6 +663,37 @@ out: | |||
| 663 | static inline void qed_chain_set_prod(struct qed_chain *p_chain, | 663 | static inline void qed_chain_set_prod(struct qed_chain *p_chain, |
| 664 | u32 prod_idx, void *p_prod_elem) | 664 | u32 prod_idx, void *p_prod_elem) |
| 665 | { | 665 | { |
| 666 | if (p_chain->mode == QED_CHAIN_MODE_PBL) { | ||
| 667 | u32 cur_prod, page_mask, page_cnt, page_diff; | ||
| 668 | |||
| 669 | cur_prod = is_chain_u16(p_chain) ? p_chain->u.chain16.prod_idx : | ||
| 670 | p_chain->u.chain32.prod_idx; | ||
| 671 | |||
| 672 | /* Assume that number of elements in a page is power of 2 */ | ||
| 673 | page_mask = ~p_chain->elem_per_page_mask; | ||
| 674 | |||
| 675 | /* Use "cur_prod - 1" and "prod_idx - 1" since producer index | ||
| 676 | * reaches the first element of next page before the page index | ||
| 677 | * is incremented. See qed_chain_produce(). | ||
| 678 | * Index wrap around is not a problem because the difference | ||
| 679 | * between current and given producer indices is always | ||
| 680 | * positive and lower than the chain's capacity. | ||
| 681 | */ | ||
| 682 | page_diff = (((cur_prod - 1) & page_mask) - | ||
| 683 | ((prod_idx - 1) & page_mask)) / | ||
| 684 | p_chain->elem_per_page; | ||
| 685 | |||
| 686 | page_cnt = qed_chain_get_page_cnt(p_chain); | ||
| 687 | if (is_chain_u16(p_chain)) | ||
| 688 | p_chain->pbl.c.u16.prod_page_idx = | ||
| 689 | (p_chain->pbl.c.u16.prod_page_idx - | ||
| 690 | page_diff + page_cnt) % page_cnt; | ||
| 691 | else | ||
| 692 | p_chain->pbl.c.u32.prod_page_idx = | ||
| 693 | (p_chain->pbl.c.u32.prod_page_idx - | ||
| 694 | page_diff + page_cnt) % page_cnt; | ||
| 695 | } | ||
| 696 | |||
| 666 | if (is_chain_u16(p_chain)) | 697 | if (is_chain_u16(p_chain)) |
| 667 | p_chain->u.chain16.prod_idx = (u16) prod_idx; | 698 | p_chain->u.chain16.prod_idx = (u16) prod_idx; |
| 668 | else | 699 | else |
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 91c536a01b56..f6165d304b4d 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h | |||
| @@ -38,7 +38,6 @@ | |||
| 38 | #include <linux/netdevice.h> | 38 | #include <linux/netdevice.h> |
| 39 | #include <linux/pci.h> | 39 | #include <linux/pci.h> |
| 40 | #include <linux/skbuff.h> | 40 | #include <linux/skbuff.h> |
| 41 | #include <linux/types.h> | ||
| 42 | #include <asm/byteorder.h> | 41 | #include <asm/byteorder.h> |
| 43 | #include <linux/io.h> | 42 | #include <linux/io.h> |
| 44 | #include <linux/compiler.h> | 43 | #include <linux/compiler.h> |
| @@ -644,6 +643,7 @@ struct qed_dev_info { | |||
| 644 | u16 mtu; | 643 | u16 mtu; |
| 645 | 644 | ||
| 646 | bool wol_support; | 645 | bool wol_support; |
| 646 | bool smart_an; | ||
| 647 | 647 | ||
| 648 | /* MBI version */ | 648 | /* MBI version */ |
| 649 | u32 mbi_version; | 649 | u32 mbi_version; |
| @@ -764,6 +764,7 @@ struct qed_probe_params { | |||
| 764 | u32 dp_module; | 764 | u32 dp_module; |
| 765 | u8 dp_level; | 765 | u8 dp_level; |
| 766 | bool is_vf; | 766 | bool is_vf; |
| 767 | bool recov_in_prog; | ||
| 767 | }; | 768 | }; |
| 768 | 769 | ||
| 769 | #define QED_DRV_VER_STR_SIZE 12 | 770 | #define QED_DRV_VER_STR_SIZE 12 |
| @@ -810,6 +811,7 @@ struct qed_common_cb_ops { | |||
| 810 | void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); | 811 | void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc); |
| 811 | void (*link_update)(void *dev, | 812 | void (*link_update)(void *dev, |
| 812 | struct qed_link_output *link); | 813 | struct qed_link_output *link); |
| 814 | void (*schedule_recovery_handler)(void *dev); | ||
| 813 | void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); | 815 | void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type); |
| 814 | void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); | 816 | void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data); |
| 815 | void (*get_protocol_tlv_data)(void *dev, void *data); | 817 | void (*get_protocol_tlv_data)(void *dev, void *data); |
| @@ -1058,6 +1060,24 @@ struct qed_common_ops { | |||
| 1058 | void __iomem *db_addr, void *db_data); | 1060 | void __iomem *db_addr, void *db_data); |
| 1059 | 1061 | ||
| 1060 | /** | 1062 | /** |
| 1063 | * @brief recovery_process - Trigger a recovery process | ||
| 1064 | * | ||
| 1065 | * @param cdev | ||
| 1066 | * | ||
| 1067 | * @return 0 on success, error otherwise. | ||
| 1068 | */ | ||
| 1069 | int (*recovery_process)(struct qed_dev *cdev); | ||
| 1070 | |||
| 1071 | /** | ||
| 1072 | * @brief recovery_prolog - Execute the prolog operations of a recovery process | ||
| 1073 | * | ||
| 1074 | * @param cdev | ||
| 1075 | * | ||
| 1076 | * @return 0 on success, error otherwise. | ||
| 1077 | */ | ||
| 1078 | int (*recovery_prolog)(struct qed_dev *cdev); | ||
| 1079 | |||
| 1080 | /** | ||
| 1061 | * @brief update_drv_state - API to inform the change in the driver state. | 1081 | * @brief update_drv_state - API to inform the change in the driver state. |
| 1062 | * | 1082 | * |
| 1063 | * @param cdev | 1083 | * @param cdev |
diff --git a/include/linux/qed/qede_rdma.h b/include/linux/qed/qede_rdma.h index 9904617a9730..5a00c7a473bf 100644 --- a/include/linux/qed/qede_rdma.h +++ b/include/linux/qed/qede_rdma.h | |||
| @@ -74,21 +74,23 @@ void qede_rdma_unregister_driver(struct qedr_driver *drv); | |||
| 74 | bool qede_rdma_supported(struct qede_dev *dev); | 74 | bool qede_rdma_supported(struct qede_dev *dev); |
| 75 | 75 | ||
| 76 | #if IS_ENABLED(CONFIG_QED_RDMA) | 76 | #if IS_ENABLED(CONFIG_QED_RDMA) |
| 77 | int qede_rdma_dev_add(struct qede_dev *dev); | 77 | int qede_rdma_dev_add(struct qede_dev *dev, bool recovery); |
| 78 | void qede_rdma_dev_event_open(struct qede_dev *dev); | 78 | void qede_rdma_dev_event_open(struct qede_dev *dev); |
| 79 | void qede_rdma_dev_event_close(struct qede_dev *dev); | 79 | void qede_rdma_dev_event_close(struct qede_dev *dev); |
| 80 | void qede_rdma_dev_remove(struct qede_dev *dev); | 80 | void qede_rdma_dev_remove(struct qede_dev *dev, bool recovery); |
| 81 | void qede_rdma_event_changeaddr(struct qede_dev *edr); | 81 | void qede_rdma_event_changeaddr(struct qede_dev *edr); |
| 82 | 82 | ||
| 83 | #else | 83 | #else |
| 84 | static inline int qede_rdma_dev_add(struct qede_dev *dev) | 84 | static inline int qede_rdma_dev_add(struct qede_dev *dev, |
| 85 | bool recovery) | ||
| 85 | { | 86 | { |
| 86 | return 0; | 87 | return 0; |
| 87 | } | 88 | } |
| 88 | 89 | ||
| 89 | static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} | 90 | static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {} |
| 90 | static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} | 91 | static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {} |
| 91 | static inline void qede_rdma_dev_remove(struct qede_dev *dev) {} | 92 | static inline void qede_rdma_dev_remove(struct qede_dev *dev, |
| 93 | bool recovery) {} | ||
| 92 | static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} | 94 | static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {} |
| 93 | #endif | 95 | #endif |
| 94 | #endif | 96 | #endif |
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h index 426cee67f0e2..b8e094b125ee 100644 --- a/include/linux/rcu_node_tree.h +++ b/include/linux/rcu_node_tree.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * RCU node combining tree definitions. These are used to compute | 3 | * RCU node combining tree definitions. These are used to compute |
| 3 | * global attributes while avoiding common-case global contention. A key | 4 | * global attributes while avoiding common-case global contention. A key |
| @@ -11,23 +12,9 @@ | |||
| 11 | * because the size of the TREE SRCU srcu_struct structure depends | 12 | * because the size of the TREE SRCU srcu_struct structure depends |
| 12 | * on these definitions. | 13 | * on these definitions. |
| 13 | * | 14 | * |
| 14 | * This program is free software; you can redistribute it and/or modify | ||
| 15 | * it under the terms of the GNU General Public License as published by | ||
| 16 | * the Free Software Foundation; either version 2 of the License, or | ||
| 17 | * (at your option) any later version. | ||
| 18 | * | ||
| 19 | * This program is distributed in the hope that it will be useful, | ||
| 20 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 21 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 22 | * GNU General Public License for more details. | ||
| 23 | * | ||
| 24 | * You should have received a copy of the GNU General Public License | ||
| 25 | * along with this program; if not, you can access it online at | ||
| 26 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 27 | * | ||
| 28 | * Copyright IBM Corporation, 2017 | 15 | * Copyright IBM Corporation, 2017 |
| 29 | * | 16 | * |
| 30 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 17 | * Author: Paul E. McKenney <paulmck@linux.ibm.com> |
| 31 | */ | 18 | */ |
| 32 | 19 | ||
| 33 | #ifndef __LINUX_RCU_NODE_TREE_H | 20 | #ifndef __LINUX_RCU_NODE_TREE_H |
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h index c3ad00e63556..87404cb015f1 100644 --- a/include/linux/rcu_segcblist.h +++ b/include/linux/rcu_segcblist.h | |||
| @@ -1,3 +1,4 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * RCU segmented callback lists | 3 | * RCU segmented callback lists |
| 3 | * | 4 | * |
| @@ -5,23 +6,9 @@ | |||
| 5 | * because the size of the TREE SRCU srcu_struct structure depends | 6 | * because the size of the TREE SRCU srcu_struct structure depends |
| 6 | * on these definitions. | 7 | * on these definitions. |
| 7 | * | 8 | * |
| 8 | * This program is free software; you can redistribute it and/or modify | ||
| 9 | * it under the terms of the GNU General Public License as published by | ||
| 10 | * the Free Software Foundation; either version 2 of the License, or | ||
| 11 | * (at your option) any later version. | ||
| 12 | * | ||
| 13 | * This program is distributed in the hope that it will be useful, | ||
| 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 16 | * GNU General Public License for more details. | ||
| 17 | * | ||
| 18 | * You should have received a copy of the GNU General Public License | ||
| 19 | * along with this program; if not, you can access it online at | ||
| 20 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 21 | * | ||
| 22 | * Copyright IBM Corporation, 2017 | 9 | * Copyright IBM Corporation, 2017 |
| 23 | * | 10 | * |
| 24 | * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 11 | * Authors: Paul E. McKenney <paulmck@linux.net.ibm.com> |
| 25 | */ | 12 | */ |
| 26 | 13 | ||
| 27 | #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H | 14 | #ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H |
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h index ece7ed9a4a70..6fc53a1345b3 100644 --- a/include/linux/rcu_sync.h +++ b/include/linux/rcu_sync.h | |||
| @@ -1,20 +1,7 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * RCU-based infrastructure for lightweight reader-writer locking | 3 | * RCU-based infrastructure for lightweight reader-writer locking |
| 3 | * | 4 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright (c) 2015, Red Hat, Inc. | 5 | * Copyright (c) 2015, Red Hat, Inc. |
| 19 | * | 6 | * |
| 20 | * Author: Oleg Nesterov <oleg@redhat.com> | 7 | * Author: Oleg Nesterov <oleg@redhat.com> |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 4db8bcacc51a..6cdb1db776cf 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -1,25 +1,12 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * Read-Copy Update mechanism for mutual exclusion | 3 | * Read-Copy Update mechanism for mutual exclusion |
| 3 | * | 4 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright IBM Corporation, 2001 | 5 | * Copyright IBM Corporation, 2001 |
| 19 | * | 6 | * |
| 20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | 7 | * Author: Dipankar Sarma <dipankar@in.ibm.com> |
| 21 | * | 8 | * |
| 22 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | 9 | * Based on the original work by Paul McKenney <paulmck@vnet.ibm.com> |
| 23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | 10 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
| 24 | * Papers: | 11 | * Papers: |
| 25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | 12 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf |
| @@ -89,7 +76,7 @@ static inline int rcu_preempt_depth(void) | |||
| 89 | /* Internal to kernel */ | 76 | /* Internal to kernel */ |
| 90 | void rcu_init(void); | 77 | void rcu_init(void); |
| 91 | extern int rcu_scheduler_active __read_mostly; | 78 | extern int rcu_scheduler_active __read_mostly; |
| 92 | void rcu_check_callbacks(int user); | 79 | void rcu_sched_clock_irq(int user); |
| 93 | void rcu_report_dead(unsigned int cpu); | 80 | void rcu_report_dead(unsigned int cpu); |
| 94 | void rcutree_migrate_callbacks(int cpu); | 81 | void rcutree_migrate_callbacks(int cpu); |
| 95 | 82 | ||
| @@ -309,16 +296,16 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
| 309 | */ | 296 | */ |
| 310 | 297 | ||
| 311 | #ifdef __CHECKER__ | 298 | #ifdef __CHECKER__ |
| 312 | #define rcu_dereference_sparse(p, space) \ | 299 | #define rcu_check_sparse(p, space) \ |
| 313 | ((void)(((typeof(*p) space *)p) == p)) | 300 | ((void)(((typeof(*p) space *)p) == p)) |
| 314 | #else /* #ifdef __CHECKER__ */ | 301 | #else /* #ifdef __CHECKER__ */ |
| 315 | #define rcu_dereference_sparse(p, space) | 302 | #define rcu_check_sparse(p, space) |
| 316 | #endif /* #else #ifdef __CHECKER__ */ | 303 | #endif /* #else #ifdef __CHECKER__ */ |
| 317 | 304 | ||
| 318 | #define __rcu_access_pointer(p, space) \ | 305 | #define __rcu_access_pointer(p, space) \ |
| 319 | ({ \ | 306 | ({ \ |
| 320 | typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ | 307 | typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \ |
| 321 | rcu_dereference_sparse(p, space); \ | 308 | rcu_check_sparse(p, space); \ |
| 322 | ((typeof(*p) __force __kernel *)(_________p1)); \ | 309 | ((typeof(*p) __force __kernel *)(_________p1)); \ |
| 323 | }) | 310 | }) |
| 324 | #define __rcu_dereference_check(p, c, space) \ | 311 | #define __rcu_dereference_check(p, c, space) \ |
| @@ -326,13 +313,13 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
| 326 | /* Dependency order vs. p above. */ \ | 313 | /* Dependency order vs. p above. */ \ |
| 327 | typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ | 314 | typeof(*p) *________p1 = (typeof(*p) *__force)READ_ONCE(p); \ |
| 328 | RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ | 315 | RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_check() usage"); \ |
| 329 | rcu_dereference_sparse(p, space); \ | 316 | rcu_check_sparse(p, space); \ |
| 330 | ((typeof(*p) __force __kernel *)(________p1)); \ | 317 | ((typeof(*p) __force __kernel *)(________p1)); \ |
| 331 | }) | 318 | }) |
| 332 | #define __rcu_dereference_protected(p, c, space) \ | 319 | #define __rcu_dereference_protected(p, c, space) \ |
| 333 | ({ \ | 320 | ({ \ |
| 334 | RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ | 321 | RCU_LOCKDEP_WARN(!(c), "suspicious rcu_dereference_protected() usage"); \ |
| 335 | rcu_dereference_sparse(p, space); \ | 322 | rcu_check_sparse(p, space); \ |
| 336 | ((typeof(*p) __force __kernel *)(p)); \ | 323 | ((typeof(*p) __force __kernel *)(p)); \ |
| 337 | }) | 324 | }) |
| 338 | #define rcu_dereference_raw(p) \ | 325 | #define rcu_dereference_raw(p) \ |
| @@ -382,6 +369,7 @@ static inline void rcu_preempt_sleep_check(void) { } | |||
| 382 | #define rcu_assign_pointer(p, v) \ | 369 | #define rcu_assign_pointer(p, v) \ |
| 383 | ({ \ | 370 | ({ \ |
| 384 | uintptr_t _r_a_p__v = (uintptr_t)(v); \ | 371 | uintptr_t _r_a_p__v = (uintptr_t)(v); \ |
| 372 | rcu_check_sparse(p, __rcu); \ | ||
| 385 | \ | 373 | \ |
| 386 | if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ | 374 | if (__builtin_constant_p(v) && (_r_a_p__v) == (uintptr_t)NULL) \ |
| 387 | WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ | 375 | WRITE_ONCE((p), (typeof(p))(_r_a_p__v)); \ |
| @@ -785,7 +773,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 785 | */ | 773 | */ |
| 786 | #define RCU_INIT_POINTER(p, v) \ | 774 | #define RCU_INIT_POINTER(p, v) \ |
| 787 | do { \ | 775 | do { \ |
| 788 | rcu_dereference_sparse(p, __rcu); \ | 776 | rcu_check_sparse(p, __rcu); \ |
| 789 | WRITE_ONCE(p, RCU_INITIALIZER(v)); \ | 777 | WRITE_ONCE(p, RCU_INITIALIZER(v)); \ |
| 790 | } while (0) | 778 | } while (0) |
| 791 | 779 | ||
| @@ -859,7 +847,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 859 | 847 | ||
| 860 | /* Has the specified rcu_head structure been handed to call_rcu()? */ | 848 | /* Has the specified rcu_head structure been handed to call_rcu()? */ |
| 861 | 849 | ||
| 862 | /* | 850 | /** |
| 863 | * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() | 851 | * rcu_head_init - Initialize rcu_head for rcu_head_after_call_rcu() |
| 864 | * @rhp: The rcu_head structure to initialize. | 852 | * @rhp: The rcu_head structure to initialize. |
| 865 | * | 853 | * |
| @@ -874,10 +862,10 @@ static inline void rcu_head_init(struct rcu_head *rhp) | |||
| 874 | rhp->func = (rcu_callback_t)~0L; | 862 | rhp->func = (rcu_callback_t)~0L; |
| 875 | } | 863 | } |
| 876 | 864 | ||
| 877 | /* | 865 | /** |
| 878 | * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? | 866 | * rcu_head_after_call_rcu - Has this rcu_head been passed to call_rcu()? |
| 879 | * @rhp: The rcu_head structure to test. | 867 | * @rhp: The rcu_head structure to test. |
| 880 | * @func: The function passed to call_rcu() along with @rhp. | 868 | * @f: The function passed to call_rcu() along with @rhp. |
| 881 | * | 869 | * |
| 882 | * Returns @true if the @rhp has been passed to call_rcu() with @func, | 870 | * Returns @true if the @rhp has been passed to call_rcu() with @func, |
| 883 | * and @false otherwise. Emits a warning in any other case, including | 871 | * and @false otherwise. Emits a warning in any other case, including |
| @@ -896,57 +884,4 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f) | |||
| 896 | return false; | 884 | return false; |
| 897 | } | 885 | } |
| 898 | 886 | ||
| 899 | |||
| 900 | /* Transitional pre-consolidation compatibility definitions. */ | ||
| 901 | |||
| 902 | static inline void synchronize_rcu_bh(void) | ||
| 903 | { | ||
| 904 | synchronize_rcu(); | ||
| 905 | } | ||
| 906 | |||
| 907 | static inline void synchronize_rcu_bh_expedited(void) | ||
| 908 | { | ||
| 909 | synchronize_rcu_expedited(); | ||
| 910 | } | ||
| 911 | |||
| 912 | static inline void call_rcu_bh(struct rcu_head *head, rcu_callback_t func) | ||
| 913 | { | ||
| 914 | call_rcu(head, func); | ||
| 915 | } | ||
| 916 | |||
| 917 | static inline void rcu_barrier_bh(void) | ||
| 918 | { | ||
| 919 | rcu_barrier(); | ||
| 920 | } | ||
| 921 | |||
| 922 | static inline void synchronize_sched(void) | ||
| 923 | { | ||
| 924 | synchronize_rcu(); | ||
| 925 | } | ||
| 926 | |||
| 927 | static inline void synchronize_sched_expedited(void) | ||
| 928 | { | ||
| 929 | synchronize_rcu_expedited(); | ||
| 930 | } | ||
| 931 | |||
| 932 | static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func) | ||
| 933 | { | ||
| 934 | call_rcu(head, func); | ||
| 935 | } | ||
| 936 | |||
| 937 | static inline void rcu_barrier_sched(void) | ||
| 938 | { | ||
| 939 | rcu_barrier(); | ||
| 940 | } | ||
| 941 | |||
| 942 | static inline unsigned long get_state_synchronize_sched(void) | ||
| 943 | { | ||
| 944 | return get_state_synchronize_rcu(); | ||
| 945 | } | ||
| 946 | |||
| 947 | static inline void cond_synchronize_sched(unsigned long oldstate) | ||
| 948 | { | ||
| 949 | cond_synchronize_rcu(oldstate); | ||
| 950 | } | ||
| 951 | |||
| 952 | #endif /* __LINUX_RCUPDATE_H */ | 887 | #endif /* __LINUX_RCUPDATE_H */ |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index af65d1f36ddb..8e727f57d814 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -1,23 +1,10 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. | 3 | * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition. |
| 3 | * | 4 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright IBM Corporation, 2008 | 5 | * Copyright IBM Corporation, 2008 |
| 19 | * | 6 | * |
| 20 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 7 | * Author: Paul E. McKenney <paulmck@linux.ibm.com> |
| 21 | * | 8 | * |
| 22 | * For detailed explanation of Read-Copy Update mechanism see - | 9 | * For detailed explanation of Read-Copy Update mechanism see - |
| 23 | * Documentation/RCU | 10 | * Documentation/RCU |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 7f83179177d1..735601ac27d3 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -1,26 +1,13 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) | 3 | * Read-Copy Update mechanism for mutual exclusion (tree-based version) |
| 3 | * | 4 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright IBM Corporation, 2008 | 5 | * Copyright IBM Corporation, 2008 |
| 19 | * | 6 | * |
| 20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | 7 | * Author: Dipankar Sarma <dipankar@in.ibm.com> |
| 21 | * Paul E. McKenney <paulmck@linux.vnet.ibm.com> Hierarchical algorithm | 8 | * Paul E. McKenney <paulmck@linux.ibm.com> Hierarchical algorithm |
| 22 | * | 9 | * |
| 23 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | 10 | * Based on the original work by Paul McKenney <paulmck@linux.ibm.com> |
| 24 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | 11 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. |
| 25 | * | 12 | * |
| 26 | * For detailed explanation of Read-Copy Update mechanism see - | 13 | * For detailed explanation of Read-Copy Update mechanism see - |
diff --git a/include/linux/regmap.h b/include/linux/regmap.h index 1781b6cb793c..daeec7dbd65c 100644 --- a/include/linux/regmap.h +++ b/include/linux/regmap.h | |||
| @@ -1131,11 +1131,37 @@ struct regmap_irq { | |||
| 1131 | .reg_offset = (_id) / (_reg_bits), \ | 1131 | .reg_offset = (_id) / (_reg_bits), \ |
| 1132 | } | 1132 | } |
| 1133 | 1133 | ||
| 1134 | #define REGMAP_IRQ_MAIN_REG_OFFSET(arr) \ | ||
| 1135 | { .num_regs = ARRAY_SIZE((arr)), .offset = &(arr)[0] } | ||
| 1136 | |||
| 1137 | struct regmap_irq_sub_irq_map { | ||
| 1138 | unsigned int num_regs; | ||
| 1139 | unsigned int *offset; | ||
| 1140 | }; | ||
| 1141 | |||
| 1134 | /** | 1142 | /** |
| 1135 | * struct regmap_irq_chip - Description of a generic regmap irq_chip. | 1143 | * struct regmap_irq_chip - Description of a generic regmap irq_chip. |
| 1136 | * | 1144 | * |
| 1137 | * @name: Descriptive name for IRQ controller. | 1145 | * @name: Descriptive name for IRQ controller. |
| 1138 | * | 1146 | * |
| 1147 | * @main_status: Base main status register address. For chips which have | ||
| 1148 | * interrupts arranged in separate sub-irq blocks with own IRQ | ||
| 1149 | * registers and which have a main IRQ registers indicating | ||
| 1150 | * sub-irq blocks with unhandled interrupts. For such chips fill | ||
| 1151 | * sub-irq register information in status_base, mask_base and | ||
| 1152 | * ack_base. | ||
| 1153 | * @num_main_status_bits: Should be given to chips where number of meaningful | ||
| 1154 | * main status bits differs from num_regs. | ||
| 1155 | * @sub_reg_offsets: arrays of mappings from main register bits to sub irq | ||
| 1156 | * registers. First item in array describes the registers | ||
| 1157 | * for first main status bit. Second array for second bit etc. | ||
| 1158 | * Offset is given as sub register status offset to | ||
| 1159 | * status_base. Should contain num_regs arrays. | ||
| 1160 | * Can be provided for chips with more complex mapping than | ||
| 1161 | * 1st bit to 1st sub-reg, 2nd bit to 2nd sub-reg, ... | ||
| 1162 | * @num_main_regs: Number of 'main status' irq registers for chips which have | ||
| 1163 | * main_status set. | ||
| 1164 | * | ||
| 1139 | * @status_base: Base status register address. | 1165 | * @status_base: Base status register address. |
| 1140 | * @mask_base: Base mask register address. | 1166 | * @mask_base: Base mask register address. |
| 1141 | * @mask_writeonly: Base mask register is write only. | 1167 | * @mask_writeonly: Base mask register is write only. |
| @@ -1181,6 +1207,11 @@ struct regmap_irq { | |||
| 1181 | struct regmap_irq_chip { | 1207 | struct regmap_irq_chip { |
| 1182 | const char *name; | 1208 | const char *name; |
| 1183 | 1209 | ||
| 1210 | unsigned int main_status; | ||
| 1211 | unsigned int num_main_status_bits; | ||
| 1212 | struct regmap_irq_sub_irq_map *sub_reg_offsets; | ||
| 1213 | int num_main_regs; | ||
| 1214 | |||
| 1184 | unsigned int status_base; | 1215 | unsigned int status_base; |
| 1185 | unsigned int mask_base; | 1216 | unsigned int mask_base; |
| 1186 | unsigned int unmask_base; | 1217 | unsigned int unmask_base; |
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h index 389bcaf7900f..377da2357118 100644 --- a/include/linux/regulator/driver.h +++ b/include/linux/regulator/driver.h | |||
| @@ -264,6 +264,7 @@ enum regulator_type { | |||
| 264 | * @continuous_voltage_range: Indicates if the regulator can set any | 264 | * @continuous_voltage_range: Indicates if the regulator can set any |
| 265 | * voltage within constraints range. | 265 | * voltage within constraints range. |
| 266 | * @n_voltages: Number of selectors available for ops.list_voltage(). | 266 | * @n_voltages: Number of selectors available for ops.list_voltage(). |
| 267 | * @n_current_limits: Number of selectors available for current limits | ||
| 267 | * | 268 | * |
| 268 | * @min_uV: Voltage given by the lowest selector (if linear mapping) | 269 | * @min_uV: Voltage given by the lowest selector (if linear mapping) |
| 269 | * @uV_step: Voltage increase with each selector (if linear mapping) | 270 | * @uV_step: Voltage increase with each selector (if linear mapping) |
| @@ -278,14 +279,15 @@ enum regulator_type { | |||
| 278 | * @n_linear_ranges: Number of entries in the @linear_ranges (and in | 279 | * @n_linear_ranges: Number of entries in the @linear_ranges (and in |
| 279 | * linear_range_selectors if used) table(s). | 280 | * linear_range_selectors if used) table(s). |
| 280 | * @volt_table: Voltage mapping table (if table based mapping) | 281 | * @volt_table: Voltage mapping table (if table based mapping) |
| 282 | * @curr_table: Current limit mapping table (if table based mapping) | ||
| 281 | * | 283 | * |
| 282 | * @vsel_range_reg: Register for range selector when using pickable ranges | 284 | * @vsel_range_reg: Register for range selector when using pickable ranges |
| 283 | * and regulator_regmap_X_voltage_X_pickable functions. | 285 | * and regulator_regmap_X_voltage_X_pickable functions. |
| 284 | * @vsel_range_mask: Mask for register bitfield used for range selector | 286 | * @vsel_range_mask: Mask for register bitfield used for range selector |
| 285 | * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ | 287 | * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_ |
| 286 | * @vsel_mask: Mask for register bitfield used for selector | 288 | * @vsel_mask: Mask for register bitfield used for selector |
| 287 | * @csel_reg: Register for TPS65218 LS3 current regulator | 289 | * @csel_reg: Register for current limit selector using regmap set_current_limit |
| 288 | * @csel_mask: Mask for TPS65218 LS3 current regulator | 290 | * @csel_mask: Mask for register bitfield used for current limit selector |
| 289 | * @apply_reg: Register for initiate voltage change on the output when | 291 | * @apply_reg: Register for initiate voltage change on the output when |
| 290 | * using regulator_set_voltage_sel_regmap | 292 | * using regulator_set_voltage_sel_regmap |
| 291 | * @apply_bit: Register bitfield used for initiate voltage change on the | 293 | * @apply_bit: Register bitfield used for initiate voltage change on the |
| @@ -333,6 +335,7 @@ struct regulator_desc { | |||
| 333 | int id; | 335 | int id; |
| 334 | unsigned int continuous_voltage_range:1; | 336 | unsigned int continuous_voltage_range:1; |
| 335 | unsigned n_voltages; | 337 | unsigned n_voltages; |
| 338 | unsigned int n_current_limits; | ||
| 336 | const struct regulator_ops *ops; | 339 | const struct regulator_ops *ops; |
| 337 | int irq; | 340 | int irq; |
| 338 | enum regulator_type type; | 341 | enum regulator_type type; |
| @@ -351,6 +354,7 @@ struct regulator_desc { | |||
| 351 | int n_linear_ranges; | 354 | int n_linear_ranges; |
| 352 | 355 | ||
| 353 | const unsigned int *volt_table; | 356 | const unsigned int *volt_table; |
| 357 | const unsigned int *curr_table; | ||
| 354 | 358 | ||
| 355 | unsigned int vsel_range_reg; | 359 | unsigned int vsel_range_reg; |
| 356 | unsigned int vsel_range_mask; | 360 | unsigned int vsel_range_mask; |
| @@ -401,13 +405,7 @@ struct regulator_desc { | |||
| 401 | * NULL). | 405 | * NULL). |
| 402 | * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is | 406 | * @regmap: regmap to use for core regmap helpers if dev_get_regmap() is |
| 403 | * insufficient. | 407 | * insufficient. |
| 404 | * @ena_gpio_initialized: GPIO controlling regulator enable was properly | 408 | * @ena_gpiod: GPIO controlling regulator enable. |
| 405 | * initialized, meaning that >= 0 is a valid gpio | ||
| 406 | * identifier and < 0 is a non existent gpio. | ||
| 407 | * @ena_gpio: GPIO controlling regulator enable. | ||
| 408 | * @ena_gpiod: GPIO descriptor controlling regulator enable. | ||
| 409 | * @ena_gpio_invert: Sense for GPIO enable control. | ||
| 410 | * @ena_gpio_flags: Flags to use when calling gpio_request_one() | ||
| 411 | */ | 409 | */ |
| 412 | struct regulator_config { | 410 | struct regulator_config { |
| 413 | struct device *dev; | 411 | struct device *dev; |
| @@ -416,11 +414,7 @@ struct regulator_config { | |||
| 416 | struct device_node *of_node; | 414 | struct device_node *of_node; |
| 417 | struct regmap *regmap; | 415 | struct regmap *regmap; |
| 418 | 416 | ||
| 419 | bool ena_gpio_initialized; | ||
| 420 | int ena_gpio; | ||
| 421 | struct gpio_desc *ena_gpiod; | 417 | struct gpio_desc *ena_gpiod; |
| 422 | unsigned int ena_gpio_invert:1; | ||
| 423 | unsigned int ena_gpio_flags; | ||
| 424 | }; | 418 | }; |
| 425 | 419 | ||
| 426 | /* | 420 | /* |
| @@ -503,6 +497,7 @@ int regulator_notifier_call_chain(struct regulator_dev *rdev, | |||
| 503 | 497 | ||
| 504 | void *rdev_get_drvdata(struct regulator_dev *rdev); | 498 | void *rdev_get_drvdata(struct regulator_dev *rdev); |
| 505 | struct device *rdev_get_dev(struct regulator_dev *rdev); | 499 | struct device *rdev_get_dev(struct regulator_dev *rdev); |
| 500 | struct regmap *rdev_get_regmap(struct regulator_dev *rdev); | ||
| 506 | int rdev_get_id(struct regulator_dev *rdev); | 501 | int rdev_get_id(struct regulator_dev *rdev); |
| 507 | 502 | ||
| 508 | int regulator_mode_to_status(unsigned int); | 503 | int regulator_mode_to_status(unsigned int); |
| @@ -543,9 +538,18 @@ int regulator_set_pull_down_regmap(struct regulator_dev *rdev); | |||
| 543 | 538 | ||
| 544 | int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, | 539 | int regulator_set_active_discharge_regmap(struct regulator_dev *rdev, |
| 545 | bool enable); | 540 | bool enable); |
| 541 | int regulator_set_current_limit_regmap(struct regulator_dev *rdev, | ||
| 542 | int min_uA, int max_uA); | ||
| 543 | int regulator_get_current_limit_regmap(struct regulator_dev *rdev); | ||
| 546 | void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); | 544 | void *regulator_get_init_drvdata(struct regulator_init_data *reg_init_data); |
| 547 | 545 | ||
| 548 | void regulator_lock(struct regulator_dev *rdev); | 546 | void regulator_lock(struct regulator_dev *rdev); |
| 549 | void regulator_unlock(struct regulator_dev *rdev); | 547 | void regulator_unlock(struct regulator_dev *rdev); |
| 550 | 548 | ||
| 549 | /* | ||
| 550 | * Helper functions intended to be used by regulator drivers prior registering | ||
| 551 | * their regulators. | ||
| 552 | */ | ||
| 553 | int regulator_desc_list_voltage_linear_range(const struct regulator_desc *desc, | ||
| 554 | unsigned int selector); | ||
| 551 | #endif | 555 | #endif |
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h index 1a4340ed8e2b..f10140da7145 100644 --- a/include/linux/regulator/fixed.h +++ b/include/linux/regulator/fixed.h | |||
| @@ -25,14 +25,6 @@ struct regulator_init_data; | |||
| 25 | * @input_supply: Name of the input regulator supply | 25 | * @input_supply: Name of the input regulator supply |
| 26 | * @microvolts: Output voltage of regulator | 26 | * @microvolts: Output voltage of regulator |
| 27 | * @startup_delay: Start-up time in microseconds | 27 | * @startup_delay: Start-up time in microseconds |
| 28 | * @gpio_is_open_drain: Gpio pin is open drain or normal type. | ||
| 29 | * If it is open drain type then HIGH will be set | ||
| 30 | * through PULL-UP with setting gpio as input | ||
| 31 | * and low will be set as gpio-output with driven | ||
| 32 | * to low. For non-open-drain case, the gpio will | ||
| 33 | * will be in output and drive to low/high accordingly. | ||
| 34 | * @enable_high: Polarity of enable GPIO | ||
| 35 | * 1 = Active high, 0 = Active low | ||
| 36 | * @enabled_at_boot: Whether regulator has been enabled at | 28 | * @enabled_at_boot: Whether regulator has been enabled at |
| 37 | * boot or not. 1 = Yes, 0 = No | 29 | * boot or not. 1 = Yes, 0 = No |
| 38 | * This is used to keep the regulator at | 30 | * This is used to keep the regulator at |
| @@ -48,8 +40,6 @@ struct fixed_voltage_config { | |||
| 48 | const char *input_supply; | 40 | const char *input_supply; |
| 49 | int microvolts; | 41 | int microvolts; |
| 50 | unsigned startup_delay; | 42 | unsigned startup_delay; |
| 51 | unsigned gpio_is_open_drain:1; | ||
| 52 | unsigned enable_high:1; | ||
| 53 | unsigned enabled_at_boot:1; | 43 | unsigned enabled_at_boot:1; |
| 54 | struct regulator_init_data *init_data; | 44 | struct regulator_init_data *init_data; |
| 55 | }; | 45 | }; |
diff --git a/include/linux/regulator/gpio-regulator.h b/include/linux/regulator/gpio-regulator.h index 19fbd267406d..11cd6375215d 100644 --- a/include/linux/regulator/gpio-regulator.h +++ b/include/linux/regulator/gpio-regulator.h | |||
| @@ -21,6 +21,8 @@ | |||
| 21 | #ifndef __REGULATOR_GPIO_H | 21 | #ifndef __REGULATOR_GPIO_H |
| 22 | #define __REGULATOR_GPIO_H | 22 | #define __REGULATOR_GPIO_H |
| 23 | 23 | ||
| 24 | #include <linux/gpio/consumer.h> | ||
| 25 | |||
| 24 | struct regulator_init_data; | 26 | struct regulator_init_data; |
| 25 | 27 | ||
| 26 | enum regulator_type; | 28 | enum regulator_type; |
| @@ -44,18 +46,14 @@ struct gpio_regulator_state { | |||
| 44 | /** | 46 | /** |
| 45 | * struct gpio_regulator_config - config structure | 47 | * struct gpio_regulator_config - config structure |
| 46 | * @supply_name: Name of the regulator supply | 48 | * @supply_name: Name of the regulator supply |
| 47 | * @enable_gpio: GPIO to use for enable control | ||
| 48 | * set to -EINVAL if not used | ||
| 49 | * @enable_high: Polarity of enable GPIO | ||
| 50 | * 1 = Active high, 0 = Active low | ||
| 51 | * @enabled_at_boot: Whether regulator has been enabled at | 49 | * @enabled_at_boot: Whether regulator has been enabled at |
| 52 | * boot or not. 1 = Yes, 0 = No | 50 | * boot or not. 1 = Yes, 0 = No |
| 53 | * This is used to keep the regulator at | 51 | * This is used to keep the regulator at |
| 54 | * the default state | 52 | * the default state |
| 55 | * @startup_delay: Start-up time in microseconds | 53 | * @startup_delay: Start-up time in microseconds |
| 56 | * @gpios: Array containing the gpios needed to control | 54 | * @gflags: Array of GPIO configuration flags for initial |
| 57 | * the setting of the regulator | 55 | * states |
| 58 | * @nr_gpios: Number of gpios | 56 | * @ngpios: Number of GPIOs and configurations available |
| 59 | * @states: Array of gpio_regulator_state entries describing | 57 | * @states: Array of gpio_regulator_state entries describing |
| 60 | * the gpio state for specific voltages | 58 | * the gpio state for specific voltages |
| 61 | * @nr_states: Number of states available | 59 | * @nr_states: Number of states available |
| @@ -69,13 +67,11 @@ struct gpio_regulator_state { | |||
| 69 | struct gpio_regulator_config { | 67 | struct gpio_regulator_config { |
| 70 | const char *supply_name; | 68 | const char *supply_name; |
| 71 | 69 | ||
| 72 | int enable_gpio; | ||
| 73 | unsigned enable_high:1; | ||
| 74 | unsigned enabled_at_boot:1; | 70 | unsigned enabled_at_boot:1; |
| 75 | unsigned startup_delay; | 71 | unsigned startup_delay; |
| 76 | 72 | ||
| 77 | struct gpio *gpios; | 73 | enum gpiod_flags *gflags; |
| 78 | int nr_gpios; | 74 | int ngpios; |
| 79 | 75 | ||
| 80 | struct gpio_regulator_state *states; | 76 | struct gpio_regulator_state *states; |
| 81 | int nr_states; | 77 | int nr_states; |
diff --git a/include/linux/relay.h b/include/linux/relay.h index e1bdf01a86e2..c759f96e39c1 100644 --- a/include/linux/relay.h +++ b/include/linux/relay.h | |||
| @@ -66,7 +66,7 @@ struct rchan | |||
| 66 | struct kref kref; /* channel refcount */ | 66 | struct kref kref; /* channel refcount */ |
| 67 | void *private_data; /* for user-defined data */ | 67 | void *private_data; /* for user-defined data */ |
| 68 | size_t last_toobig; /* tried to log event > subbuf size */ | 68 | size_t last_toobig; /* tried to log event > subbuf size */ |
| 69 | struct rchan_buf ** __percpu buf; /* per-cpu channel buffers */ | 69 | struct rchan_buf * __percpu *buf; /* per-cpu channel buffers */ |
| 70 | int is_global; /* One global buffer ? */ | 70 | int is_global; /* One global buffer ? */ |
| 71 | struct list_head list; /* for channel list */ | 71 | struct list_head list; /* for channel list */ |
| 72 | struct dentry *parent; /* parent dentry passed to open */ | 72 | struct dentry *parent; /* parent dentry passed to open */ |
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h index 507a2b524208..04d04709f2bd 100644 --- a/include/linux/remoteproc.h +++ b/include/linux/remoteproc.h | |||
| @@ -345,9 +345,9 @@ struct firmware; | |||
| 345 | * @stop: power off the device | 345 | * @stop: power off the device |
| 346 | * @kick: kick a virtqueue (virtqueue id given as a parameter) | 346 | * @kick: kick a virtqueue (virtqueue id given as a parameter) |
| 347 | * @da_to_va: optional platform hook to perform address translations | 347 | * @da_to_va: optional platform hook to perform address translations |
| 348 | * @load_rsc_table: load resource table from firmware image | 348 | * @parse_fw: parse firmware to extract information (e.g. resource table) |
| 349 | * @find_loaded_rsc_table: find the loaded resource table | 349 | * @find_loaded_rsc_table: find the loaded resource table |
| 350 | * @load: load firmeware to memory, where the remote processor | 350 | * @load: load firmware to memory, where the remote processor |
| 351 | * expects to find it | 351 | * expects to find it |
| 352 | * @sanity_check: sanity check the fw image | 352 | * @sanity_check: sanity check the fw image |
| 353 | * @get_boot_addr: get boot address to entry point specified in firmware | 353 | * @get_boot_addr: get boot address to entry point specified in firmware |
| @@ -554,11 +554,11 @@ struct rproc_vdev { | |||
| 554 | struct kref refcount; | 554 | struct kref refcount; |
| 555 | 555 | ||
| 556 | struct rproc_subdev subdev; | 556 | struct rproc_subdev subdev; |
| 557 | struct device dev; | ||
| 557 | 558 | ||
| 558 | unsigned int id; | 559 | unsigned int id; |
| 559 | struct list_head node; | 560 | struct list_head node; |
| 560 | struct rproc *rproc; | 561 | struct rproc *rproc; |
| 561 | struct virtio_device vdev; | ||
| 562 | struct rproc_vring vring[RVDEV_NUM_VRINGS]; | 562 | struct rproc_vring vring[RVDEV_NUM_VRINGS]; |
| 563 | u32 rsc_offset; | 563 | u32 rsc_offset; |
| 564 | u32 index; | 564 | u32 index; |
| @@ -601,7 +601,7 @@ int rproc_coredump_add_custom_segment(struct rproc *rproc, | |||
| 601 | 601 | ||
| 602 | static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) | 602 | static inline struct rproc_vdev *vdev_to_rvdev(struct virtio_device *vdev) |
| 603 | { | 603 | { |
| 604 | return container_of(vdev, struct rproc_vdev, vdev); | 604 | return container_of(vdev->dev.parent, struct rproc_vdev, dev); |
| 605 | } | 605 | } |
| 606 | 606 | ||
| 607 | static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) | 607 | static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev) |
diff --git a/include/linux/reset.h b/include/linux/reset.h index 29af6d6b2f4b..c1901b61ca30 100644 --- a/include/linux/reset.h +++ b/include/linux/reset.h | |||
| @@ -32,6 +32,8 @@ struct reset_control *devm_reset_control_array_get(struct device *dev, | |||
| 32 | struct reset_control *of_reset_control_array_get(struct device_node *np, | 32 | struct reset_control *of_reset_control_array_get(struct device_node *np, |
| 33 | bool shared, bool optional); | 33 | bool shared, bool optional); |
| 34 | 34 | ||
| 35 | int reset_control_get_count(struct device *dev); | ||
| 36 | |||
| 35 | #else | 37 | #else |
| 36 | 38 | ||
| 37 | static inline int reset_control_reset(struct reset_control *rstc) | 39 | static inline int reset_control_reset(struct reset_control *rstc) |
| @@ -97,6 +99,11 @@ of_reset_control_array_get(struct device_node *np, bool shared, bool optional) | |||
| 97 | return optional ? NULL : ERR_PTR(-ENOTSUPP); | 99 | return optional ? NULL : ERR_PTR(-ENOTSUPP); |
| 98 | } | 100 | } |
| 99 | 101 | ||
| 102 | static inline int reset_control_get_count(struct device *dev) | ||
| 103 | { | ||
| 104 | return -ENOENT; | ||
| 105 | } | ||
| 106 | |||
| 100 | #endif /* CONFIG_RESET_CONTROLLER */ | 107 | #endif /* CONFIG_RESET_CONTROLLER */ |
| 101 | 108 | ||
| 102 | static inline int __must_check device_reset(struct device *dev) | 109 | static inline int __must_check device_reset(struct device *dev) |
| @@ -138,7 +145,7 @@ __must_check reset_control_get_exclusive(struct device *dev, const char *id) | |||
| 138 | * | 145 | * |
| 139 | * Returns a struct reset_control or IS_ERR() condition containing errno. | 146 | * Returns a struct reset_control or IS_ERR() condition containing errno. |
| 140 | * This function is intended for use with reset-controls which are shared | 147 | * This function is intended for use with reset-controls which are shared |
| 141 | * between hardware-blocks. | 148 | * between hardware blocks. |
| 142 | * | 149 | * |
| 143 | * When a reset-control is shared, the behavior of reset_control_assert / | 150 | * When a reset-control is shared, the behavior of reset_control_assert / |
| 144 | * deassert is changed, the reset-core will keep track of a deassert_count | 151 | * deassert is changed, the reset-core will keep track of a deassert_count |
| @@ -187,7 +194,7 @@ static inline struct reset_control *of_reset_control_get_exclusive( | |||
| 187 | } | 194 | } |
| 188 | 195 | ||
| 189 | /** | 196 | /** |
| 190 | * of_reset_control_get_shared - Lookup and obtain an shared reference | 197 | * of_reset_control_get_shared - Lookup and obtain a shared reference |
| 191 | * to a reset controller. | 198 | * to a reset controller. |
| 192 | * @node: device to be reset by the controller | 199 | * @node: device to be reset by the controller |
| 193 | * @id: reset line name | 200 | * @id: reset line name |
| @@ -229,7 +236,7 @@ static inline struct reset_control *of_reset_control_get_exclusive_by_index( | |||
| 229 | } | 236 | } |
| 230 | 237 | ||
| 231 | /** | 238 | /** |
| 232 | * of_reset_control_get_shared_by_index - Lookup and obtain an shared | 239 | * of_reset_control_get_shared_by_index - Lookup and obtain a shared |
| 233 | * reference to a reset controller | 240 | * reference to a reset controller |
| 234 | * by index. | 241 | * by index. |
| 235 | * @node: device to be reset by the controller | 242 | * @node: device to be reset by the controller |
| @@ -322,7 +329,7 @@ devm_reset_control_get_exclusive_by_index(struct device *dev, int index) | |||
| 322 | 329 | ||
| 323 | /** | 330 | /** |
| 324 | * devm_reset_control_get_shared_by_index - resource managed | 331 | * devm_reset_control_get_shared_by_index - resource managed |
| 325 | * reset_control_get_shared | 332 | * reset_control_get_shared |
| 326 | * @dev: device to be reset by the controller | 333 | * @dev: device to be reset by the controller |
| 327 | * @index: index of the reset controller | 334 | * @index: index of the reset controller |
| 328 | * | 335 | * |
diff --git a/include/linux/reset/socfpga.h b/include/linux/reset/socfpga.h new file mode 100644 index 000000000000..b11a2047c342 --- /dev/null +++ b/include/linux/reset/socfpga.h | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef __LINUX_RESET_SOCFPGA_H__ | ||
| 3 | #define __LINUX_RESET_SOCFPGA_H__ | ||
| 4 | |||
| 5 | void __init socfpga_reset_init(void); | ||
| 6 | |||
| 7 | #endif /* __LINUX_RESET_SOCFPGA_H__ */ | ||
diff --git a/include/linux/reset/sunxi.h b/include/linux/reset/sunxi.h new file mode 100644 index 000000000000..1ad7fffb413e --- /dev/null +++ b/include/linux/reset/sunxi.h | |||
| @@ -0,0 +1,7 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 2 | #ifndef __LINUX_RESET_SUNXI_H__ | ||
| 3 | #define __LINUX_RESET_SUNXI_H__ | ||
| 4 | |||
| 5 | void __init sun6i_reset_init(void); | ||
| 6 | |||
| 7 | #endif /* __LINUX_RESET_SUNXI_H__ */ | ||
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h index 20f9c6af7473..ae9c0f71f311 100644 --- a/include/linux/rhashtable.h +++ b/include/linux/rhashtable.h | |||
| @@ -1113,14 +1113,6 @@ static inline int rhashtable_replace_fast( | |||
| 1113 | return err; | 1113 | return err; |
| 1114 | } | 1114 | } |
| 1115 | 1115 | ||
| 1116 | /* Obsolete function, do not use in new code. */ | ||
| 1117 | static inline int rhashtable_walk_init(struct rhashtable *ht, | ||
| 1118 | struct rhashtable_iter *iter, gfp_t gfp) | ||
| 1119 | { | ||
| 1120 | rhashtable_walk_enter(ht, iter); | ||
| 1121 | return 0; | ||
| 1122 | } | ||
| 1123 | |||
| 1124 | /** | 1116 | /** |
| 1125 | * rhltable_walk_enter - Initialise an iterator | 1117 | * rhltable_walk_enter - Initialise an iterator |
| 1126 | * @hlt: Table to walk over | 1118 | * @hlt: Table to walk over |
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h index 5b9ae62272bb..1a40277b512c 100644 --- a/include/linux/ring_buffer.h +++ b/include/linux/ring_buffer.h | |||
| @@ -128,7 +128,7 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts, | |||
| 128 | unsigned long *lost_events); | 128 | unsigned long *lost_events); |
| 129 | 129 | ||
| 130 | struct ring_buffer_iter * | 130 | struct ring_buffer_iter * |
| 131 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu); | 131 | ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags); |
| 132 | void ring_buffer_read_prepare_sync(void); | 132 | void ring_buffer_read_prepare_sync(void); |
| 133 | void ring_buffer_read_start(struct ring_buffer_iter *iter); | 133 | void ring_buffer_read_start(struct ring_buffer_iter *iter); |
| 134 | void ring_buffer_read_finish(struct ring_buffer_iter *iter); | 134 | void ring_buffer_read_finish(struct ring_buffer_iter *iter); |
| @@ -187,8 +187,6 @@ void ring_buffer_set_clock(struct ring_buffer *buffer, | |||
| 187 | void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); | 187 | void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs); |
| 188 | bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); | 188 | bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer); |
| 189 | 189 | ||
| 190 | size_t ring_buffer_page_len(void *page); | ||
| 191 | |||
| 192 | size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu); | 190 | size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu); |
| 193 | size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu); | 191 | size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu); |
| 194 | 192 | ||
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index c1089fe5344a..f89bfbb54902 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
| @@ -67,7 +67,7 @@ extern struct class *rtc_class; | |||
| 67 | * | 67 | * |
| 68 | * The (current) exceptions are mostly filesystem hooks: | 68 | * The (current) exceptions are mostly filesystem hooks: |
| 69 | * - the proc() hook for procfs | 69 | * - the proc() hook for procfs |
| 70 | * - non-ioctl() chardev hooks: open(), release(), read_callback() | 70 | * - non-ioctl() chardev hooks: open(), release() |
| 71 | * | 71 | * |
| 72 | * REVISIT those periodic irq calls *do* have ops_lock when they're | 72 | * REVISIT those periodic irq calls *do* have ops_lock when they're |
| 73 | * issued through ioctl() ... | 73 | * issued through ioctl() ... |
| @@ -81,7 +81,6 @@ struct rtc_class_ops { | |||
| 81 | int (*proc)(struct device *, struct seq_file *); | 81 | int (*proc)(struct device *, struct seq_file *); |
| 82 | int (*set_mmss64)(struct device *, time64_t secs); | 82 | int (*set_mmss64)(struct device *, time64_t secs); |
| 83 | int (*set_mmss)(struct device *, unsigned long secs); | 83 | int (*set_mmss)(struct device *, unsigned long secs); |
| 84 | int (*read_callback)(struct device *, int data); | ||
| 85 | int (*alarm_irq_enable)(struct device *, unsigned int enabled); | 84 | int (*alarm_irq_enable)(struct device *, unsigned int enabled); |
| 86 | int (*read_offset)(struct device *, long *offset); | 85 | int (*read_offset)(struct device *, long *offset); |
| 87 | int (*set_offset)(struct device *, long offset); | 86 | int (*set_offset)(struct device *, long offset); |
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h index 14d558146aea..20f3e3f029b9 100644 --- a/include/linux/sbitmap.h +++ b/include/linux/sbitmap.h | |||
| @@ -330,7 +330,7 @@ static inline void sbitmap_clear_bit(struct sbitmap *sb, unsigned int bitnr) | |||
| 330 | /* | 330 | /* |
| 331 | * This one is special, since it doesn't actually clear the bit, rather it | 331 | * This one is special, since it doesn't actually clear the bit, rather it |
| 332 | * sets the corresponding bit in the ->cleared mask instead. Paired with | 332 | * sets the corresponding bit in the ->cleared mask instead. Paired with |
| 333 | * the caller doing sbitmap_batch_clear() if a given index is full, which | 333 | * the caller doing sbitmap_deferred_clear() if a given index is full, which |
| 334 | * will clear the previously freed entries in the corresponding ->word. | 334 | * will clear the previously freed entries in the corresponding ->word. |
| 335 | */ | 335 | */ |
| 336 | static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) | 336 | static inline void sbitmap_deferred_clear_bit(struct sbitmap *sb, unsigned int bitnr) |
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index b96f0d0b5b8f..b4be960c7e5d 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h | |||
| @@ -339,12 +339,12 @@ int sg_alloc_table_chained(struct sg_table *table, int nents, | |||
| 339 | /* | 339 | /* |
| 340 | * sg page iterator | 340 | * sg page iterator |
| 341 | * | 341 | * |
| 342 | * Iterates over sg entries page-by-page. On each successful iteration, | 342 | * Iterates over sg entries page-by-page. On each successful iteration, you |
| 343 | * you can call sg_page_iter_page(@piter) and sg_page_iter_dma_address(@piter) | 343 | * can call sg_page_iter_page(@piter) to get the current page and its dma |
| 344 | * to get the current page and its dma address. @piter->sg will point to the | 344 | * address. @piter->sg will point to the sg holding this page and |
| 345 | * sg holding this page and @piter->sg_pgoffset to the page's page offset | 345 | * @piter->sg_pgoffset to the page's page offset within the sg. The iteration |
| 346 | * within the sg. The iteration will stop either when a maximum number of sg | 346 | * will stop either when a maximum number of sg entries was reached or a |
| 347 | * entries was reached or a terminating sg (sg_last(sg) == true) was reached. | 347 | * terminating sg (sg_last(sg) == true) was reached. |
| 348 | */ | 348 | */ |
| 349 | struct sg_page_iter { | 349 | struct sg_page_iter { |
| 350 | struct scatterlist *sg; /* sg holding the page */ | 350 | struct scatterlist *sg; /* sg holding the page */ |
| @@ -356,7 +356,19 @@ struct sg_page_iter { | |||
| 356 | * next step */ | 356 | * next step */ |
| 357 | }; | 357 | }; |
| 358 | 358 | ||
| 359 | /* | ||
| 360 | * sg page iterator for DMA addresses | ||
| 361 | * | ||
| 362 | * This is the same as sg_page_iter however you can call | ||
| 363 | * sg_page_iter_dma_address(@dma_iter) to get the page's DMA | ||
| 364 | * address. sg_page_iter_page() cannot be called on this iterator. | ||
| 365 | */ | ||
| 366 | struct sg_dma_page_iter { | ||
| 367 | struct sg_page_iter base; | ||
| 368 | }; | ||
| 369 | |||
| 359 | bool __sg_page_iter_next(struct sg_page_iter *piter); | 370 | bool __sg_page_iter_next(struct sg_page_iter *piter); |
| 371 | bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter); | ||
| 360 | void __sg_page_iter_start(struct sg_page_iter *piter, | 372 | void __sg_page_iter_start(struct sg_page_iter *piter, |
| 361 | struct scatterlist *sglist, unsigned int nents, | 373 | struct scatterlist *sglist, unsigned int nents, |
| 362 | unsigned long pgoffset); | 374 | unsigned long pgoffset); |
| @@ -372,11 +384,13 @@ static inline struct page *sg_page_iter_page(struct sg_page_iter *piter) | |||
| 372 | /** | 384 | /** |
| 373 | * sg_page_iter_dma_address - get the dma address of the current page held by | 385 | * sg_page_iter_dma_address - get the dma address of the current page held by |
| 374 | * the page iterator. | 386 | * the page iterator. |
| 375 | * @piter: page iterator holding the page | 387 | * @dma_iter: page iterator holding the page |
| 376 | */ | 388 | */ |
| 377 | static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter) | 389 | static inline dma_addr_t |
| 390 | sg_page_iter_dma_address(struct sg_dma_page_iter *dma_iter) | ||
| 378 | { | 391 | { |
| 379 | return sg_dma_address(piter->sg) + (piter->sg_pgoffset << PAGE_SHIFT); | 392 | return sg_dma_address(dma_iter->base.sg) + |
| 393 | (dma_iter->base.sg_pgoffset << PAGE_SHIFT); | ||
| 380 | } | 394 | } |
| 381 | 395 | ||
| 382 | /** | 396 | /** |
| @@ -385,11 +399,28 @@ static inline dma_addr_t sg_page_iter_dma_address(struct sg_page_iter *piter) | |||
| 385 | * @piter: page iterator to hold current page, sg, sg_pgoffset | 399 | * @piter: page iterator to hold current page, sg, sg_pgoffset |
| 386 | * @nents: maximum number of sg entries to iterate over | 400 | * @nents: maximum number of sg entries to iterate over |
| 387 | * @pgoffset: starting page offset | 401 | * @pgoffset: starting page offset |
| 402 | * | ||
| 403 | * Callers may use sg_page_iter_page() to get each page pointer. | ||
| 388 | */ | 404 | */ |
| 389 | #define for_each_sg_page(sglist, piter, nents, pgoffset) \ | 405 | #define for_each_sg_page(sglist, piter, nents, pgoffset) \ |
| 390 | for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ | 406 | for (__sg_page_iter_start((piter), (sglist), (nents), (pgoffset)); \ |
| 391 | __sg_page_iter_next(piter);) | 407 | __sg_page_iter_next(piter);) |
| 392 | 408 | ||
| 409 | /** | ||
| 410 | * for_each_sg_dma_page - iterate over the pages of the given sg list | ||
| 411 | * @sglist: sglist to iterate over | ||
| 412 | * @dma_iter: page iterator to hold current page | ||
| 413 | * @dma_nents: maximum number of sg entries to iterate over, this is the value | ||
| 414 | * returned from dma_map_sg | ||
| 415 | * @pgoffset: starting page offset | ||
| 416 | * | ||
| 417 | * Callers may use sg_page_iter_dma_address() to get each page's DMA address. | ||
| 418 | */ | ||
| 419 | #define for_each_sg_dma_page(sglist, dma_iter, dma_nents, pgoffset) \ | ||
| 420 | for (__sg_page_iter_start(&(dma_iter)->base, sglist, dma_nents, \ | ||
| 421 | pgoffset); \ | ||
| 422 | __sg_page_iter_dma_next(dma_iter);) | ||
| 423 | |||
| 393 | /* | 424 | /* |
| 394 | * Mapping sg iterator | 425 | * Mapping sg iterator |
| 395 | * | 426 | * |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 89541d248893..1549584a1538 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/seccomp.h> | 21 | #include <linux/seccomp.h> |
| 22 | #include <linux/nodemask.h> | 22 | #include <linux/nodemask.h> |
| 23 | #include <linux/rcupdate.h> | 23 | #include <linux/rcupdate.h> |
| 24 | #include <linux/refcount.h> | ||
| 24 | #include <linux/resource.h> | 25 | #include <linux/resource.h> |
| 25 | #include <linux/latencytop.h> | 26 | #include <linux/latencytop.h> |
| 26 | #include <linux/sched/prio.h> | 27 | #include <linux/sched/prio.h> |
| @@ -47,6 +48,7 @@ struct pid_namespace; | |||
| 47 | struct pipe_inode_info; | 48 | struct pipe_inode_info; |
| 48 | struct rcu_node; | 49 | struct rcu_node; |
| 49 | struct reclaim_state; | 50 | struct reclaim_state; |
| 51 | struct capture_control; | ||
| 50 | struct robust_list_head; | 52 | struct robust_list_head; |
| 51 | struct sched_attr; | 53 | struct sched_attr; |
| 52 | struct sched_param; | 54 | struct sched_param; |
| @@ -356,12 +358,6 @@ struct util_est { | |||
| 356 | * For cfs_rq, it is the aggregated load_avg of all runnable and | 358 | * For cfs_rq, it is the aggregated load_avg of all runnable and |
| 357 | * blocked sched_entities. | 359 | * blocked sched_entities. |
| 358 | * | 360 | * |
| 359 | * load_avg may also take frequency scaling into account: | ||
| 360 | * | ||
| 361 | * load_avg = runnable% * scale_load_down(load) * freq% | ||
| 362 | * | ||
| 363 | * where freq% is the CPU frequency normalized to the highest frequency. | ||
| 364 | * | ||
| 365 | * [util_avg definition] | 361 | * [util_avg definition] |
| 366 | * | 362 | * |
| 367 | * util_avg = running% * SCHED_CAPACITY_SCALE | 363 | * util_avg = running% * SCHED_CAPACITY_SCALE |
| @@ -370,17 +366,14 @@ struct util_est { | |||
| 370 | * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable | 366 | * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable |
| 371 | * and blocked sched_entities. | 367 | * and blocked sched_entities. |
| 372 | * | 368 | * |
| 373 | * util_avg may also factor frequency scaling and CPU capacity scaling: | 369 | * load_avg and util_avg don't directly factor frequency scaling and CPU |
| 374 | * | 370 | * capacity scaling. The scaling is done through the rq_clock_pelt that |
| 375 | * util_avg = running% * SCHED_CAPACITY_SCALE * freq% * capacity% | 371 | * is used for computing those signals (see update_rq_clock_pelt()) |
| 376 | * | ||
| 377 | * where freq% is the same as above, and capacity% is the CPU capacity | ||
| 378 | * normalized to the greatest capacity (due to uarch differences, etc). | ||
| 379 | * | 372 | * |
| 380 | * N.B., the above ratios (runnable%, running%, freq%, and capacity%) | 373 | * N.B., the above ratios (runnable% and running%) themselves are in the |
| 381 | * themselves are in the range of [0, 1]. To do fixed point arithmetics, | 374 | * range of [0, 1]. To do fixed point arithmetics, we therefore scale them |
| 382 | * we therefore scale them to as large a range as necessary. This is for | 375 | * to as large a range as necessary. This is for example reflected by |
| 383 | * example reflected by util_avg's SCHED_CAPACITY_SCALE. | 376 | * util_avg's SCHED_CAPACITY_SCALE. |
| 384 | * | 377 | * |
| 385 | * [Overflow issue] | 378 | * [Overflow issue] |
| 386 | * | 379 | * |
| @@ -607,7 +600,7 @@ struct task_struct { | |||
| 607 | randomized_struct_fields_start | 600 | randomized_struct_fields_start |
| 608 | 601 | ||
| 609 | void *stack; | 602 | void *stack; |
| 610 | atomic_t usage; | 603 | refcount_t usage; |
| 611 | /* Per task flags (PF_*), defined further below: */ | 604 | /* Per task flags (PF_*), defined further below: */ |
| 612 | unsigned int flags; | 605 | unsigned int flags; |
| 613 | unsigned int ptrace; | 606 | unsigned int ptrace; |
| @@ -739,12 +732,6 @@ struct task_struct { | |||
| 739 | unsigned use_memdelay:1; | 732 | unsigned use_memdelay:1; |
| 740 | #endif | 733 | #endif |
| 741 | 734 | ||
| 742 | /* | ||
| 743 | * May usercopy functions fault on kernel addresses? | ||
| 744 | * This is not just a single bit because this can potentially nest. | ||
| 745 | */ | ||
| 746 | unsigned int kernel_uaccess_faults_ok; | ||
| 747 | |||
| 748 | unsigned long atomic_flags; /* Flags requiring atomic access. */ | 735 | unsigned long atomic_flags; /* Flags requiring atomic access. */ |
| 749 | 736 | ||
| 750 | struct restart_block restart_block; | 737 | struct restart_block restart_block; |
| @@ -885,8 +872,10 @@ struct task_struct { | |||
| 885 | 872 | ||
| 886 | struct callback_head *task_works; | 873 | struct callback_head *task_works; |
| 887 | 874 | ||
| 888 | struct audit_context *audit_context; | 875 | #ifdef CONFIG_AUDIT |
| 889 | #ifdef CONFIG_AUDITSYSCALL | 876 | #ifdef CONFIG_AUDITSYSCALL |
| 877 | struct audit_context *audit_context; | ||
| 878 | #endif | ||
| 890 | kuid_t loginuid; | 879 | kuid_t loginuid; |
| 891 | unsigned int sessionid; | 880 | unsigned int sessionid; |
| 892 | #endif | 881 | #endif |
| @@ -964,6 +953,9 @@ struct task_struct { | |||
| 964 | 953 | ||
| 965 | struct io_context *io_context; | 954 | struct io_context *io_context; |
| 966 | 955 | ||
| 956 | #ifdef CONFIG_COMPACTION | ||
| 957 | struct capture_control *capture_control; | ||
| 958 | #endif | ||
| 967 | /* Ptrace state: */ | 959 | /* Ptrace state: */ |
| 968 | unsigned long ptrace_message; | 960 | unsigned long ptrace_message; |
| 969 | kernel_siginfo_t *last_siginfo; | 961 | kernel_siginfo_t *last_siginfo; |
| @@ -995,7 +987,7 @@ struct task_struct { | |||
| 995 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ | 987 | /* cg_list protected by css_set_lock and tsk->alloc_lock: */ |
| 996 | struct list_head cg_list; | 988 | struct list_head cg_list; |
| 997 | #endif | 989 | #endif |
| 998 | #ifdef CONFIG_RESCTRL | 990 | #ifdef CONFIG_X86_CPU_RESCTRL |
| 999 | u32 closid; | 991 | u32 closid; |
| 1000 | u32 rmid; | 992 | u32 rmid; |
| 1001 | #endif | 993 | #endif |
| @@ -1193,7 +1185,7 @@ struct task_struct { | |||
| 1193 | #endif | 1185 | #endif |
| 1194 | #ifdef CONFIG_THREAD_INFO_IN_TASK | 1186 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1195 | /* A live task holds one reference: */ | 1187 | /* A live task holds one reference: */ |
| 1196 | atomic_t stack_refcount; | 1188 | refcount_t stack_refcount; |
| 1197 | #endif | 1189 | #endif |
| 1198 | #ifdef CONFIG_LIVEPATCH | 1190 | #ifdef CONFIG_LIVEPATCH |
| 1199 | int patch_state; | 1191 | int patch_state; |
| @@ -1406,9 +1398,10 @@ extern struct pid *cad_pid; | |||
| 1406 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ | 1398 | #define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */ |
| 1407 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ | 1399 | #define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */ |
| 1408 | #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ | 1400 | #define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */ |
| 1401 | #define PF_UMH 0x02000000 /* I'm a Usermodehelper process */ | ||
| 1409 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ | 1402 | #define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_allowed */ |
| 1410 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ | 1403 | #define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */ |
| 1411 | #define PF_MUTEX_TESTER 0x20000000 /* Thread belongs to the rt mutex tester */ | 1404 | #define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */ |
| 1412 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ | 1405 | #define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */ |
| 1413 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ | 1406 | #define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */ |
| 1414 | 1407 | ||
| @@ -1458,6 +1451,7 @@ static inline bool is_percpu_thread(void) | |||
| 1458 | #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ | 1451 | #define PFA_SPEC_SSB_FORCE_DISABLE 4 /* Speculative Store Bypass force disabled*/ |
| 1459 | #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ | 1452 | #define PFA_SPEC_IB_DISABLE 5 /* Indirect branch speculation restricted */ |
| 1460 | #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ | 1453 | #define PFA_SPEC_IB_FORCE_DISABLE 6 /* Indirect branch speculation permanently restricted */ |
| 1454 | #define PFA_SPEC_SSB_NOEXEC 7 /* Speculative Store Bypass clear on execve() */ | ||
| 1461 | 1455 | ||
| 1462 | #define TASK_PFA_TEST(name, func) \ | 1456 | #define TASK_PFA_TEST(name, func) \ |
| 1463 | static inline bool task_##func(struct task_struct *p) \ | 1457 | static inline bool task_##func(struct task_struct *p) \ |
| @@ -1486,6 +1480,10 @@ TASK_PFA_TEST(SPEC_SSB_DISABLE, spec_ssb_disable) | |||
| 1486 | TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) | 1480 | TASK_PFA_SET(SPEC_SSB_DISABLE, spec_ssb_disable) |
| 1487 | TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) | 1481 | TASK_PFA_CLEAR(SPEC_SSB_DISABLE, spec_ssb_disable) |
| 1488 | 1482 | ||
| 1483 | TASK_PFA_TEST(SPEC_SSB_NOEXEC, spec_ssb_noexec) | ||
| 1484 | TASK_PFA_SET(SPEC_SSB_NOEXEC, spec_ssb_noexec) | ||
| 1485 | TASK_PFA_CLEAR(SPEC_SSB_NOEXEC, spec_ssb_noexec) | ||
| 1486 | |||
| 1489 | TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) | 1487 | TASK_PFA_TEST(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
| 1490 | TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) | 1488 | TASK_PFA_SET(SPEC_SSB_FORCE_DISABLE, spec_ssb_force_disable) |
| 1491 | 1489 | ||
| @@ -1753,9 +1751,9 @@ static __always_inline bool need_resched(void) | |||
| 1753 | static inline unsigned int task_cpu(const struct task_struct *p) | 1751 | static inline unsigned int task_cpu(const struct task_struct *p) |
| 1754 | { | 1752 | { |
| 1755 | #ifdef CONFIG_THREAD_INFO_IN_TASK | 1753 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 1756 | return p->cpu; | 1754 | return READ_ONCE(p->cpu); |
| 1757 | #else | 1755 | #else |
| 1758 | return task_thread_info(p)->cpu; | 1756 | return READ_ONCE(task_thread_info(p)->cpu); |
| 1759 | #endif | 1757 | #endif |
| 1760 | } | 1758 | } |
| 1761 | 1759 | ||
| @@ -1904,6 +1902,14 @@ static inline void rseq_execve(struct task_struct *t) | |||
| 1904 | 1902 | ||
| 1905 | #endif | 1903 | #endif |
| 1906 | 1904 | ||
| 1905 | void __exit_umh(struct task_struct *tsk); | ||
| 1906 | |||
| 1907 | static inline void exit_umh(struct task_struct *tsk) | ||
| 1908 | { | ||
| 1909 | if (unlikely(tsk->flags & PF_UMH)) | ||
| 1910 | __exit_umh(tsk); | ||
| 1911 | } | ||
| 1912 | |||
| 1907 | #ifdef CONFIG_DEBUG_RSEQ | 1913 | #ifdef CONFIG_DEBUG_RSEQ |
| 1908 | 1914 | ||
| 1909 | void rseq_syscall(struct pt_regs *regs); | 1915 | void rseq_syscall(struct pt_regs *regs); |
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h index ec912d01126f..ecdc6542070f 100644 --- a/include/linux/sched/coredump.h +++ b/include/linux/sched/coredump.h | |||
| @@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm) | |||
| 71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ | 71 | #define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */ |
| 72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ | 72 | #define MMF_DISABLE_THP 24 /* disable THP for all VMAs */ |
| 73 | #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ | 73 | #define MMF_OOM_VICTIM 25 /* mm is the oom victim */ |
| 74 | #define MMF_OOM_REAP_QUEUED 26 /* mm was queued for oom_reaper */ | ||
| 74 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) | 75 | #define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP) |
| 75 | 76 | ||
| 76 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ | 77 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\ |
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h index 3bfa6a0cbba4..a3fda9f024c3 100644 --- a/include/linux/sched/mm.h +++ b/include/linux/sched/mm.h | |||
| @@ -49,6 +49,27 @@ static inline void mmdrop(struct mm_struct *mm) | |||
| 49 | __mmdrop(mm); | 49 | __mmdrop(mm); |
| 50 | } | 50 | } |
| 51 | 51 | ||
| 52 | /* | ||
| 53 | * This has to be called after a get_task_mm()/mmget_not_zero() | ||
| 54 | * followed by taking the mmap_sem for writing before modifying the | ||
| 55 | * vmas or anything the coredump pretends not to change from under it. | ||
| 56 | * | ||
| 57 | * NOTE: find_extend_vma() called from GUP context is the only place | ||
| 58 | * that can modify the "mm" (notably the vm_start/end) under mmap_sem | ||
| 59 | * for reading and outside the context of the process, so it is also | ||
| 60 | * the only case that holds the mmap_sem for reading that must call | ||
| 61 | * this function. Generally if the mmap_sem is held for reading | ||
| 62 | * there's no need of this check after get_task_mm()/mmget_not_zero(). | ||
| 63 | * | ||
| 64 | * This function can be obsoleted and the check can be removed, after | ||
| 65 | * the coredump code will hold the mmap_sem for writing before | ||
| 66 | * invoking the ->core_dump methods. | ||
| 67 | */ | ||
| 68 | static inline bool mmget_still_valid(struct mm_struct *mm) | ||
| 69 | { | ||
| 70 | return likely(!mm->core_state); | ||
| 71 | } | ||
| 72 | |||
| 52 | /** | 73 | /** |
| 53 | * mmget() - Pin the address space associated with a &struct mm_struct. | 74 | * mmget() - Pin the address space associated with a &struct mm_struct. |
| 54 | * @mm: The address space to pin. | 75 | * @mm: The address space to pin. |
| @@ -148,17 +169,25 @@ static inline bool in_vfork(struct task_struct *tsk) | |||
| 148 | * Applies per-task gfp context to the given allocation flags. | 169 | * Applies per-task gfp context to the given allocation flags. |
| 149 | * PF_MEMALLOC_NOIO implies GFP_NOIO | 170 | * PF_MEMALLOC_NOIO implies GFP_NOIO |
| 150 | * PF_MEMALLOC_NOFS implies GFP_NOFS | 171 | * PF_MEMALLOC_NOFS implies GFP_NOFS |
| 172 | * PF_MEMALLOC_NOCMA implies no allocation from CMA region. | ||
| 151 | */ | 173 | */ |
| 152 | static inline gfp_t current_gfp_context(gfp_t flags) | 174 | static inline gfp_t current_gfp_context(gfp_t flags) |
| 153 | { | 175 | { |
| 154 | /* | 176 | if (unlikely(current->flags & |
| 155 | * NOIO implies both NOIO and NOFS and it is a weaker context | 177 | (PF_MEMALLOC_NOIO | PF_MEMALLOC_NOFS | PF_MEMALLOC_NOCMA))) { |
| 156 | * so always make sure it makes precedence | 178 | /* |
| 157 | */ | 179 | * NOIO implies both NOIO and NOFS and it is a weaker context |
| 158 | if (unlikely(current->flags & PF_MEMALLOC_NOIO)) | 180 | * so always make sure it makes precedence |
| 159 | flags &= ~(__GFP_IO | __GFP_FS); | 181 | */ |
| 160 | else if (unlikely(current->flags & PF_MEMALLOC_NOFS)) | 182 | if (current->flags & PF_MEMALLOC_NOIO) |
| 161 | flags &= ~__GFP_FS; | 183 | flags &= ~(__GFP_IO | __GFP_FS); |
| 184 | else if (current->flags & PF_MEMALLOC_NOFS) | ||
| 185 | flags &= ~__GFP_FS; | ||
| 186 | #ifdef CONFIG_CMA | ||
| 187 | if (current->flags & PF_MEMALLOC_NOCMA) | ||
| 188 | flags &= ~__GFP_MOVABLE; | ||
| 189 | #endif | ||
| 190 | } | ||
| 162 | return flags; | 191 | return flags; |
| 163 | } | 192 | } |
| 164 | 193 | ||
| @@ -248,6 +277,30 @@ static inline void memalloc_noreclaim_restore(unsigned int flags) | |||
| 248 | current->flags = (current->flags & ~PF_MEMALLOC) | flags; | 277 | current->flags = (current->flags & ~PF_MEMALLOC) | flags; |
| 249 | } | 278 | } |
| 250 | 279 | ||
| 280 | #ifdef CONFIG_CMA | ||
| 281 | static inline unsigned int memalloc_nocma_save(void) | ||
| 282 | { | ||
| 283 | unsigned int flags = current->flags & PF_MEMALLOC_NOCMA; | ||
| 284 | |||
| 285 | current->flags |= PF_MEMALLOC_NOCMA; | ||
| 286 | return flags; | ||
| 287 | } | ||
| 288 | |||
| 289 | static inline void memalloc_nocma_restore(unsigned int flags) | ||
| 290 | { | ||
| 291 | current->flags = (current->flags & ~PF_MEMALLOC_NOCMA) | flags; | ||
| 292 | } | ||
| 293 | #else | ||
| 294 | static inline unsigned int memalloc_nocma_save(void) | ||
| 295 | { | ||
| 296 | return 0; | ||
| 297 | } | ||
| 298 | |||
| 299 | static inline void memalloc_nocma_restore(unsigned int flags) | ||
| 300 | { | ||
| 301 | } | ||
| 302 | #endif | ||
| 303 | |||
| 251 | #ifdef CONFIG_MEMCG | 304 | #ifdef CONFIG_MEMCG |
| 252 | /** | 305 | /** |
| 253 | * memalloc_use_memcg - Starts the remote memcg charging scope. | 306 | * memalloc_use_memcg - Starts the remote memcg charging scope. |
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h index 13789d10a50e..e412c092c1e8 100644 --- a/include/linux/sched/signal.h +++ b/include/linux/sched/signal.h | |||
| @@ -8,13 +8,14 @@ | |||
| 8 | #include <linux/sched/jobctl.h> | 8 | #include <linux/sched/jobctl.h> |
| 9 | #include <linux/sched/task.h> | 9 | #include <linux/sched/task.h> |
| 10 | #include <linux/cred.h> | 10 | #include <linux/cred.h> |
| 11 | #include <linux/refcount.h> | ||
| 11 | 12 | ||
| 12 | /* | 13 | /* |
| 13 | * Types defining task->signal and task->sighand and APIs using them: | 14 | * Types defining task->signal and task->sighand and APIs using them: |
| 14 | */ | 15 | */ |
| 15 | 16 | ||
| 16 | struct sighand_struct { | 17 | struct sighand_struct { |
| 17 | atomic_t count; | 18 | refcount_t count; |
| 18 | struct k_sigaction action[_NSIG]; | 19 | struct k_sigaction action[_NSIG]; |
| 19 | spinlock_t siglock; | 20 | spinlock_t siglock; |
| 20 | wait_queue_head_t signalfd_wqh; | 21 | wait_queue_head_t signalfd_wqh; |
| @@ -82,7 +83,7 @@ struct multiprocess_signals { | |||
| 82 | * the locking of signal_struct. | 83 | * the locking of signal_struct. |
| 83 | */ | 84 | */ |
| 84 | struct signal_struct { | 85 | struct signal_struct { |
| 85 | atomic_t sigcnt; | 86 | refcount_t sigcnt; |
| 86 | atomic_t live; | 87 | atomic_t live; |
| 87 | int nr_threads; | 88 | int nr_threads; |
| 88 | struct list_head thread_head; | 89 | struct list_head thread_head; |
| @@ -417,10 +418,20 @@ static inline void set_restore_sigmask(void) | |||
| 417 | set_thread_flag(TIF_RESTORE_SIGMASK); | 418 | set_thread_flag(TIF_RESTORE_SIGMASK); |
| 418 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | 419 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
| 419 | } | 420 | } |
| 421 | |||
| 422 | static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 423 | { | ||
| 424 | clear_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); | ||
| 425 | } | ||
| 426 | |||
| 420 | static inline void clear_restore_sigmask(void) | 427 | static inline void clear_restore_sigmask(void) |
| 421 | { | 428 | { |
| 422 | clear_thread_flag(TIF_RESTORE_SIGMASK); | 429 | clear_thread_flag(TIF_RESTORE_SIGMASK); |
| 423 | } | 430 | } |
| 431 | static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 432 | { | ||
| 433 | return test_tsk_thread_flag(tsk, TIF_RESTORE_SIGMASK); | ||
| 434 | } | ||
| 424 | static inline bool test_restore_sigmask(void) | 435 | static inline bool test_restore_sigmask(void) |
| 425 | { | 436 | { |
| 426 | return test_thread_flag(TIF_RESTORE_SIGMASK); | 437 | return test_thread_flag(TIF_RESTORE_SIGMASK); |
| @@ -438,6 +449,10 @@ static inline void set_restore_sigmask(void) | |||
| 438 | current->restore_sigmask = true; | 449 | current->restore_sigmask = true; |
| 439 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); | 450 | WARN_ON(!test_thread_flag(TIF_SIGPENDING)); |
| 440 | } | 451 | } |
| 452 | static inline void clear_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 453 | { | ||
| 454 | tsk->restore_sigmask = false; | ||
| 455 | } | ||
| 441 | static inline void clear_restore_sigmask(void) | 456 | static inline void clear_restore_sigmask(void) |
| 442 | { | 457 | { |
| 443 | current->restore_sigmask = false; | 458 | current->restore_sigmask = false; |
| @@ -446,6 +461,10 @@ static inline bool test_restore_sigmask(void) | |||
| 446 | { | 461 | { |
| 447 | return current->restore_sigmask; | 462 | return current->restore_sigmask; |
| 448 | } | 463 | } |
| 464 | static inline bool test_tsk_restore_sigmask(struct task_struct *tsk) | ||
| 465 | { | ||
| 466 | return tsk->restore_sigmask; | ||
| 467 | } | ||
| 449 | static inline bool test_and_clear_restore_sigmask(void) | 468 | static inline bool test_and_clear_restore_sigmask(void) |
| 450 | { | 469 | { |
| 451 | if (!current->restore_sigmask) | 470 | if (!current->restore_sigmask) |
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index a9c32daeb9d8..99ce6d728df7 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h | |||
| @@ -83,4 +83,11 @@ extern int sysctl_schedstats(struct ctl_table *table, int write, | |||
| 83 | void __user *buffer, size_t *lenp, | 83 | void __user *buffer, size_t *lenp, |
| 84 | loff_t *ppos); | 84 | loff_t *ppos); |
| 85 | 85 | ||
| 86 | #if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) | ||
| 87 | extern unsigned int sysctl_sched_energy_aware; | ||
| 88 | extern int sched_energy_aware_handler(struct ctl_table *table, int write, | ||
| 89 | void __user *buffer, size_t *lenp, | ||
| 90 | loff_t *ppos); | ||
| 91 | #endif | ||
| 92 | |||
| 86 | #endif /* _LINUX_SCHED_SYSCTL_H */ | 93 | #endif /* _LINUX_SCHED_SYSCTL_H */ |
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index 44c6f15800ff..2e97a2227045 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h | |||
| @@ -88,13 +88,13 @@ extern void sched_exec(void); | |||
| 88 | #define sched_exec() {} | 88 | #define sched_exec() {} |
| 89 | #endif | 89 | #endif |
| 90 | 90 | ||
| 91 | #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) | 91 | #define get_task_struct(tsk) do { refcount_inc(&(tsk)->usage); } while(0) |
| 92 | 92 | ||
| 93 | extern void __put_task_struct(struct task_struct *t); | 93 | extern void __put_task_struct(struct task_struct *t); |
| 94 | 94 | ||
| 95 | static inline void put_task_struct(struct task_struct *t) | 95 | static inline void put_task_struct(struct task_struct *t) |
| 96 | { | 96 | { |
| 97 | if (atomic_dec_and_test(&t->usage)) | 97 | if (refcount_dec_and_test(&t->usage)) |
| 98 | __put_task_struct(t); | 98 | __put_task_struct(t); |
| 99 | } | 99 | } |
| 100 | 100 | ||
diff --git a/include/linux/sched/task_stack.h b/include/linux/sched/task_stack.h index 6a841929073f..2413427e439c 100644 --- a/include/linux/sched/task_stack.h +++ b/include/linux/sched/task_stack.h | |||
| @@ -61,7 +61,7 @@ static inline unsigned long *end_of_stack(struct task_struct *p) | |||
| 61 | #ifdef CONFIG_THREAD_INFO_IN_TASK | 61 | #ifdef CONFIG_THREAD_INFO_IN_TASK |
| 62 | static inline void *try_get_task_stack(struct task_struct *tsk) | 62 | static inline void *try_get_task_stack(struct task_struct *tsk) |
| 63 | { | 63 | { |
| 64 | return atomic_inc_not_zero(&tsk->stack_refcount) ? | 64 | return refcount_inc_not_zero(&tsk->stack_refcount) ? |
| 65 | task_stack_page(tsk) : NULL; | 65 | task_stack_page(tsk) : NULL; |
| 66 | } | 66 | } |
| 67 | 67 | ||
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h index c31d3a47a47c..57c7ed3fe465 100644 --- a/include/linux/sched/topology.h +++ b/include/linux/sched/topology.h | |||
| @@ -176,10 +176,10 @@ typedef int (*sched_domain_flags_f)(void); | |||
| 176 | #define SDTL_OVERLAP 0x01 | 176 | #define SDTL_OVERLAP 0x01 |
| 177 | 177 | ||
| 178 | struct sd_data { | 178 | struct sd_data { |
| 179 | struct sched_domain **__percpu sd; | 179 | struct sched_domain *__percpu *sd; |
| 180 | struct sched_domain_shared **__percpu sds; | 180 | struct sched_domain_shared *__percpu *sds; |
| 181 | struct sched_group **__percpu sg; | 181 | struct sched_group *__percpu *sg; |
| 182 | struct sched_group_capacity **__percpu sgc; | 182 | struct sched_group_capacity *__percpu *sgc; |
| 183 | }; | 183 | }; |
| 184 | 184 | ||
| 185 | struct sched_domain_topology_level { | 185 | struct sched_domain_topology_level { |
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h index 39ad98c09c58..c7b5f86b91a1 100644 --- a/include/linux/sched/user.h +++ b/include/linux/sched/user.h | |||
| @@ -40,7 +40,7 @@ struct user_struct { | |||
| 40 | kuid_t uid; | 40 | kuid_t uid; |
| 41 | 41 | ||
| 42 | #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \ | 42 | #if defined(CONFIG_PERF_EVENTS) || defined(CONFIG_BPF_SYSCALL) || \ |
| 43 | defined(CONFIG_NET) | 43 | defined(CONFIG_NET) || defined(CONFIG_IO_URING) |
| 44 | atomic_long_t locked_vm; | 44 | atomic_long_t locked_vm; |
| 45 | #endif | 45 | #endif |
| 46 | 46 | ||
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h index 10b19a192b2d..ad826d2a4557 100644 --- a/include/linux/sched/wake_q.h +++ b/include/linux/sched/wake_q.h | |||
| @@ -24,9 +24,13 @@ | |||
| 24 | * called near the end of a function. Otherwise, the list can be | 24 | * called near the end of a function. Otherwise, the list can be |
| 25 | * re-initialized for later re-use by wake_q_init(). | 25 | * re-initialized for later re-use by wake_q_init(). |
| 26 | * | 26 | * |
| 27 | * Note that this can cause spurious wakeups. schedule() callers | 27 | * NOTE that this can cause spurious wakeups. schedule() callers |
| 28 | * must ensure the call is done inside a loop, confirming that the | 28 | * must ensure the call is done inside a loop, confirming that the |
| 29 | * wakeup condition has in fact occurred. | 29 | * wakeup condition has in fact occurred. |
| 30 | * | ||
| 31 | * NOTE that there is no guarantee the wakeup will happen any later than the | ||
| 32 | * wake_q_add() location. Therefore task must be ready to be woken at the | ||
| 33 | * location of the wake_q_add(). | ||
| 30 | */ | 34 | */ |
| 31 | 35 | ||
| 32 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
| @@ -47,8 +51,8 @@ static inline void wake_q_init(struct wake_q_head *head) | |||
| 47 | head->lastp = &head->first; | 51 | head->lastp = &head->first; |
| 48 | } | 52 | } |
| 49 | 53 | ||
| 50 | extern void wake_q_add(struct wake_q_head *head, | 54 | extern void wake_q_add(struct wake_q_head *head, struct task_struct *task); |
| 51 | struct task_struct *task); | 55 | extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task); |
| 52 | extern void wake_up_q(struct wake_q_head *head); | 56 | extern void wake_up_q(struct wake_q_head *head); |
| 53 | 57 | ||
| 54 | #endif /* _LINUX_SCHED_WAKE_Q_H */ | 58 | #endif /* _LINUX_SCHED_WAKE_Q_H */ |
diff --git a/include/linux/security.h b/include/linux/security.h index dbfb5a66babb..49f2685324b0 100644 --- a/include/linux/security.h +++ b/include/linux/security.h | |||
| @@ -53,12 +53,18 @@ struct msg_msg; | |||
| 53 | struct xattr; | 53 | struct xattr; |
| 54 | struct xfrm_sec_ctx; | 54 | struct xfrm_sec_ctx; |
| 55 | struct mm_struct; | 55 | struct mm_struct; |
| 56 | struct fs_context; | ||
| 57 | struct fs_parameter; | ||
| 58 | enum fs_value_type; | ||
| 56 | 59 | ||
| 60 | /* Default (no) options for the capable function */ | ||
| 61 | #define CAP_OPT_NONE 0x0 | ||
| 57 | /* If capable should audit the security request */ | 62 | /* If capable should audit the security request */ |
| 58 | #define SECURITY_CAP_NOAUDIT 0 | 63 | #define CAP_OPT_NOAUDIT BIT(1) |
| 59 | #define SECURITY_CAP_AUDIT 1 | 64 | /* If capable is being called by a setid function */ |
| 65 | #define CAP_OPT_INSETID BIT(2) | ||
| 60 | 66 | ||
| 61 | /* LSM Agnostic defines for sb_set_mnt_opts */ | 67 | /* LSM Agnostic defines for fs_context::lsm_flags */ |
| 62 | #define SECURITY_LSM_NATIVE_LABELS 1 | 68 | #define SECURITY_LSM_NATIVE_LABELS 1 |
| 63 | 69 | ||
| 64 | struct ctl_table; | 70 | struct ctl_table; |
| @@ -72,7 +78,7 @@ enum lsm_event { | |||
| 72 | 78 | ||
| 73 | /* These functions are in security/commoncap.c */ | 79 | /* These functions are in security/commoncap.c */ |
| 74 | extern int cap_capable(const struct cred *cred, struct user_namespace *ns, | 80 | extern int cap_capable(const struct cred *cred, struct user_namespace *ns, |
| 75 | int cap, int audit); | 81 | int cap, unsigned int opts); |
| 76 | extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); | 82 | extern int cap_settime(const struct timespec64 *ts, const struct timezone *tz); |
| 77 | extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); | 83 | extern int cap_ptrace_access_check(struct task_struct *child, unsigned int mode); |
| 78 | extern int cap_ptrace_traceme(struct task_struct *parent); | 84 | extern int cap_ptrace_traceme(struct task_struct *parent); |
| @@ -207,10 +213,10 @@ int security_capset(struct cred *new, const struct cred *old, | |||
| 207 | const kernel_cap_t *effective, | 213 | const kernel_cap_t *effective, |
| 208 | const kernel_cap_t *inheritable, | 214 | const kernel_cap_t *inheritable, |
| 209 | const kernel_cap_t *permitted); | 215 | const kernel_cap_t *permitted); |
| 210 | int security_capable(const struct cred *cred, struct user_namespace *ns, | 216 | int security_capable(const struct cred *cred, |
| 211 | int cap); | 217 | struct user_namespace *ns, |
| 212 | int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, | 218 | int cap, |
| 213 | int cap); | 219 | unsigned int opts); |
| 214 | int security_quotactl(int cmds, int type, int id, struct super_block *sb); | 220 | int security_quotactl(int cmds, int type, int id, struct super_block *sb); |
| 215 | int security_quota_on(struct dentry *dentry); | 221 | int security_quota_on(struct dentry *dentry); |
| 216 | int security_syslog(int type); | 222 | int security_syslog(int type); |
| @@ -220,6 +226,8 @@ int security_bprm_set_creds(struct linux_binprm *bprm); | |||
| 220 | int security_bprm_check(struct linux_binprm *bprm); | 226 | int security_bprm_check(struct linux_binprm *bprm); |
| 221 | void security_bprm_committing_creds(struct linux_binprm *bprm); | 227 | void security_bprm_committing_creds(struct linux_binprm *bprm); |
| 222 | void security_bprm_committed_creds(struct linux_binprm *bprm); | 228 | void security_bprm_committed_creds(struct linux_binprm *bprm); |
| 229 | int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc); | ||
| 230 | int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param); | ||
| 223 | int security_sb_alloc(struct super_block *sb); | 231 | int security_sb_alloc(struct super_block *sb); |
| 224 | void security_sb_free(struct super_block *sb); | 232 | void security_sb_free(struct super_block *sb); |
| 225 | void security_free_mnt_opts(void **mnt_opts); | 233 | void security_free_mnt_opts(void **mnt_opts); |
| @@ -366,8 +374,10 @@ int security_sem_semctl(struct kern_ipc_perm *sma, int cmd); | |||
| 366 | int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, | 374 | int security_sem_semop(struct kern_ipc_perm *sma, struct sembuf *sops, |
| 367 | unsigned nsops, int alter); | 375 | unsigned nsops, int alter); |
| 368 | void security_d_instantiate(struct dentry *dentry, struct inode *inode); | 376 | void security_d_instantiate(struct dentry *dentry, struct inode *inode); |
| 369 | int security_getprocattr(struct task_struct *p, char *name, char **value); | 377 | int security_getprocattr(struct task_struct *p, const char *lsm, char *name, |
| 370 | int security_setprocattr(const char *name, void *value, size_t size); | 378 | char **value); |
| 379 | int security_setprocattr(const char *lsm, const char *name, void *value, | ||
| 380 | size_t size); | ||
| 371 | int security_netlink_send(struct sock *sk, struct sk_buff *skb); | 381 | int security_netlink_send(struct sock *sk, struct sk_buff *skb); |
| 372 | int security_ismaclabel(const char *name); | 382 | int security_ismaclabel(const char *name); |
| 373 | int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); | 383 | int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen); |
| @@ -462,14 +472,11 @@ static inline int security_capset(struct cred *new, | |||
| 462 | } | 472 | } |
| 463 | 473 | ||
| 464 | static inline int security_capable(const struct cred *cred, | 474 | static inline int security_capable(const struct cred *cred, |
| 465 | struct user_namespace *ns, int cap) | 475 | struct user_namespace *ns, |
| 476 | int cap, | ||
| 477 | unsigned int opts) | ||
| 466 | { | 478 | { |
| 467 | return cap_capable(cred, ns, cap, SECURITY_CAP_AUDIT); | 479 | return cap_capable(cred, ns, cap, opts); |
| 468 | } | ||
| 469 | |||
| 470 | static inline int security_capable_noaudit(const struct cred *cred, | ||
| 471 | struct user_namespace *ns, int cap) { | ||
| 472 | return cap_capable(cred, ns, cap, SECURITY_CAP_NOAUDIT); | ||
| 473 | } | 480 | } |
| 474 | 481 | ||
| 475 | static inline int security_quotactl(int cmds, int type, int id, | 482 | static inline int security_quotactl(int cmds, int type, int id, |
| @@ -517,6 +524,17 @@ static inline void security_bprm_committed_creds(struct linux_binprm *bprm) | |||
| 517 | { | 524 | { |
| 518 | } | 525 | } |
| 519 | 526 | ||
| 527 | static inline int security_fs_context_dup(struct fs_context *fc, | ||
| 528 | struct fs_context *src_fc) | ||
| 529 | { | ||
| 530 | return 0; | ||
| 531 | } | ||
| 532 | static inline int security_fs_context_parse_param(struct fs_context *fc, | ||
| 533 | struct fs_parameter *param) | ||
| 534 | { | ||
| 535 | return -ENOPARAM; | ||
| 536 | } | ||
| 537 | |||
| 520 | static inline int security_sb_alloc(struct super_block *sb) | 538 | static inline int security_sb_alloc(struct super_block *sb) |
| 521 | { | 539 | { |
| 522 | return 0; | 540 | return 0; |
| @@ -1112,15 +1130,18 @@ static inline int security_sem_semop(struct kern_ipc_perm *sma, | |||
| 1112 | return 0; | 1130 | return 0; |
| 1113 | } | 1131 | } |
| 1114 | 1132 | ||
| 1115 | static inline void security_d_instantiate(struct dentry *dentry, struct inode *inode) | 1133 | static inline void security_d_instantiate(struct dentry *dentry, |
| 1134 | struct inode *inode) | ||
| 1116 | { } | 1135 | { } |
| 1117 | 1136 | ||
| 1118 | static inline int security_getprocattr(struct task_struct *p, char *name, char **value) | 1137 | static inline int security_getprocattr(struct task_struct *p, const char *lsm, |
| 1138 | char *name, char **value) | ||
| 1119 | { | 1139 | { |
| 1120 | return -EINVAL; | 1140 | return -EINVAL; |
| 1121 | } | 1141 | } |
| 1122 | 1142 | ||
| 1123 | static inline int security_setprocattr(char *name, void *value, size_t size) | 1143 | static inline int security_setprocattr(const char *lsm, char *name, |
| 1144 | void *value, size_t size) | ||
| 1124 | { | 1145 | { |
| 1125 | return -EINVAL; | 1146 | return -EINVAL; |
| 1126 | } | 1147 | } |
| @@ -1674,8 +1695,7 @@ static inline int security_key_getsecurity(struct key *key, char **_buffer) | |||
| 1674 | #ifdef CONFIG_SECURITY | 1695 | #ifdef CONFIG_SECURITY |
| 1675 | int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); | 1696 | int security_audit_rule_init(u32 field, u32 op, char *rulestr, void **lsmrule); |
| 1676 | int security_audit_rule_known(struct audit_krule *krule); | 1697 | int security_audit_rule_known(struct audit_krule *krule); |
| 1677 | int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule, | 1698 | int security_audit_rule_match(u32 secid, u32 field, u32 op, void *lsmrule); |
| 1678 | struct audit_context *actx); | ||
| 1679 | void security_audit_rule_free(void *lsmrule); | 1699 | void security_audit_rule_free(void *lsmrule); |
| 1680 | 1700 | ||
| 1681 | #else | 1701 | #else |
| @@ -1692,7 +1712,7 @@ static inline int security_audit_rule_known(struct audit_krule *krule) | |||
| 1692 | } | 1712 | } |
| 1693 | 1713 | ||
| 1694 | static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, | 1714 | static inline int security_audit_rule_match(u32 secid, u32 field, u32 op, |
| 1695 | void *lsmrule, struct audit_context *actx) | 1715 | void *lsmrule) |
| 1696 | { | 1716 | { |
| 1697 | return 0; | 1717 | return 0; |
| 1698 | } | 1718 | } |
diff --git a/include/linux/selinux.h b/include/linux/selinux.h deleted file mode 100644 index 44f459612690..000000000000 --- a/include/linux/selinux.h +++ /dev/null | |||
| @@ -1,35 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * SELinux services exported to the rest of the kernel. | ||
| 3 | * | ||
| 4 | * Author: James Morris <jmorris@redhat.com> | ||
| 5 | * | ||
| 6 | * Copyright (C) 2005 Red Hat, Inc., James Morris <jmorris@redhat.com> | ||
| 7 | * Copyright (C) 2006 Trusted Computer Solutions, Inc. <dgoeddel@trustedcs.com> | ||
| 8 | * Copyright (C) 2006 IBM Corporation, Timothy R. Chavez <tinytim@us.ibm.com> | ||
| 9 | * | ||
| 10 | * This program is free software; you can redistribute it and/or modify | ||
| 11 | * it under the terms of the GNU General Public License version 2, | ||
| 12 | * as published by the Free Software Foundation. | ||
| 13 | */ | ||
| 14 | #ifndef _LINUX_SELINUX_H | ||
| 15 | #define _LINUX_SELINUX_H | ||
| 16 | |||
| 17 | struct selinux_audit_rule; | ||
| 18 | struct audit_context; | ||
| 19 | struct kern_ipc_perm; | ||
| 20 | |||
| 21 | #ifdef CONFIG_SECURITY_SELINUX | ||
| 22 | |||
| 23 | /** | ||
| 24 | * selinux_is_enabled - is SELinux enabled? | ||
| 25 | */ | ||
| 26 | bool selinux_is_enabled(void); | ||
| 27 | #else | ||
| 28 | |||
| 29 | static inline bool selinux_is_enabled(void) | ||
| 30 | { | ||
| 31 | return false; | ||
| 32 | } | ||
| 33 | #endif /* CONFIG_SECURITY_SELINUX */ | ||
| 34 | |||
| 35 | #endif /* _LINUX_SELINUX_H */ | ||
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h index f155dc607112..20d815a33145 100644 --- a/include/linux/shmem_fs.h +++ b/include/linux/shmem_fs.h | |||
| @@ -21,6 +21,7 @@ struct shmem_inode_info { | |||
| 21 | struct list_head swaplist; /* chain of maybes on swap */ | 21 | struct list_head swaplist; /* chain of maybes on swap */ |
| 22 | struct shared_policy policy; /* NUMA memory alloc policy */ | 22 | struct shared_policy policy; /* NUMA memory alloc policy */ |
| 23 | struct simple_xattrs xattrs; /* list of xattrs */ | 23 | struct simple_xattrs xattrs; /* list of xattrs */ |
| 24 | atomic_t stop_eviction; /* hold when working on inode */ | ||
| 24 | struct inode vfs_inode; | 25 | struct inode vfs_inode; |
| 25 | }; | 26 | }; |
| 26 | 27 | ||
| @@ -72,7 +73,8 @@ extern void shmem_unlock_mapping(struct address_space *mapping); | |||
| 72 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, | 73 | extern struct page *shmem_read_mapping_page_gfp(struct address_space *mapping, |
| 73 | pgoff_t index, gfp_t gfp_mask); | 74 | pgoff_t index, gfp_t gfp_mask); |
| 74 | extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); | 75 | extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end); |
| 75 | extern int shmem_unuse(swp_entry_t entry, struct page *page); | 76 | extern int shmem_unuse(unsigned int type, bool frontswap, |
| 77 | unsigned long *fs_pages_to_unuse); | ||
| 76 | 78 | ||
| 77 | extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); | 79 | extern unsigned long shmem_swap_usage(struct vm_area_struct *vma); |
| 78 | extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, | 80 | extern unsigned long shmem_partial_swap_usage(struct address_space *mapping, |
diff --git a/include/linux/signal.h b/include/linux/signal.h index cc7e2c1cd444..9702016734b1 100644 --- a/include/linux/signal.h +++ b/include/linux/signal.h | |||
| @@ -392,7 +392,7 @@ extern bool unhandled_signal(struct task_struct *tsk, int sig); | |||
| 392 | #endif | 392 | #endif |
| 393 | 393 | ||
| 394 | #define siginmask(sig, mask) \ | 394 | #define siginmask(sig, mask) \ |
| 395 | ((sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) | 395 | ((sig) > 0 && (sig) < SIGRTMIN && (rt_sigmask(sig) & (mask))) |
| 396 | 396 | ||
| 397 | #define SIG_KERNEL_ONLY_MASK (\ | 397 | #define SIG_KERNEL_ONLY_MASK (\ |
| 398 | rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) | 398 | rt_sigmask(SIGKILL) | rt_sigmask(SIGSTOP)) |
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 93f56fddd92a..9027a8c4219f 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h | |||
| @@ -327,26 +327,49 @@ struct skb_frag_struct { | |||
| 327 | #endif | 327 | #endif |
| 328 | }; | 328 | }; |
| 329 | 329 | ||
| 330 | /** | ||
| 331 | * skb_frag_size - Returns the size of a skb fragment | ||
| 332 | * @frag: skb fragment | ||
| 333 | */ | ||
| 330 | static inline unsigned int skb_frag_size(const skb_frag_t *frag) | 334 | static inline unsigned int skb_frag_size(const skb_frag_t *frag) |
| 331 | { | 335 | { |
| 332 | return frag->size; | 336 | return frag->size; |
| 333 | } | 337 | } |
| 334 | 338 | ||
| 339 | /** | ||
| 340 | * skb_frag_size_set - Sets the size of a skb fragment | ||
| 341 | * @frag: skb fragment | ||
| 342 | * @size: size of fragment | ||
| 343 | */ | ||
| 335 | static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) | 344 | static inline void skb_frag_size_set(skb_frag_t *frag, unsigned int size) |
| 336 | { | 345 | { |
| 337 | frag->size = size; | 346 | frag->size = size; |
| 338 | } | 347 | } |
| 339 | 348 | ||
| 349 | /** | ||
| 350 | * skb_frag_size_add - Incrementes the size of a skb fragment by %delta | ||
| 351 | * @frag: skb fragment | ||
| 352 | * @delta: value to add | ||
| 353 | */ | ||
| 340 | static inline void skb_frag_size_add(skb_frag_t *frag, int delta) | 354 | static inline void skb_frag_size_add(skb_frag_t *frag, int delta) |
| 341 | { | 355 | { |
| 342 | frag->size += delta; | 356 | frag->size += delta; |
| 343 | } | 357 | } |
| 344 | 358 | ||
| 359 | /** | ||
| 360 | * skb_frag_size_sub - Decrements the size of a skb fragment by %delta | ||
| 361 | * @frag: skb fragment | ||
| 362 | * @delta: value to subtract | ||
| 363 | */ | ||
| 345 | static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) | 364 | static inline void skb_frag_size_sub(skb_frag_t *frag, int delta) |
| 346 | { | 365 | { |
| 347 | frag->size -= delta; | 366 | frag->size -= delta; |
| 348 | } | 367 | } |
| 349 | 368 | ||
| 369 | /** | ||
| 370 | * skb_frag_must_loop - Test if %p is a high memory page | ||
| 371 | * @p: fragment's page | ||
| 372 | */ | ||
| 350 | static inline bool skb_frag_must_loop(struct page *p) | 373 | static inline bool skb_frag_must_loop(struct page *p) |
| 351 | { | 374 | { |
| 352 | #if defined(CONFIG_HIGHMEM) | 375 | #if defined(CONFIG_HIGHMEM) |
| @@ -590,7 +613,7 @@ typedef unsigned int sk_buff_data_t; | |||
| 590 | typedef unsigned char *sk_buff_data_t; | 613 | typedef unsigned char *sk_buff_data_t; |
| 591 | #endif | 614 | #endif |
| 592 | 615 | ||
| 593 | /** | 616 | /** |
| 594 | * struct sk_buff - socket buffer | 617 | * struct sk_buff - socket buffer |
| 595 | * @next: Next buffer in list | 618 | * @next: Next buffer in list |
| 596 | * @prev: Previous buffer in list | 619 | * @prev: Previous buffer in list |
| @@ -648,7 +671,7 @@ typedef unsigned char *sk_buff_data_t; | |||
| 648 | * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL | 671 | * @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL |
| 649 | * @dst_pending_confirm: need to confirm neighbour | 672 | * @dst_pending_confirm: need to confirm neighbour |
| 650 | * @decrypted: Decrypted SKB | 673 | * @decrypted: Decrypted SKB |
| 651 | * @napi_id: id of the NAPI struct this skb came from | 674 | * @napi_id: id of the NAPI struct this skb came from |
| 652 | * @secmark: security marking | 675 | * @secmark: security marking |
| 653 | * @mark: Generic packet mark | 676 | * @mark: Generic packet mark |
| 654 | * @vlan_proto: vlan encapsulation protocol | 677 | * @vlan_proto: vlan encapsulation protocol |
| @@ -883,7 +906,10 @@ struct sk_buff { | |||
| 883 | #define SKB_ALLOC_RX 0x02 | 906 | #define SKB_ALLOC_RX 0x02 |
| 884 | #define SKB_ALLOC_NAPI 0x04 | 907 | #define SKB_ALLOC_NAPI 0x04 |
| 885 | 908 | ||
| 886 | /* Returns true if the skb was allocated from PFMEMALLOC reserves */ | 909 | /** |
| 910 | * skb_pfmemalloc - Test if the skb was allocated from PFMEMALLOC reserves | ||
| 911 | * @skb: buffer | ||
| 912 | */ | ||
| 887 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) | 913 | static inline bool skb_pfmemalloc(const struct sk_buff *skb) |
| 888 | { | 914 | { |
| 889 | return unlikely(skb->pfmemalloc); | 915 | return unlikely(skb->pfmemalloc); |
| @@ -905,7 +931,7 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb) | |||
| 905 | */ | 931 | */ |
| 906 | static inline struct dst_entry *skb_dst(const struct sk_buff *skb) | 932 | static inline struct dst_entry *skb_dst(const struct sk_buff *skb) |
| 907 | { | 933 | { |
| 908 | /* If refdst was not refcounted, check we still are in a | 934 | /* If refdst was not refcounted, check we still are in a |
| 909 | * rcu_read_lock section | 935 | * rcu_read_lock section |
| 910 | */ | 936 | */ |
| 911 | WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && | 937 | WARN_ON((skb->_skb_refdst & SKB_DST_NOREF) && |
| @@ -952,6 +978,10 @@ static inline bool skb_dst_is_noref(const struct sk_buff *skb) | |||
| 952 | return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); | 978 | return (skb->_skb_refdst & SKB_DST_NOREF) && skb_dst(skb); |
| 953 | } | 979 | } |
| 954 | 980 | ||
| 981 | /** | ||
| 982 | * skb_rtable - Returns the skb &rtable | ||
| 983 | * @skb: buffer | ||
| 984 | */ | ||
| 955 | static inline struct rtable *skb_rtable(const struct sk_buff *skb) | 985 | static inline struct rtable *skb_rtable(const struct sk_buff *skb) |
| 956 | { | 986 | { |
| 957 | return (struct rtable *)skb_dst(skb); | 987 | return (struct rtable *)skb_dst(skb); |
| @@ -966,6 +996,10 @@ static inline bool skb_pkt_type_ok(u32 ptype) | |||
| 966 | return ptype <= PACKET_OTHERHOST; | 996 | return ptype <= PACKET_OTHERHOST; |
| 967 | } | 997 | } |
| 968 | 998 | ||
| 999 | /** | ||
| 1000 | * skb_napi_id - Returns the skb's NAPI id | ||
| 1001 | * @skb: buffer | ||
| 1002 | */ | ||
| 969 | static inline unsigned int skb_napi_id(const struct sk_buff *skb) | 1003 | static inline unsigned int skb_napi_id(const struct sk_buff *skb) |
| 970 | { | 1004 | { |
| 971 | #ifdef CONFIG_NET_RX_BUSY_POLL | 1005 | #ifdef CONFIG_NET_RX_BUSY_POLL |
| @@ -975,7 +1009,12 @@ static inline unsigned int skb_napi_id(const struct sk_buff *skb) | |||
| 975 | #endif | 1009 | #endif |
| 976 | } | 1010 | } |
| 977 | 1011 | ||
| 978 | /* decrement the reference count and return true if we can free the skb */ | 1012 | /** |
| 1013 | * skb_unref - decrement the skb's reference count | ||
| 1014 | * @skb: buffer | ||
| 1015 | * | ||
| 1016 | * Returns true if we can free the skb. | ||
| 1017 | */ | ||
| 979 | static inline bool skb_unref(struct sk_buff *skb) | 1018 | static inline bool skb_unref(struct sk_buff *skb) |
| 980 | { | 1019 | { |
| 981 | if (unlikely(!skb)) | 1020 | if (unlikely(!skb)) |
| @@ -1005,6 +1044,14 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags, | |||
| 1005 | int node); | 1044 | int node); |
| 1006 | struct sk_buff *__build_skb(void *data, unsigned int frag_size); | 1045 | struct sk_buff *__build_skb(void *data, unsigned int frag_size); |
| 1007 | struct sk_buff *build_skb(void *data, unsigned int frag_size); | 1046 | struct sk_buff *build_skb(void *data, unsigned int frag_size); |
| 1047 | |||
| 1048 | /** | ||
| 1049 | * alloc_skb - allocate a network buffer | ||
| 1050 | * @size: size to allocate | ||
| 1051 | * @priority: allocation mask | ||
| 1052 | * | ||
| 1053 | * This function is a convenient wrapper around __alloc_skb(). | ||
| 1054 | */ | ||
| 1008 | static inline struct sk_buff *alloc_skb(unsigned int size, | 1055 | static inline struct sk_buff *alloc_skb(unsigned int size, |
| 1009 | gfp_t priority) | 1056 | gfp_t priority) |
| 1010 | { | 1057 | { |
| @@ -1047,6 +1094,13 @@ static inline bool skb_fclone_busy(const struct sock *sk, | |||
| 1047 | fclones->skb2.sk == sk; | 1094 | fclones->skb2.sk == sk; |
| 1048 | } | 1095 | } |
| 1049 | 1096 | ||
| 1097 | /** | ||
| 1098 | * alloc_skb_fclone - allocate a network buffer from fclone cache | ||
| 1099 | * @size: size to allocate | ||
| 1100 | * @priority: allocation mask | ||
| 1101 | * | ||
| 1102 | * This function is a convenient wrapper around __alloc_skb(). | ||
| 1103 | */ | ||
| 1050 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, | 1104 | static inline struct sk_buff *alloc_skb_fclone(unsigned int size, |
| 1051 | gfp_t priority) | 1105 | gfp_t priority) |
| 1052 | { | 1106 | { |
| @@ -1221,6 +1275,11 @@ static inline int skb_flow_dissector_bpf_prog_detach(const union bpf_attr *attr) | |||
| 1221 | } | 1275 | } |
| 1222 | #endif | 1276 | #endif |
| 1223 | 1277 | ||
| 1278 | struct bpf_flow_keys; | ||
| 1279 | bool __skb_flow_bpf_dissect(struct bpf_prog *prog, | ||
| 1280 | const struct sk_buff *skb, | ||
| 1281 | struct flow_dissector *flow_dissector, | ||
| 1282 | struct bpf_flow_keys *flow_keys); | ||
| 1224 | bool __skb_flow_dissect(const struct sk_buff *skb, | 1283 | bool __skb_flow_dissect(const struct sk_buff *skb, |
| 1225 | struct flow_dissector *flow_dissector, | 1284 | struct flow_dissector *flow_dissector, |
| 1226 | void *target_container, | 1285 | void *target_container, |
| @@ -1884,12 +1943,12 @@ static inline void __skb_queue_before(struct sk_buff_head *list, | |||
| 1884 | * | 1943 | * |
| 1885 | * A buffer cannot be placed on two lists at the same time. | 1944 | * A buffer cannot be placed on two lists at the same time. |
| 1886 | */ | 1945 | */ |
| 1887 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); | ||
| 1888 | static inline void __skb_queue_head(struct sk_buff_head *list, | 1946 | static inline void __skb_queue_head(struct sk_buff_head *list, |
| 1889 | struct sk_buff *newsk) | 1947 | struct sk_buff *newsk) |
| 1890 | { | 1948 | { |
| 1891 | __skb_queue_after(list, (struct sk_buff *)list, newsk); | 1949 | __skb_queue_after(list, (struct sk_buff *)list, newsk); |
| 1892 | } | 1950 | } |
| 1951 | void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk); | ||
| 1893 | 1952 | ||
| 1894 | /** | 1953 | /** |
| 1895 | * __skb_queue_tail - queue a buffer at the list tail | 1954 | * __skb_queue_tail - queue a buffer at the list tail |
| @@ -1901,12 +1960,12 @@ static inline void __skb_queue_head(struct sk_buff_head *list, | |||
| 1901 | * | 1960 | * |
| 1902 | * A buffer cannot be placed on two lists at the same time. | 1961 | * A buffer cannot be placed on two lists at the same time. |
| 1903 | */ | 1962 | */ |
| 1904 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); | ||
| 1905 | static inline void __skb_queue_tail(struct sk_buff_head *list, | 1963 | static inline void __skb_queue_tail(struct sk_buff_head *list, |
| 1906 | struct sk_buff *newsk) | 1964 | struct sk_buff *newsk) |
| 1907 | { | 1965 | { |
| 1908 | __skb_queue_before(list, (struct sk_buff *)list, newsk); | 1966 | __skb_queue_before(list, (struct sk_buff *)list, newsk); |
| 1909 | } | 1967 | } |
| 1968 | void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk); | ||
| 1910 | 1969 | ||
| 1911 | /* | 1970 | /* |
| 1912 | * remove sk_buff from list. _Must_ be called atomically, and with | 1971 | * remove sk_buff from list. _Must_ be called atomically, and with |
| @@ -1933,7 +1992,6 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list) | |||
| 1933 | * so must be used with appropriate locks held only. The head item is | 1992 | * so must be used with appropriate locks held only. The head item is |
| 1934 | * returned or %NULL if the list is empty. | 1993 | * returned or %NULL if the list is empty. |
| 1935 | */ | 1994 | */ |
| 1936 | struct sk_buff *skb_dequeue(struct sk_buff_head *list); | ||
| 1937 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) | 1995 | static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) |
| 1938 | { | 1996 | { |
| 1939 | struct sk_buff *skb = skb_peek(list); | 1997 | struct sk_buff *skb = skb_peek(list); |
| @@ -1941,6 +1999,7 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) | |||
| 1941 | __skb_unlink(skb, list); | 1999 | __skb_unlink(skb, list); |
| 1942 | return skb; | 2000 | return skb; |
| 1943 | } | 2001 | } |
| 2002 | struct sk_buff *skb_dequeue(struct sk_buff_head *list); | ||
| 1944 | 2003 | ||
| 1945 | /** | 2004 | /** |
| 1946 | * __skb_dequeue_tail - remove from the tail of the queue | 2005 | * __skb_dequeue_tail - remove from the tail of the queue |
| @@ -1950,7 +2009,6 @@ static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list) | |||
| 1950 | * so must be used with appropriate locks held only. The tail item is | 2009 | * so must be used with appropriate locks held only. The tail item is |
| 1951 | * returned or %NULL if the list is empty. | 2010 | * returned or %NULL if the list is empty. |
| 1952 | */ | 2011 | */ |
| 1953 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); | ||
| 1954 | static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) | 2012 | static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) |
| 1955 | { | 2013 | { |
| 1956 | struct sk_buff *skb = skb_peek_tail(list); | 2014 | struct sk_buff *skb = skb_peek_tail(list); |
| @@ -1958,6 +2016,7 @@ static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list) | |||
| 1958 | __skb_unlink(skb, list); | 2016 | __skb_unlink(skb, list); |
| 1959 | return skb; | 2017 | return skb; |
| 1960 | } | 2018 | } |
| 2019 | struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list); | ||
| 1961 | 2020 | ||
| 1962 | 2021 | ||
| 1963 | static inline bool skb_is_nonlinear(const struct sk_buff *skb) | 2022 | static inline bool skb_is_nonlinear(const struct sk_buff *skb) |
| @@ -2424,8 +2483,7 @@ static inline void skb_pop_mac_header(struct sk_buff *skb) | |||
| 2424 | skb->mac_header = skb->network_header; | 2483 | skb->mac_header = skb->network_header; |
| 2425 | } | 2484 | } |
| 2426 | 2485 | ||
| 2427 | static inline void skb_probe_transport_header(struct sk_buff *skb, | 2486 | static inline void skb_probe_transport_header(struct sk_buff *skb) |
| 2428 | const int offset_hint) | ||
| 2429 | { | 2487 | { |
| 2430 | struct flow_keys_basic keys; | 2488 | struct flow_keys_basic keys; |
| 2431 | 2489 | ||
| @@ -2434,8 +2492,6 @@ static inline void skb_probe_transport_header(struct sk_buff *skb, | |||
| 2434 | 2492 | ||
| 2435 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) | 2493 | if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0)) |
| 2436 | skb_set_transport_header(skb, keys.control.thoff); | 2494 | skb_set_transport_header(skb, keys.control.thoff); |
| 2437 | else | ||
| 2438 | skb_set_transport_header(skb, offset_hint); | ||
| 2439 | } | 2495 | } |
| 2440 | 2496 | ||
| 2441 | static inline void skb_mac_header_rebuild(struct sk_buff *skb) | 2497 | static inline void skb_mac_header_rebuild(struct sk_buff *skb) |
| @@ -2648,13 +2704,13 @@ static inline int skb_orphan_frags_rx(struct sk_buff *skb, gfp_t gfp_mask) | |||
| 2648 | * the list and one reference dropped. This function does not take the | 2704 | * the list and one reference dropped. This function does not take the |
| 2649 | * list lock and the caller must hold the relevant locks to use it. | 2705 | * list lock and the caller must hold the relevant locks to use it. |
| 2650 | */ | 2706 | */ |
| 2651 | void skb_queue_purge(struct sk_buff_head *list); | ||
| 2652 | static inline void __skb_queue_purge(struct sk_buff_head *list) | 2707 | static inline void __skb_queue_purge(struct sk_buff_head *list) |
| 2653 | { | 2708 | { |
| 2654 | struct sk_buff *skb; | 2709 | struct sk_buff *skb; |
| 2655 | while ((skb = __skb_dequeue(list)) != NULL) | 2710 | while ((skb = __skb_dequeue(list)) != NULL) |
| 2656 | kfree_skb(skb); | 2711 | kfree_skb(skb); |
| 2657 | } | 2712 | } |
| 2713 | void skb_queue_purge(struct sk_buff_head *list); | ||
| 2658 | 2714 | ||
| 2659 | unsigned int skb_rbtree_purge(struct rb_root *root); | 2715 | unsigned int skb_rbtree_purge(struct rb_root *root); |
| 2660 | 2716 | ||
| @@ -3023,7 +3079,7 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) | |||
| 3023 | } | 3079 | } |
| 3024 | 3080 | ||
| 3025 | /** | 3081 | /** |
| 3026 | * skb_put_padto - increase size and pad an skbuff up to a minimal size | 3082 | * __skb_put_padto - increase size and pad an skbuff up to a minimal size |
| 3027 | * @skb: buffer to pad | 3083 | * @skb: buffer to pad |
| 3028 | * @len: minimal length | 3084 | * @len: minimal length |
| 3029 | * @free_on_error: free buffer on error | 3085 | * @free_on_error: free buffer on error |
| @@ -3218,6 +3274,7 @@ int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len); | |||
| 3218 | * | 3274 | * |
| 3219 | * This is exactly the same as pskb_trim except that it ensures the | 3275 | * This is exactly the same as pskb_trim except that it ensures the |
| 3220 | * checksum of received packets are still valid after the operation. | 3276 | * checksum of received packets are still valid after the operation. |
| 3277 | * It can change skb pointers. | ||
| 3221 | */ | 3278 | */ |
| 3222 | 3279 | ||
| 3223 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) | 3280 | static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len) |
| @@ -3480,16 +3537,25 @@ static inline ktime_t skb_get_ktime(const struct sk_buff *skb) | |||
| 3480 | /** | 3537 | /** |
| 3481 | * skb_get_timestamp - get timestamp from a skb | 3538 | * skb_get_timestamp - get timestamp from a skb |
| 3482 | * @skb: skb to get stamp from | 3539 | * @skb: skb to get stamp from |
| 3483 | * @stamp: pointer to struct timeval to store stamp in | 3540 | * @stamp: pointer to struct __kernel_old_timeval to store stamp in |
| 3484 | * | 3541 | * |
| 3485 | * Timestamps are stored in the skb as offsets to a base timestamp. | 3542 | * Timestamps are stored in the skb as offsets to a base timestamp. |
| 3486 | * This function converts the offset back to a struct timeval and stores | 3543 | * This function converts the offset back to a struct timeval and stores |
| 3487 | * it in stamp. | 3544 | * it in stamp. |
| 3488 | */ | 3545 | */ |
| 3489 | static inline void skb_get_timestamp(const struct sk_buff *skb, | 3546 | static inline void skb_get_timestamp(const struct sk_buff *skb, |
| 3490 | struct timeval *stamp) | 3547 | struct __kernel_old_timeval *stamp) |
| 3491 | { | 3548 | { |
| 3492 | *stamp = ktime_to_timeval(skb->tstamp); | 3549 | *stamp = ns_to_kernel_old_timeval(skb->tstamp); |
| 3550 | } | ||
| 3551 | |||
| 3552 | static inline void skb_get_new_timestamp(const struct sk_buff *skb, | ||
| 3553 | struct __kernel_sock_timeval *stamp) | ||
| 3554 | { | ||
| 3555 | struct timespec64 ts = ktime_to_timespec64(skb->tstamp); | ||
| 3556 | |||
| 3557 | stamp->tv_sec = ts.tv_sec; | ||
| 3558 | stamp->tv_usec = ts.tv_nsec / 1000; | ||
| 3493 | } | 3559 | } |
| 3494 | 3560 | ||
| 3495 | static inline void skb_get_timestampns(const struct sk_buff *skb, | 3561 | static inline void skb_get_timestampns(const struct sk_buff *skb, |
| @@ -3498,6 +3564,15 @@ static inline void skb_get_timestampns(const struct sk_buff *skb, | |||
| 3498 | *stamp = ktime_to_timespec(skb->tstamp); | 3564 | *stamp = ktime_to_timespec(skb->tstamp); |
| 3499 | } | 3565 | } |
| 3500 | 3566 | ||
| 3567 | static inline void skb_get_new_timestampns(const struct sk_buff *skb, | ||
| 3568 | struct __kernel_timespec *stamp) | ||
| 3569 | { | ||
| 3570 | struct timespec64 ts = ktime_to_timespec64(skb->tstamp); | ||
| 3571 | |||
| 3572 | stamp->tv_sec = ts.tv_sec; | ||
| 3573 | stamp->tv_nsec = ts.tv_nsec; | ||
| 3574 | } | ||
| 3575 | |||
| 3501 | static inline void __net_timestamp(struct sk_buff *skb) | 3576 | static inline void __net_timestamp(struct sk_buff *skb) |
| 3502 | { | 3577 | { |
| 3503 | skb->tstamp = ktime_get_real(); | 3578 | skb->tstamp = ktime_get_real(); |
| @@ -4211,6 +4286,12 @@ static inline bool skb_is_gso_sctp(const struct sk_buff *skb) | |||
| 4211 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; | 4286 | return skb_shinfo(skb)->gso_type & SKB_GSO_SCTP; |
| 4212 | } | 4287 | } |
| 4213 | 4288 | ||
| 4289 | /* Note: Should be called only if skb_is_gso(skb) is true */ | ||
| 4290 | static inline bool skb_is_gso_tcp(const struct sk_buff *skb) | ||
| 4291 | { | ||
| 4292 | return skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6); | ||
| 4293 | } | ||
| 4294 | |||
| 4214 | static inline void skb_gso_reset(struct sk_buff *skb) | 4295 | static inline void skb_gso_reset(struct sk_buff *skb) |
| 4215 | { | 4296 | { |
| 4216 | skb_shinfo(skb)->gso_size = 0; | 4297 | skb_shinfo(skb)->gso_size = 0; |
| @@ -4296,7 +4377,7 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb) | |||
| 4296 | /* Local Checksum Offload. | 4377 | /* Local Checksum Offload. |
| 4297 | * Compute outer checksum based on the assumption that the | 4378 | * Compute outer checksum based on the assumption that the |
| 4298 | * inner checksum will be offloaded later. | 4379 | * inner checksum will be offloaded later. |
| 4299 | * See Documentation/networking/checksum-offloads.txt for | 4380 | * See Documentation/networking/checksum-offloads.rst for |
| 4300 | * explanation of how this works. | 4381 | * explanation of how this works. |
| 4301 | * Fill in outer checksum adjustment (e.g. with sum of outer | 4382 | * Fill in outer checksum adjustment (e.g. with sum of outer |
| 4302 | * pseudo-header) before calling. | 4383 | * pseudo-header) before calling. |
diff --git a/include/linux/slab.h b/include/linux/slab.h index 11b45f7ae405..9449b19c5f10 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h | |||
| @@ -32,6 +32,8 @@ | |||
| 32 | #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) | 32 | #define SLAB_HWCACHE_ALIGN ((slab_flags_t __force)0x00002000U) |
| 33 | /* Use GFP_DMA memory */ | 33 | /* Use GFP_DMA memory */ |
| 34 | #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) | 34 | #define SLAB_CACHE_DMA ((slab_flags_t __force)0x00004000U) |
| 35 | /* Use GFP_DMA32 memory */ | ||
| 36 | #define SLAB_CACHE_DMA32 ((slab_flags_t __force)0x00008000U) | ||
| 35 | /* DEBUG: Store the last owner for bug hunting */ | 37 | /* DEBUG: Store the last owner for bug hunting */ |
| 36 | #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) | 38 | #define SLAB_STORE_USER ((slab_flags_t __force)0x00010000U) |
| 37 | /* Panic if kmem_cache_create() fails */ | 39 | /* Panic if kmem_cache_create() fails */ |
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h index 3a1a1dbc6f49..d2153789bd9f 100644 --- a/include/linux/slub_def.h +++ b/include/linux/slub_def.h | |||
| @@ -81,12 +81,12 @@ struct kmem_cache_order_objects { | |||
| 81 | */ | 81 | */ |
| 82 | struct kmem_cache { | 82 | struct kmem_cache { |
| 83 | struct kmem_cache_cpu __percpu *cpu_slab; | 83 | struct kmem_cache_cpu __percpu *cpu_slab; |
| 84 | /* Used for retriving partial slabs etc */ | 84 | /* Used for retrieving partial slabs, etc. */ |
| 85 | slab_flags_t flags; | 85 | slab_flags_t flags; |
| 86 | unsigned long min_partial; | 86 | unsigned long min_partial; |
| 87 | unsigned int size; /* The size of an object including meta data */ | 87 | unsigned int size; /* The size of an object including metadata */ |
| 88 | unsigned int object_size;/* The size of an object without meta data */ | 88 | unsigned int object_size;/* The size of an object without metadata */ |
| 89 | unsigned int offset; /* Free pointer offset. */ | 89 | unsigned int offset; /* Free pointer offset */ |
| 90 | #ifdef CONFIG_SLUB_CPU_PARTIAL | 90 | #ifdef CONFIG_SLUB_CPU_PARTIAL |
| 91 | /* Number of per cpu partial objects to keep around */ | 91 | /* Number of per cpu partial objects to keep around */ |
| 92 | unsigned int cpu_partial; | 92 | unsigned int cpu_partial; |
| @@ -110,7 +110,7 @@ struct kmem_cache { | |||
| 110 | #endif | 110 | #endif |
| 111 | #ifdef CONFIG_MEMCG | 111 | #ifdef CONFIG_MEMCG |
| 112 | struct memcg_cache_params memcg_params; | 112 | struct memcg_cache_params memcg_params; |
| 113 | /* for propagation, maximum size of a stored attr */ | 113 | /* For propagation, maximum size of a stored attr */ |
| 114 | unsigned int max_attr_size; | 114 | unsigned int max_attr_size; |
| 115 | #ifdef CONFIG_SYSFS | 115 | #ifdef CONFIG_SYSFS |
| 116 | struct kset *memcg_kset; | 116 | struct kset *memcg_kset; |
| @@ -151,7 +151,7 @@ struct kmem_cache { | |||
| 151 | #else | 151 | #else |
| 152 | #define slub_cpu_partial(s) (0) | 152 | #define slub_cpu_partial(s) (0) |
| 153 | #define slub_set_cpu_partial(s, n) | 153 | #define slub_set_cpu_partial(s, n) |
| 154 | #endif // CONFIG_SLUB_CPU_PARTIAL | 154 | #endif /* CONFIG_SLUB_CPU_PARTIAL */ |
| 155 | 155 | ||
| 156 | #ifdef CONFIG_SYSFS | 156 | #ifdef CONFIG_SYSFS |
| 157 | #define SLAB_SUPPORTS_SYSFS | 157 | #define SLAB_SUPPORTS_SYSFS |
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h index 69c285b1c990..eb71a50b8afc 100644 --- a/include/linux/soc/qcom/llcc-qcom.h +++ b/include/linux/soc/qcom/llcc-qcom.h | |||
| @@ -162,6 +162,12 @@ int llcc_slice_deactivate(struct llcc_slice_desc *desc); | |||
| 162 | */ | 162 | */ |
| 163 | int qcom_llcc_probe(struct platform_device *pdev, | 163 | int qcom_llcc_probe(struct platform_device *pdev, |
| 164 | const struct llcc_slice_config *table, u32 sz); | 164 | const struct llcc_slice_config *table, u32 sz); |
| 165 | |||
| 166 | /** | ||
| 167 | * qcom_llcc_remove - remove the sct table | ||
| 168 | * @pdev: Platform device pointer | ||
| 169 | */ | ||
| 170 | int qcom_llcc_remove(struct platform_device *pdev); | ||
| 165 | #else | 171 | #else |
| 166 | static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid) | 172 | static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid) |
| 167 | { | 173 | { |
diff --git a/include/linux/socket.h b/include/linux/socket.h index ab2041a00e01..b57cd8bf96e2 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h | |||
| @@ -26,7 +26,7 @@ typedef __kernel_sa_family_t sa_family_t; | |||
| 26 | /* | 26 | /* |
| 27 | * 1003.1g requires sa_family_t and that sa_data is char. | 27 | * 1003.1g requires sa_family_t and that sa_data is char. |
| 28 | */ | 28 | */ |
| 29 | 29 | ||
| 30 | struct sockaddr { | 30 | struct sockaddr { |
| 31 | sa_family_t sa_family; /* address family, AF_xxx */ | 31 | sa_family_t sa_family; /* address family, AF_xxx */ |
| 32 | char sa_data[14]; /* 14 bytes of protocol address */ | 32 | char sa_data[14]; /* 14 bytes of protocol address */ |
| @@ -44,7 +44,7 @@ struct linger { | |||
| 44 | * system, not 4.3. Thus msg_accrights(len) are now missing. They | 44 | * system, not 4.3. Thus msg_accrights(len) are now missing. They |
| 45 | * belong in an obscure libc emulation or the bin. | 45 | * belong in an obscure libc emulation or the bin. |
| 46 | */ | 46 | */ |
| 47 | 47 | ||
| 48 | struct msghdr { | 48 | struct msghdr { |
| 49 | void *msg_name; /* ptr to socket address structure */ | 49 | void *msg_name; /* ptr to socket address structure */ |
| 50 | int msg_namelen; /* size of socket address structure */ | 50 | int msg_namelen; /* size of socket address structure */ |
| @@ -54,7 +54,7 @@ struct msghdr { | |||
| 54 | unsigned int msg_flags; /* flags on received message */ | 54 | unsigned int msg_flags; /* flags on received message */ |
| 55 | struct kiocb *msg_iocb; /* ptr to iocb for async requests */ | 55 | struct kiocb *msg_iocb; /* ptr to iocb for async requests */ |
| 56 | }; | 56 | }; |
| 57 | 57 | ||
| 58 | struct user_msghdr { | 58 | struct user_msghdr { |
| 59 | void __user *msg_name; /* ptr to socket address structure */ | 59 | void __user *msg_name; /* ptr to socket address structure */ |
| 60 | int msg_namelen; /* size of socket address structure */ | 60 | int msg_namelen; /* size of socket address structure */ |
| @@ -122,7 +122,7 @@ struct cmsghdr { | |||
| 122 | * inside range, given by msg->msg_controllen before using | 122 | * inside range, given by msg->msg_controllen before using |
| 123 | * ancillary object DATA. --ANK (980731) | 123 | * ancillary object DATA. --ANK (980731) |
| 124 | */ | 124 | */ |
| 125 | 125 | ||
| 126 | static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, | 126 | static inline struct cmsghdr * __cmsg_nxthdr(void *__ctl, __kernel_size_t __size, |
| 127 | struct cmsghdr *__cmsg) | 127 | struct cmsghdr *__cmsg) |
| 128 | { | 128 | { |
| @@ -264,10 +264,10 @@ struct ucred { | |||
| 264 | /* Maximum queue length specifiable by listen. */ | 264 | /* Maximum queue length specifiable by listen. */ |
| 265 | #define SOMAXCONN 128 | 265 | #define SOMAXCONN 128 |
| 266 | 266 | ||
| 267 | /* Flags we can use with send/ and recv. | 267 | /* Flags we can use with send/ and recv. |
| 268 | Added those for 1003.1g not all are supported yet | 268 | Added those for 1003.1g not all are supported yet |
| 269 | */ | 269 | */ |
| 270 | 270 | ||
| 271 | #define MSG_OOB 1 | 271 | #define MSG_OOB 1 |
| 272 | #define MSG_PEEK 2 | 272 | #define MSG_PEEK 2 |
| 273 | #define MSG_DONTROUTE 4 | 273 | #define MSG_DONTROUTE 4 |
| @@ -349,9 +349,17 @@ struct ucred { | |||
| 349 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); | 349 | extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); |
| 350 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); | 350 | extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); |
| 351 | 351 | ||
| 352 | struct timespec64; | ||
| 352 | struct __kernel_timespec; | 353 | struct __kernel_timespec; |
| 353 | struct old_timespec32; | 354 | struct old_timespec32; |
| 354 | 355 | ||
| 356 | struct scm_timestamping_internal { | ||
| 357 | struct timespec64 ts[3]; | ||
| 358 | }; | ||
| 359 | |||
| 360 | extern void put_cmsg_scm_timestamping64(struct msghdr *msg, struct scm_timestamping_internal *tss); | ||
| 361 | extern void put_cmsg_scm_timestamping(struct msghdr *msg, struct scm_timestamping_internal *tss); | ||
| 362 | |||
| 355 | /* The __sys_...msg variants allow MSG_CMSG_COMPAT iff | 363 | /* The __sys_...msg variants allow MSG_CMSG_COMPAT iff |
| 356 | * forbid_cmsg_compat==false | 364 | * forbid_cmsg_compat==false |
| 357 | */ | 365 | */ |
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h index b0674e330ef6..c1c59473cef9 100644 --- a/include/linux/spi/pxa2xx_spi.h +++ b/include/linux/spi/pxa2xx_spi.h | |||
| @@ -22,7 +22,7 @@ | |||
| 22 | struct dma_chan; | 22 | struct dma_chan; |
| 23 | 23 | ||
| 24 | /* device.platform_data for SSP controller devices */ | 24 | /* device.platform_data for SSP controller devices */ |
| 25 | struct pxa2xx_spi_master { | 25 | struct pxa2xx_spi_controller { |
| 26 | u16 num_chipselect; | 26 | u16 num_chipselect; |
| 27 | u8 enable_dma; | 27 | u8 enable_dma; |
| 28 | bool is_slave; | 28 | bool is_slave; |
| @@ -54,7 +54,7 @@ struct pxa2xx_spi_chip { | |||
| 54 | 54 | ||
| 55 | #include <linux/clk.h> | 55 | #include <linux/clk.h> |
| 56 | 56 | ||
| 57 | extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_master *info); | 57 | extern void pxa2xx_set_spi_info(unsigned id, struct pxa2xx_spi_controller *info); |
| 58 | 58 | ||
| 59 | #endif | 59 | #endif |
| 60 | #endif | 60 | #endif |
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h index 3fe24500c5ee..3703d0dcac2e 100644 --- a/include/linux/spi/spi-mem.h +++ b/include/linux/spi/spi-mem.h | |||
| @@ -330,6 +330,11 @@ ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc, | |||
| 330 | u64 offs, size_t len, void *buf); | 330 | u64 offs, size_t len, void *buf); |
| 331 | ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, | 331 | ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc, |
| 332 | u64 offs, size_t len, const void *buf); | 332 | u64 offs, size_t len, const void *buf); |
| 333 | struct spi_mem_dirmap_desc * | ||
| 334 | devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem, | ||
| 335 | const struct spi_mem_dirmap_info *info); | ||
| 336 | void devm_spi_mem_dirmap_destroy(struct device *dev, | ||
| 337 | struct spi_mem_dirmap_desc *desc); | ||
| 333 | 338 | ||
| 334 | int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, | 339 | int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv, |
| 335 | struct module *owner); | 340 | struct module *owner); |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index 314d922ca607..662b336aa2e4 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <linux/kthread.h> | 12 | #include <linux/kthread.h> |
| 13 | #include <linux/completion.h> | 13 | #include <linux/completion.h> |
| 14 | #include <linux/scatterlist.h> | 14 | #include <linux/scatterlist.h> |
| 15 | #include <linux/gpio/consumer.h> | ||
| 15 | 16 | ||
| 16 | struct dma_chan; | 17 | struct dma_chan; |
| 17 | struct property_entry; | 18 | struct property_entry; |
| @@ -116,8 +117,13 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats, | |||
| 116 | * @modalias: Name of the driver to use with this device, or an alias | 117 | * @modalias: Name of the driver to use with this device, or an alias |
| 117 | * for that name. This appears in the sysfs "modalias" attribute | 118 | * for that name. This appears in the sysfs "modalias" attribute |
| 118 | * for driver coldplugging, and in uevents used for hotplugging | 119 | * for driver coldplugging, and in uevents used for hotplugging |
| 119 | * @cs_gpio: gpio number of the chipselect line (optional, -ENOENT when | 120 | * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when |
| 121 | * not using a GPIO line) use cs_gpiod in new drivers by opting in on | ||
| 122 | * the spi_master. | ||
| 123 | * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when | ||
| 120 | * not using a GPIO line) | 124 | * not using a GPIO line) |
| 125 | * @word_delay_usecs: microsecond delay to be inserted between consecutive | ||
| 126 | * words of a transfer | ||
| 121 | * | 127 | * |
| 122 | * @statistics: statistics for the spi_device | 128 | * @statistics: statistics for the spi_device |
| 123 | * | 129 | * |
| @@ -163,7 +169,9 @@ struct spi_device { | |||
| 163 | void *controller_data; | 169 | void *controller_data; |
| 164 | char modalias[SPI_NAME_SIZE]; | 170 | char modalias[SPI_NAME_SIZE]; |
| 165 | const char *driver_override; | 171 | const char *driver_override; |
| 166 | int cs_gpio; /* chip select gpio */ | 172 | int cs_gpio; /* LEGACY: chip select gpio */ |
| 173 | struct gpio_desc *cs_gpiod; /* chip select gpio desc */ | ||
| 174 | uint8_t word_delay_usecs; /* inter-word delay */ | ||
| 167 | 175 | ||
| 168 | /* the statistics */ | 176 | /* the statistics */ |
| 169 | struct spi_statistics statistics; | 177 | struct spi_statistics statistics; |
| @@ -376,9 +384,17 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv) | |||
| 376 | * controller has native support for memory like operations. | 384 | * controller has native support for memory like operations. |
| 377 | * @unprepare_message: undo any work done by prepare_message(). | 385 | * @unprepare_message: undo any work done by prepare_message(). |
| 378 | * @slave_abort: abort the ongoing transfer request on an SPI slave controller | 386 | * @slave_abort: abort the ongoing transfer request on an SPI slave controller |
| 379 | * @cs_gpios: Array of GPIOs to use as chip select lines; one per CS | 387 | * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per |
| 380 | * number. Any individual value may be -ENOENT for CS lines that | 388 | * CS number. Any individual value may be -ENOENT for CS lines that |
| 389 | * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods | ||
| 390 | * in new drivers. | ||
| 391 | * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS | ||
| 392 | * number. Any individual value may be NULL for CS lines that | ||
| 381 | * are not GPIOs (driven by the SPI controller itself). | 393 | * are not GPIOs (driven by the SPI controller itself). |
| 394 | * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab | ||
| 395 | * GPIO descriptors rather than using global GPIO numbers grabbed by the | ||
| 396 | * driver. This will fill in @cs_gpiods and @cs_gpios should not be used, | ||
| 397 | * and SPI devices will have the cs_gpiod assigned rather than cs_gpio. | ||
| 382 | * @statistics: statistics for the spi_controller | 398 | * @statistics: statistics for the spi_controller |
| 383 | * @dma_tx: DMA transmit channel | 399 | * @dma_tx: DMA transmit channel |
| 384 | * @dma_rx: DMA receive channel | 400 | * @dma_rx: DMA receive channel |
| @@ -557,6 +573,8 @@ struct spi_controller { | |||
| 557 | 573 | ||
| 558 | /* gpio chip select */ | 574 | /* gpio chip select */ |
| 559 | int *cs_gpios; | 575 | int *cs_gpios; |
| 576 | struct gpio_desc **cs_gpiods; | ||
| 577 | bool use_gpio_descriptors; | ||
| 560 | 578 | ||
| 561 | /* statistics */ | 579 | /* statistics */ |
| 562 | struct spi_statistics statistics; | 580 | struct spi_statistics statistics; |
| @@ -706,6 +724,8 @@ extern void spi_res_release(struct spi_controller *ctlr, | |||
| 706 | * @delay_usecs: microseconds to delay after this transfer before | 724 | * @delay_usecs: microseconds to delay after this transfer before |
| 707 | * (optionally) changing the chipselect status, then starting | 725 | * (optionally) changing the chipselect status, then starting |
| 708 | * the next transfer or completing this @spi_message. | 726 | * the next transfer or completing this @spi_message. |
| 727 | * @word_delay_usecs: microseconds to inter word delay after each word size | ||
| 728 | * (set by bits_per_word) transmission. | ||
| 709 | * @word_delay: clock cycles to inter word delay after each word size | 729 | * @word_delay: clock cycles to inter word delay after each word size |
| 710 | * (set by bits_per_word) transmission. | 730 | * (set by bits_per_word) transmission. |
| 711 | * @transfer_list: transfers are sequenced through @spi_message.transfers | 731 | * @transfer_list: transfers are sequenced through @spi_message.transfers |
| @@ -788,6 +808,7 @@ struct spi_transfer { | |||
| 788 | #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ | 808 | #define SPI_NBITS_DUAL 0x02 /* 2bits transfer */ |
| 789 | #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ | 809 | #define SPI_NBITS_QUAD 0x04 /* 4bits transfer */ |
| 790 | u8 bits_per_word; | 810 | u8 bits_per_word; |
| 811 | u8 word_delay_usecs; | ||
| 791 | u16 delay_usecs; | 812 | u16 delay_usecs; |
| 792 | u32 speed_hz; | 813 | u32 speed_hz; |
| 793 | u16 word_delay; | 814 | u16 word_delay; |
diff --git a/include/linux/srcu.h b/include/linux/srcu.h index c614375cd264..c495b2d51569 100644 --- a/include/linux/srcu.h +++ b/include/linux/srcu.h | |||
| @@ -1,24 +1,11 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * Sleepable Read-Copy Update mechanism for mutual exclusion | 3 | * Sleepable Read-Copy Update mechanism for mutual exclusion |
| 3 | * | 4 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright (C) IBM Corporation, 2006 | 5 | * Copyright (C) IBM Corporation, 2006 |
| 19 | * Copyright (C) Fujitsu, 2012 | 6 | * Copyright (C) Fujitsu, 2012 |
| 20 | * | 7 | * |
| 21 | * Author: Paul McKenney <paulmck@us.ibm.com> | 8 | * Author: Paul McKenney <paulmck@linux.ibm.com> |
| 22 | * Lai Jiangshan <laijs@cn.fujitsu.com> | 9 | * Lai Jiangshan <laijs@cn.fujitsu.com> |
| 23 | * | 10 | * |
| 24 | * For detailed explanation of Read-Copy Update mechanism see - | 11 | * For detailed explanation of Read-Copy Update mechanism see - |
| @@ -223,6 +210,7 @@ srcu_read_lock_notrace(struct srcu_struct *ssp) __acquires(ssp) | |||
| 223 | static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) | 210 | static inline void srcu_read_unlock(struct srcu_struct *ssp, int idx) |
| 224 | __releases(ssp) | 211 | __releases(ssp) |
| 225 | { | 212 | { |
| 213 | WARN_ON_ONCE(idx & ~0x1); | ||
| 226 | rcu_lock_release(&(ssp)->dep_map); | 214 | rcu_lock_release(&(ssp)->dep_map); |
| 227 | __srcu_read_unlock(ssp, idx); | 215 | __srcu_read_unlock(ssp, idx); |
| 228 | } | 216 | } |
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h index b19216aaaef2..5a5a1941ca15 100644 --- a/include/linux/srcutiny.h +++ b/include/linux/srcutiny.h | |||
| @@ -1,24 +1,11 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * Sleepable Read-Copy Update mechanism for mutual exclusion, | 3 | * Sleepable Read-Copy Update mechanism for mutual exclusion, |
| 3 | * tiny variant. | 4 | * tiny variant. |
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, you can access it online at | ||
| 17 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 18 | * | ||
| 19 | * Copyright (C) IBM Corporation, 2017 | 6 | * Copyright (C) IBM Corporation, 2017 |
| 20 | * | 7 | * |
| 21 | * Author: Paul McKenney <paulmck@us.ibm.com> | 8 | * Author: Paul McKenney <paulmck@linux.ibm.com> |
| 22 | */ | 9 | */ |
| 23 | 10 | ||
| 24 | #ifndef _LINUX_SRCU_TINY_H | 11 | #ifndef _LINUX_SRCU_TINY_H |
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h index 6f292bd3e7db..7f7c8c050f63 100644 --- a/include/linux/srcutree.h +++ b/include/linux/srcutree.h | |||
| @@ -1,24 +1,11 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * Sleepable Read-Copy Update mechanism for mutual exclusion, | 3 | * Sleepable Read-Copy Update mechanism for mutual exclusion, |
| 3 | * tree variant. | 4 | * tree variant. |
| 4 | * | 5 | * |
| 5 | * This program is free software; you can redistribute it and/or modify | ||
| 6 | * it under the terms of the GNU General Public License as published by | ||
| 7 | * the Free Software Foundation; either version 2 of the License, or | ||
| 8 | * (at your option) any later version. | ||
| 9 | * | ||
| 10 | * This program is distributed in the hope that it will be useful, | ||
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 13 | * GNU General Public License for more details. | ||
| 14 | * | ||
| 15 | * You should have received a copy of the GNU General Public License | ||
| 16 | * along with this program; if not, you can access it online at | ||
| 17 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 18 | * | ||
| 19 | * Copyright (C) IBM Corporation, 2017 | 6 | * Copyright (C) IBM Corporation, 2017 |
| 20 | * | 7 | * |
| 21 | * Author: Paul McKenney <paulmck@us.ibm.com> | 8 | * Author: Paul McKenney <paulmck@linux.ibm.com> |
| 22 | */ | 9 | */ |
| 23 | 10 | ||
| 24 | #ifndef _LINUX_SRCU_TREE_H | 11 | #ifndef _LINUX_SRCU_TREE_H |
| @@ -45,7 +32,8 @@ struct srcu_data { | |||
| 45 | unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ | 32 | unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */ |
| 46 | unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ | 33 | unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */ |
| 47 | bool srcu_cblist_invoking; /* Invoking these CBs? */ | 34 | bool srcu_cblist_invoking; /* Invoking these CBs? */ |
| 48 | struct delayed_work work; /* Context for CB invoking. */ | 35 | struct timer_list delay_work; /* Delay for CB invoking */ |
| 36 | struct work_struct work; /* Context for CB invoking. */ | ||
| 49 | struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ | 37 | struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */ |
| 50 | struct srcu_node *mynode; /* Leaf srcu_node. */ | 38 | struct srcu_node *mynode; /* Leaf srcu_node. */ |
| 51 | unsigned long grpmask; /* Mask for leaf srcu_node */ | 39 | unsigned long grpmask; /* Mask for leaf srcu_node */ |
diff --git a/include/linux/statfs.h b/include/linux/statfs.h index 3142e98546ac..9bc69edb8f18 100644 --- a/include/linux/statfs.h +++ b/include/linux/statfs.h | |||
| @@ -41,4 +41,7 @@ struct kstatfs { | |||
| 41 | #define ST_NODIRATIME 0x0800 /* do not update directory access times */ | 41 | #define ST_NODIRATIME 0x0800 /* do not update directory access times */ |
| 42 | #define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */ | 42 | #define ST_RELATIME 0x1000 /* update atime relative to mtime/ctime */ |
| 43 | 43 | ||
| 44 | struct dentry; | ||
| 45 | extern int vfs_get_fsid(struct dentry *dentry, __kernel_fsid_t *fsid); | ||
| 46 | |||
| 44 | #endif | 47 | #endif |
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h index 7ddfc65586b0..4335bd771ce5 100644 --- a/include/linux/stmmac.h +++ b/include/linux/stmmac.h | |||
| @@ -184,6 +184,7 @@ struct plat_stmmacenet_data { | |||
| 184 | struct clk *pclk; | 184 | struct clk *pclk; |
| 185 | struct clk *clk_ptp_ref; | 185 | struct clk *clk_ptp_ref; |
| 186 | unsigned int clk_ptp_rate; | 186 | unsigned int clk_ptp_rate; |
| 187 | unsigned int clk_ref_rate; | ||
| 187 | struct reset_control *stmmac_rst; | 188 | struct reset_control *stmmac_rst; |
| 188 | struct stmmac_axi *axi; | 189 | struct stmmac_axi *axi; |
| 189 | int has_gmac4; | 190 | int has_gmac4; |
diff --git a/include/linux/string.h b/include/linux/string.h index 7927b875f80c..6ab0a6fa512e 100644 --- a/include/linux/string.h +++ b/include/linux/string.h | |||
| @@ -150,6 +150,9 @@ extern void * memscan(void *,int,__kernel_size_t); | |||
| 150 | #ifndef __HAVE_ARCH_MEMCMP | 150 | #ifndef __HAVE_ARCH_MEMCMP |
| 151 | extern int memcmp(const void *,const void *,__kernel_size_t); | 151 | extern int memcmp(const void *,const void *,__kernel_size_t); |
| 152 | #endif | 152 | #endif |
| 153 | #ifndef __HAVE_ARCH_BCMP | ||
| 154 | extern int bcmp(const void *,const void *,__kernel_size_t); | ||
| 155 | #endif | ||
| 153 | #ifndef __HAVE_ARCH_MEMCHR | 156 | #ifndef __HAVE_ARCH_MEMCHR |
| 154 | extern void * memchr(const void *,int,__kernel_size_t); | 157 | extern void * memchr(const void *,int,__kernel_size_t); |
| 155 | #endif | 158 | #endif |
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h index eed3cb16ccf1..5f9076fdb090 100644 --- a/include/linux/sunrpc/auth.h +++ b/include/linux/sunrpc/auth.h | |||
| @@ -74,14 +74,12 @@ struct rpc_cred_cache; | |||
| 74 | struct rpc_authops; | 74 | struct rpc_authops; |
| 75 | struct rpc_auth { | 75 | struct rpc_auth { |
| 76 | unsigned int au_cslack; /* call cred size estimate */ | 76 | unsigned int au_cslack; /* call cred size estimate */ |
| 77 | /* guess at number of u32's auth adds before | 77 | unsigned int au_rslack; /* reply cred size estimate */ |
| 78 | * reply data; normally the verifier size: */ | 78 | unsigned int au_verfsize; /* size of reply verifier */ |
| 79 | unsigned int au_rslack; | 79 | unsigned int au_ralign; /* words before UL header */ |
| 80 | /* for gss, used to calculate au_rslack: */ | 80 | |
| 81 | unsigned int au_verfsize; | 81 | unsigned int au_flags; |
| 82 | 82 | const struct rpc_authops *au_ops; | |
| 83 | unsigned int au_flags; /* various flags */ | ||
| 84 | const struct rpc_authops *au_ops; /* operations */ | ||
| 85 | rpc_authflavor_t au_flavor; /* pseudoflavor (note may | 83 | rpc_authflavor_t au_flavor; /* pseudoflavor (note may |
| 86 | * differ from the flavor in | 84 | * differ from the flavor in |
| 87 | * au_ops->au_flavor in gss | 85 | * au_ops->au_flavor in gss |
| @@ -131,13 +129,15 @@ struct rpc_credops { | |||
| 131 | void (*crdestroy)(struct rpc_cred *); | 129 | void (*crdestroy)(struct rpc_cred *); |
| 132 | 130 | ||
| 133 | int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); | 131 | int (*crmatch)(struct auth_cred *, struct rpc_cred *, int); |
| 134 | __be32 * (*crmarshal)(struct rpc_task *, __be32 *); | 132 | int (*crmarshal)(struct rpc_task *task, |
| 133 | struct xdr_stream *xdr); | ||
| 135 | int (*crrefresh)(struct rpc_task *); | 134 | int (*crrefresh)(struct rpc_task *); |
| 136 | __be32 * (*crvalidate)(struct rpc_task *, __be32 *); | 135 | int (*crvalidate)(struct rpc_task *task, |
| 137 | int (*crwrap_req)(struct rpc_task *, kxdreproc_t, | 136 | struct xdr_stream *xdr); |
| 138 | void *, __be32 *, void *); | 137 | int (*crwrap_req)(struct rpc_task *task, |
| 139 | int (*crunwrap_resp)(struct rpc_task *, kxdrdproc_t, | 138 | struct xdr_stream *xdr); |
| 140 | void *, __be32 *, void *); | 139 | int (*crunwrap_resp)(struct rpc_task *task, |
| 140 | struct xdr_stream *xdr); | ||
| 141 | int (*crkey_timeout)(struct rpc_cred *); | 141 | int (*crkey_timeout)(struct rpc_cred *); |
| 142 | char * (*crstringify_acceptor)(struct rpc_cred *); | 142 | char * (*crstringify_acceptor)(struct rpc_cred *); |
| 143 | bool (*crneed_reencode)(struct rpc_task *); | 143 | bool (*crneed_reencode)(struct rpc_task *); |
| @@ -165,10 +165,18 @@ struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred * | |||
| 165 | void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); | 165 | void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *); |
| 166 | struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); | 166 | struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int); |
| 167 | void put_rpccred(struct rpc_cred *); | 167 | void put_rpccred(struct rpc_cred *); |
| 168 | __be32 * rpcauth_marshcred(struct rpc_task *, __be32 *); | 168 | int rpcauth_marshcred(struct rpc_task *task, |
| 169 | __be32 * rpcauth_checkverf(struct rpc_task *, __be32 *); | 169 | struct xdr_stream *xdr); |
| 170 | int rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp, __be32 *data, void *obj); | 170 | int rpcauth_checkverf(struct rpc_task *task, |
| 171 | int rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp, __be32 *data, void *obj); | 171 | struct xdr_stream *xdr); |
| 172 | int rpcauth_wrap_req_encode(struct rpc_task *task, | ||
| 173 | struct xdr_stream *xdr); | ||
| 174 | int rpcauth_wrap_req(struct rpc_task *task, | ||
| 175 | struct xdr_stream *xdr); | ||
| 176 | int rpcauth_unwrap_resp_decode(struct rpc_task *task, | ||
| 177 | struct xdr_stream *xdr); | ||
| 178 | int rpcauth_unwrap_resp(struct rpc_task *task, | ||
| 179 | struct xdr_stream *xdr); | ||
| 172 | bool rpcauth_xmit_need_reencode(struct rpc_task *task); | 180 | bool rpcauth_xmit_need_reencode(struct rpc_task *task); |
| 173 | int rpcauth_refreshcred(struct rpc_task *); | 181 | int rpcauth_refreshcred(struct rpc_task *); |
| 174 | void rpcauth_invalcred(struct rpc_task *); | 182 | void rpcauth_invalcred(struct rpc_task *); |
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h index 1c441714d569..98bc9883b230 100644 --- a/include/linux/sunrpc/clnt.h +++ b/include/linux/sunrpc/clnt.h | |||
| @@ -169,6 +169,9 @@ int rpcb_v4_register(struct net *net, const u32 program, | |||
| 169 | const char *netid); | 169 | const char *netid); |
| 170 | void rpcb_getport_async(struct rpc_task *); | 170 | void rpcb_getport_async(struct rpc_task *); |
| 171 | 171 | ||
| 172 | void rpc_prepare_reply_pages(struct rpc_rqst *req, struct page **pages, | ||
| 173 | unsigned int base, unsigned int len, | ||
| 174 | unsigned int hdrsize); | ||
| 172 | void rpc_call_start(struct rpc_task *); | 175 | void rpc_call_start(struct rpc_task *); |
| 173 | int rpc_call_async(struct rpc_clnt *clnt, | 176 | int rpc_call_async(struct rpc_clnt *clnt, |
| 174 | const struct rpc_message *msg, int flags, | 177 | const struct rpc_message *msg, int flags, |
diff --git a/include/linux/sunrpc/gss_krb5_enctypes.h b/include/linux/sunrpc/gss_krb5_enctypes.h index ec6234eee89c..981c89cef19d 100644 --- a/include/linux/sunrpc/gss_krb5_enctypes.h +++ b/include/linux/sunrpc/gss_krb5_enctypes.h | |||
| @@ -1,4 +1,44 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
| 1 | /* | 2 | /* |
| 2 | * Dumb way to share this static piece of information with nfsd | 3 | * Define the string that exports the set of kernel-supported |
| 4 | * Kerberos enctypes. This list is sent via upcall to gssd, and | ||
| 5 | * is also exposed via the nfsd /proc API. The consumers generally | ||
| 6 | * treat this as an ordered list, where the first item in the list | ||
| 7 | * is the most preferred. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #ifndef _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H | ||
| 11 | #define _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H | ||
| 12 | |||
| 13 | #ifdef CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES | ||
| 14 | |||
| 15 | /* | ||
| 16 | * NB: This list includes encryption types that were deprecated | ||
| 17 | * by RFC 8429 (DES3_CBC_SHA1 and ARCFOUR_HMAC). | ||
| 18 | * | ||
| 19 | * ENCTYPE_AES256_CTS_HMAC_SHA1_96 | ||
| 20 | * ENCTYPE_AES128_CTS_HMAC_SHA1_96 | ||
| 21 | * ENCTYPE_DES3_CBC_SHA1 | ||
| 22 | * ENCTYPE_ARCFOUR_HMAC | ||
| 23 | */ | ||
| 24 | #define KRB5_SUPPORTED_ENCTYPES "18,17,16,23" | ||
| 25 | |||
| 26 | #else /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ | ||
| 27 | |||
| 28 | /* | ||
| 29 | * NB: This list includes encryption types that were deprecated | ||
| 30 | * by RFC 8429 and RFC 6649. | ||
| 31 | * | ||
| 32 | * ENCTYPE_AES256_CTS_HMAC_SHA1_96 | ||
| 33 | * ENCTYPE_AES128_CTS_HMAC_SHA1_96 | ||
| 34 | * ENCTYPE_DES3_CBC_SHA1 | ||
| 35 | * ENCTYPE_ARCFOUR_HMAC | ||
| 36 | * ENCTYPE_DES_CBC_MD5 | ||
| 37 | * ENCTYPE_DES_CBC_CRC | ||
| 38 | * ENCTYPE_DES_CBC_MD4 | ||
| 3 | */ | 39 | */ |
| 4 | #define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" | 40 | #define KRB5_SUPPORTED_ENCTYPES "18,17,16,23,3,1,2" |
| 41 | |||
| 42 | #endif /* CONFIG_SUNRPC_DISABLE_INSECURE_ENCTYPES */ | ||
| 43 | |||
| 44 | #endif /* _LINUX_SUNRPC_GSS_KRB5_ENCTYPES_H */ | ||
diff --git a/include/linux/sunrpc/sched.h b/include/linux/sunrpc/sched.h index 219aa3910a0c..52d41d0c1ae1 100644 --- a/include/linux/sunrpc/sched.h +++ b/include/linux/sunrpc/sched.h | |||
| @@ -97,6 +97,7 @@ typedef void (*rpc_action)(struct rpc_task *); | |||
| 97 | 97 | ||
| 98 | struct rpc_call_ops { | 98 | struct rpc_call_ops { |
| 99 | void (*rpc_call_prepare)(struct rpc_task *, void *); | 99 | void (*rpc_call_prepare)(struct rpc_task *, void *); |
| 100 | void (*rpc_call_prepare_transmit)(struct rpc_task *, void *); | ||
| 100 | void (*rpc_call_done)(struct rpc_task *, void *); | 101 | void (*rpc_call_done)(struct rpc_task *, void *); |
| 101 | void (*rpc_count_stats)(struct rpc_task *, void *); | 102 | void (*rpc_count_stats)(struct rpc_task *, void *); |
| 102 | void (*rpc_release)(void *); | 103 | void (*rpc_release)(void *); |
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h index 2ec128060239..9ee3970ba59c 100644 --- a/include/linux/sunrpc/xdr.h +++ b/include/linux/sunrpc/xdr.h | |||
| @@ -87,6 +87,16 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) | |||
| 87 | #define xdr_one cpu_to_be32(1) | 87 | #define xdr_one cpu_to_be32(1) |
| 88 | #define xdr_two cpu_to_be32(2) | 88 | #define xdr_two cpu_to_be32(2) |
| 89 | 89 | ||
| 90 | #define rpc_auth_null cpu_to_be32(RPC_AUTH_NULL) | ||
| 91 | #define rpc_auth_unix cpu_to_be32(RPC_AUTH_UNIX) | ||
| 92 | #define rpc_auth_short cpu_to_be32(RPC_AUTH_SHORT) | ||
| 93 | #define rpc_auth_gss cpu_to_be32(RPC_AUTH_GSS) | ||
| 94 | |||
| 95 | #define rpc_call cpu_to_be32(RPC_CALL) | ||
| 96 | #define rpc_reply cpu_to_be32(RPC_REPLY) | ||
| 97 | |||
| 98 | #define rpc_msg_accepted cpu_to_be32(RPC_MSG_ACCEPTED) | ||
| 99 | |||
| 90 | #define rpc_success cpu_to_be32(RPC_SUCCESS) | 100 | #define rpc_success cpu_to_be32(RPC_SUCCESS) |
| 91 | #define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) | 101 | #define rpc_prog_unavail cpu_to_be32(RPC_PROG_UNAVAIL) |
| 92 | #define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) | 102 | #define rpc_prog_mismatch cpu_to_be32(RPC_PROG_MISMATCH) |
| @@ -95,6 +105,9 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) | |||
| 95 | #define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) | 105 | #define rpc_system_err cpu_to_be32(RPC_SYSTEM_ERR) |
| 96 | #define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) | 106 | #define rpc_drop_reply cpu_to_be32(RPC_DROP_REPLY) |
| 97 | 107 | ||
| 108 | #define rpc_mismatch cpu_to_be32(RPC_MISMATCH) | ||
| 109 | #define rpc_auth_error cpu_to_be32(RPC_AUTH_ERROR) | ||
| 110 | |||
| 98 | #define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) | 111 | #define rpc_auth_ok cpu_to_be32(RPC_AUTH_OK) |
| 99 | #define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) | 112 | #define rpc_autherr_badcred cpu_to_be32(RPC_AUTH_BADCRED) |
| 100 | #define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) | 113 | #define rpc_autherr_rejectedcred cpu_to_be32(RPC_AUTH_REJECTEDCRED) |
| @@ -103,7 +116,6 @@ xdr_buf_init(struct xdr_buf *buf, void *start, size_t len) | |||
| 103 | #define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) | 116 | #define rpc_autherr_tooweak cpu_to_be32(RPC_AUTH_TOOWEAK) |
| 104 | #define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) | 117 | #define rpcsec_gsserr_credproblem cpu_to_be32(RPCSEC_GSS_CREDPROBLEM) |
| 105 | #define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) | 118 | #define rpcsec_gsserr_ctxproblem cpu_to_be32(RPCSEC_GSS_CTXPROBLEM) |
| 106 | #define rpc_autherr_oldseqnum cpu_to_be32(101) | ||
| 107 | 119 | ||
| 108 | /* | 120 | /* |
| 109 | * Miscellaneous XDR helper functions | 121 | * Miscellaneous XDR helper functions |
| @@ -167,7 +179,6 @@ xdr_adjust_iovec(struct kvec *iov, __be32 *p) | |||
| 167 | extern void xdr_shift_buf(struct xdr_buf *, size_t); | 179 | extern void xdr_shift_buf(struct xdr_buf *, size_t); |
| 168 | extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); | 180 | extern void xdr_buf_from_iov(struct kvec *, struct xdr_buf *); |
| 169 | extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); | 181 | extern int xdr_buf_subsegment(struct xdr_buf *, struct xdr_buf *, unsigned int, unsigned int); |
| 170 | extern void xdr_buf_trim(struct xdr_buf *, unsigned int); | ||
| 171 | extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int); | 182 | extern int xdr_buf_read_netobj(struct xdr_buf *, struct xdr_netobj *, unsigned int); |
| 172 | extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); | 183 | extern int read_bytes_from_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); |
| 173 | extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); | 184 | extern int write_bytes_to_xdr_buf(struct xdr_buf *, unsigned int, void *, unsigned int); |
| @@ -217,6 +228,8 @@ struct xdr_stream { | |||
| 217 | struct kvec scratch; /* Scratch buffer */ | 228 | struct kvec scratch; /* Scratch buffer */ |
| 218 | struct page **page_ptr; /* pointer to the current page */ | 229 | struct page **page_ptr; /* pointer to the current page */ |
| 219 | unsigned int nwords; /* Remaining decode buffer length */ | 230 | unsigned int nwords; /* Remaining decode buffer length */ |
| 231 | |||
| 232 | struct rpc_rqst *rqst; /* For debugging */ | ||
| 220 | }; | 233 | }; |
| 221 | 234 | ||
| 222 | /* | 235 | /* |
| @@ -227,7 +240,8 @@ typedef void (*kxdreproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, | |||
| 227 | typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, | 240 | typedef int (*kxdrdproc_t)(struct rpc_rqst *rqstp, struct xdr_stream *xdr, |
| 228 | void *obj); | 241 | void *obj); |
| 229 | 242 | ||
| 230 | extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); | 243 | extern void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, |
| 244 | __be32 *p, struct rpc_rqst *rqst); | ||
| 231 | extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); | 245 | extern __be32 *xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes); |
| 232 | extern void xdr_commit_encode(struct xdr_stream *xdr); | 246 | extern void xdr_commit_encode(struct xdr_stream *xdr); |
| 233 | extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); | 247 | extern void xdr_truncate_encode(struct xdr_stream *xdr, size_t len); |
| @@ -235,7 +249,8 @@ extern int xdr_restrict_buflen(struct xdr_stream *xdr, int newbuflen); | |||
| 235 | extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, | 249 | extern void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, |
| 236 | unsigned int base, unsigned int len); | 250 | unsigned int base, unsigned int len); |
| 237 | extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); | 251 | extern unsigned int xdr_stream_pos(const struct xdr_stream *xdr); |
| 238 | extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p); | 252 | extern void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, |
| 253 | __be32 *p, struct rpc_rqst *rqst); | ||
| 239 | extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, | 254 | extern void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf, |
| 240 | struct page **pages, unsigned int len); | 255 | struct page **pages, unsigned int len); |
| 241 | extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); | 256 | extern void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen); |
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h index ad7e910b119d..3a391544299e 100644 --- a/include/linux/sunrpc/xprt.h +++ b/include/linux/sunrpc/xprt.h | |||
| @@ -196,8 +196,6 @@ struct rpc_xprt { | |||
| 196 | 196 | ||
| 197 | size_t max_payload; /* largest RPC payload size, | 197 | size_t max_payload; /* largest RPC payload size, |
| 198 | in bytes */ | 198 | in bytes */ |
| 199 | unsigned int tsh_size; /* size of transport specific | ||
| 200 | header */ | ||
| 201 | 199 | ||
| 202 | struct rpc_wait_queue binding; /* requests waiting on rpcbind */ | 200 | struct rpc_wait_queue binding; /* requests waiting on rpcbind */ |
| 203 | struct rpc_wait_queue sending; /* requests waiting to send */ | 201 | struct rpc_wait_queue sending; /* requests waiting to send */ |
| @@ -362,11 +360,6 @@ struct rpc_xprt * xprt_alloc(struct net *net, size_t size, | |||
| 362 | unsigned int max_req); | 360 | unsigned int max_req); |
| 363 | void xprt_free(struct rpc_xprt *); | 361 | void xprt_free(struct rpc_xprt *); |
| 364 | 362 | ||
| 365 | static inline __be32 *xprt_skip_transport_header(struct rpc_xprt *xprt, __be32 *p) | ||
| 366 | { | ||
| 367 | return p + xprt->tsh_size; | ||
| 368 | } | ||
| 369 | |||
| 370 | static inline int | 363 | static inline int |
| 371 | xprt_enable_swap(struct rpc_xprt *xprt) | 364 | xprt_enable_swap(struct rpc_xprt *xprt) |
| 372 | { | 365 | { |
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h index 458bfe0137f5..b81d0b3e0799 100644 --- a/include/linux/sunrpc/xprtsock.h +++ b/include/linux/sunrpc/xprtsock.h | |||
| @@ -26,6 +26,7 @@ struct sock_xprt { | |||
| 26 | */ | 26 | */ |
| 27 | struct socket * sock; | 27 | struct socket * sock; |
| 28 | struct sock * inet; | 28 | struct sock * inet; |
| 29 | struct file * file; | ||
| 29 | 30 | ||
| 30 | /* | 31 | /* |
| 31 | * State of TCP reply receive | 32 | * State of TCP reply receive |
diff --git a/include/linux/swap.h b/include/linux/swap.h index 622025ac1461..4bfb5c4ac108 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
| @@ -157,9 +157,9 @@ struct swap_extent { | |||
| 157 | /* | 157 | /* |
| 158 | * Max bad pages in the new format.. | 158 | * Max bad pages in the new format.. |
| 159 | */ | 159 | */ |
| 160 | #define __swapoffset(x) ((unsigned long)&((union swap_header *)0)->x) | ||
| 161 | #define MAX_SWAP_BADPAGES \ | 160 | #define MAX_SWAP_BADPAGES \ |
| 162 | ((__swapoffset(magic.magic) - __swapoffset(info.badpages)) / sizeof(int)) | 161 | ((offsetof(union swap_header, magic.magic) - \ |
| 162 | offsetof(union swap_header, info.badpages)) / sizeof(int)) | ||
| 163 | 163 | ||
| 164 | enum { | 164 | enum { |
| 165 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ | 165 | SWP_USED = (1 << 0), /* is slot in swap_info[] used? */ |
| @@ -307,7 +307,7 @@ struct vma_swap_readahead { | |||
| 307 | }; | 307 | }; |
| 308 | 308 | ||
| 309 | /* linux/mm/workingset.c */ | 309 | /* linux/mm/workingset.c */ |
| 310 | void *workingset_eviction(struct address_space *mapping, struct page *page); | 310 | void *workingset_eviction(struct page *page); |
| 311 | void workingset_refault(struct page *page, void *shadow); | 311 | void workingset_refault(struct page *page, void *shadow); |
| 312 | void workingset_activation(struct page *page); | 312 | void workingset_activation(struct page *page); |
| 313 | 313 | ||
| @@ -625,7 +625,7 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg) | |||
| 625 | return vm_swappiness; | 625 | return vm_swappiness; |
| 626 | 626 | ||
| 627 | /* root ? */ | 627 | /* root ? */ |
| 628 | if (mem_cgroup_disabled() || !memcg->css.parent) | 628 | if (mem_cgroup_disabled() || mem_cgroup_is_root(memcg)) |
| 629 | return vm_swappiness; | 629 | return vm_swappiness; |
| 630 | 630 | ||
| 631 | return memcg->swappiness; | 631 | return memcg->swappiness; |
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h index 7c007ed7505f..361f62bb4a8e 100644 --- a/include/linux/swiotlb.h +++ b/include/linux/swiotlb.h | |||
| @@ -60,9 +60,6 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev, | |||
| 60 | size_t size, enum dma_data_direction dir, | 60 | size_t size, enum dma_data_direction dir, |
| 61 | enum dma_sync_target target); | 61 | enum dma_sync_target target); |
| 62 | 62 | ||
| 63 | extern int | ||
| 64 | swiotlb_dma_supported(struct device *hwdev, u64 mask); | ||
| 65 | |||
| 66 | #ifdef CONFIG_SWIOTLB | 63 | #ifdef CONFIG_SWIOTLB |
| 67 | extern enum swiotlb_force swiotlb_force; | 64 | extern enum swiotlb_force swiotlb_force; |
| 68 | extern phys_addr_t io_tlb_start, io_tlb_end; | 65 | extern phys_addr_t io_tlb_start, io_tlb_end; |
| @@ -76,6 +73,8 @@ bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr, | |||
| 76 | size_t size, enum dma_data_direction dir, unsigned long attrs); | 73 | size_t size, enum dma_data_direction dir, unsigned long attrs); |
| 77 | void __init swiotlb_exit(void); | 74 | void __init swiotlb_exit(void); |
| 78 | unsigned int swiotlb_max_segment(void); | 75 | unsigned int swiotlb_max_segment(void); |
| 76 | size_t swiotlb_max_mapping_size(struct device *dev); | ||
| 77 | bool is_swiotlb_active(void); | ||
| 79 | #else | 78 | #else |
| 80 | #define swiotlb_force SWIOTLB_NO_FORCE | 79 | #define swiotlb_force SWIOTLB_NO_FORCE |
| 81 | static inline bool is_swiotlb_buffer(phys_addr_t paddr) | 80 | static inline bool is_swiotlb_buffer(phys_addr_t paddr) |
| @@ -95,6 +94,15 @@ static inline unsigned int swiotlb_max_segment(void) | |||
| 95 | { | 94 | { |
| 96 | return 0; | 95 | return 0; |
| 97 | } | 96 | } |
| 97 | static inline size_t swiotlb_max_mapping_size(struct device *dev) | ||
| 98 | { | ||
| 99 | return SIZE_MAX; | ||
| 100 | } | ||
| 101 | |||
| 102 | static inline bool is_swiotlb_active(void) | ||
| 103 | { | ||
| 104 | return false; | ||
| 105 | } | ||
| 98 | #endif /* CONFIG_SWIOTLB */ | 106 | #endif /* CONFIG_SWIOTLB */ |
| 99 | 107 | ||
| 100 | extern void swiotlb_print_info(void); | 108 | extern void swiotlb_print_info(void); |
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h index eee0412bdf4b..52a079b3a9a6 100644 --- a/include/linux/switchtec.h +++ b/include/linux/switchtec.h | |||
| @@ -248,9 +248,13 @@ struct ntb_ctrl_regs { | |||
| 248 | u32 win_size; | 248 | u32 win_size; |
| 249 | u64 xlate_addr; | 249 | u64 xlate_addr; |
| 250 | } bar_entry[6]; | 250 | } bar_entry[6]; |
| 251 | u32 reserved2[216]; | 251 | struct { |
| 252 | u32 req_id_table[256]; | 252 | u32 win_size; |
| 253 | u32 reserved3[512]; | 253 | u32 reserved[3]; |
| 254 | } bar_ext_entry[6]; | ||
| 255 | u32 reserved2[192]; | ||
| 256 | u32 req_id_table[512]; | ||
| 257 | u32 reserved3[256]; | ||
| 254 | u64 lut_entry[512]; | 258 | u64 lut_entry[512]; |
| 255 | } __packed; | 259 | } __packed; |
| 256 | 260 | ||
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 257cccba3062..e446806a561f 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -54,7 +54,7 @@ struct __sysctl_args; | |||
| 54 | struct sysinfo; | 54 | struct sysinfo; |
| 55 | struct timespec; | 55 | struct timespec; |
| 56 | struct timeval; | 56 | struct timeval; |
| 57 | struct timex; | 57 | struct __kernel_timex; |
| 58 | struct timezone; | 58 | struct timezone; |
| 59 | struct tms; | 59 | struct tms; |
| 60 | struct utimbuf; | 60 | struct utimbuf; |
| @@ -69,6 +69,7 @@ struct file_handle; | |||
| 69 | struct sigaltstack; | 69 | struct sigaltstack; |
| 70 | struct rseq; | 70 | struct rseq; |
| 71 | union bpf_attr; | 71 | union bpf_attr; |
| 72 | struct io_uring_params; | ||
| 72 | 73 | ||
| 73 | #include <linux/types.h> | 74 | #include <linux/types.h> |
| 74 | #include <linux/aio_abi.h> | 75 | #include <linux/aio_abi.h> |
| @@ -297,6 +298,11 @@ asmlinkage long sys_io_getevents(aio_context_t ctx_id, | |||
| 297 | long nr, | 298 | long nr, |
| 298 | struct io_event __user *events, | 299 | struct io_event __user *events, |
| 299 | struct __kernel_timespec __user *timeout); | 300 | struct __kernel_timespec __user *timeout); |
| 301 | asmlinkage long sys_io_getevents_time32(__u32 ctx_id, | ||
| 302 | __s32 min_nr, | ||
| 303 | __s32 nr, | ||
| 304 | struct io_event __user *events, | ||
| 305 | struct old_timespec32 __user *timeout); | ||
| 300 | asmlinkage long sys_io_pgetevents(aio_context_t ctx_id, | 306 | asmlinkage long sys_io_pgetevents(aio_context_t ctx_id, |
| 301 | long min_nr, | 307 | long min_nr, |
| 302 | long nr, | 308 | long nr, |
| @@ -309,6 +315,13 @@ asmlinkage long sys_io_pgetevents_time32(aio_context_t ctx_id, | |||
| 309 | struct io_event __user *events, | 315 | struct io_event __user *events, |
| 310 | struct old_timespec32 __user *timeout, | 316 | struct old_timespec32 __user *timeout, |
| 311 | const struct __aio_sigset *sig); | 317 | const struct __aio_sigset *sig); |
| 318 | asmlinkage long sys_io_uring_setup(u32 entries, | ||
| 319 | struct io_uring_params __user *p); | ||
| 320 | asmlinkage long sys_io_uring_enter(unsigned int fd, u32 to_submit, | ||
| 321 | u32 min_complete, u32 flags, | ||
| 322 | const sigset_t __user *sig, size_t sigsz); | ||
| 323 | asmlinkage long sys_io_uring_register(unsigned int fd, unsigned int op, | ||
| 324 | void __user *arg, unsigned int nr_args); | ||
| 312 | 325 | ||
| 313 | /* fs/xattr.c */ | 326 | /* fs/xattr.c */ |
| 314 | asmlinkage long sys_setxattr(const char __user *path, const char __user *name, | 327 | asmlinkage long sys_setxattr(const char __user *path, const char __user *name, |
| @@ -522,11 +535,19 @@ asmlinkage long sys_timerfd_settime(int ufd, int flags, | |||
| 522 | const struct __kernel_itimerspec __user *utmr, | 535 | const struct __kernel_itimerspec __user *utmr, |
| 523 | struct __kernel_itimerspec __user *otmr); | 536 | struct __kernel_itimerspec __user *otmr); |
| 524 | asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr); | 537 | asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr); |
| 538 | asmlinkage long sys_timerfd_gettime32(int ufd, | ||
| 539 | struct old_itimerspec32 __user *otmr); | ||
| 540 | asmlinkage long sys_timerfd_settime32(int ufd, int flags, | ||
| 541 | const struct old_itimerspec32 __user *utmr, | ||
| 542 | struct old_itimerspec32 __user *otmr); | ||
| 525 | 543 | ||
| 526 | /* fs/utimes.c */ | 544 | /* fs/utimes.c */ |
| 527 | asmlinkage long sys_utimensat(int dfd, const char __user *filename, | 545 | asmlinkage long sys_utimensat(int dfd, const char __user *filename, |
| 528 | struct __kernel_timespec __user *utimes, | 546 | struct __kernel_timespec __user *utimes, |
| 529 | int flags); | 547 | int flags); |
| 548 | asmlinkage long sys_utimensat_time32(unsigned int dfd, | ||
| 549 | const char __user *filename, | ||
| 550 | struct old_timespec32 __user *t, int flags); | ||
| 530 | 551 | ||
| 531 | /* kernel/acct.c */ | 552 | /* kernel/acct.c */ |
| 532 | asmlinkage long sys_acct(const char __user *name); | 553 | asmlinkage long sys_acct(const char __user *name); |
| @@ -555,6 +576,9 @@ asmlinkage long sys_unshare(unsigned long unshare_flags); | |||
| 555 | asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, | 576 | asmlinkage long sys_futex(u32 __user *uaddr, int op, u32 val, |
| 556 | struct __kernel_timespec __user *utime, u32 __user *uaddr2, | 577 | struct __kernel_timespec __user *utime, u32 __user *uaddr2, |
| 557 | u32 val3); | 578 | u32 val3); |
| 579 | asmlinkage long sys_futex_time32(u32 __user *uaddr, int op, u32 val, | ||
| 580 | struct old_timespec32 __user *utime, u32 __user *uaddr2, | ||
| 581 | u32 val3); | ||
| 558 | asmlinkage long sys_get_robust_list(int pid, | 582 | asmlinkage long sys_get_robust_list(int pid, |
| 559 | struct robust_list_head __user * __user *head_ptr, | 583 | struct robust_list_head __user * __user *head_ptr, |
| 560 | size_t __user *len_ptr); | 584 | size_t __user *len_ptr); |
| @@ -564,6 +588,8 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head, | |||
| 564 | /* kernel/hrtimer.c */ | 588 | /* kernel/hrtimer.c */ |
| 565 | asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp, | 589 | asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp, |
| 566 | struct __kernel_timespec __user *rmtp); | 590 | struct __kernel_timespec __user *rmtp); |
| 591 | asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp, | ||
| 592 | struct old_timespec32 __user *rmtp); | ||
| 567 | 593 | ||
| 568 | /* kernel/itimer.c */ | 594 | /* kernel/itimer.c */ |
| 569 | asmlinkage long sys_getitimer(int which, struct itimerval __user *value); | 595 | asmlinkage long sys_getitimer(int which, struct itimerval __user *value); |
| @@ -591,7 +617,7 @@ asmlinkage long sys_timer_gettime(timer_t timer_id, | |||
| 591 | asmlinkage long sys_timer_getoverrun(timer_t timer_id); | 617 | asmlinkage long sys_timer_getoverrun(timer_t timer_id); |
| 592 | asmlinkage long sys_timer_settime(timer_t timer_id, int flags, | 618 | asmlinkage long sys_timer_settime(timer_t timer_id, int flags, |
| 593 | const struct __kernel_itimerspec __user *new_setting, | 619 | const struct __kernel_itimerspec __user *new_setting, |
| 594 | struct itimerspec __user *old_setting); | 620 | struct __kernel_itimerspec __user *old_setting); |
| 595 | asmlinkage long sys_timer_delete(timer_t timer_id); | 621 | asmlinkage long sys_timer_delete(timer_t timer_id); |
| 596 | asmlinkage long sys_clock_settime(clockid_t which_clock, | 622 | asmlinkage long sys_clock_settime(clockid_t which_clock, |
| 597 | const struct __kernel_timespec __user *tp); | 623 | const struct __kernel_timespec __user *tp); |
| @@ -602,6 +628,20 @@ asmlinkage long sys_clock_getres(clockid_t which_clock, | |||
| 602 | asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, | 628 | asmlinkage long sys_clock_nanosleep(clockid_t which_clock, int flags, |
| 603 | const struct __kernel_timespec __user *rqtp, | 629 | const struct __kernel_timespec __user *rqtp, |
| 604 | struct __kernel_timespec __user *rmtp); | 630 | struct __kernel_timespec __user *rmtp); |
| 631 | asmlinkage long sys_timer_gettime32(timer_t timer_id, | ||
| 632 | struct old_itimerspec32 __user *setting); | ||
| 633 | asmlinkage long sys_timer_settime32(timer_t timer_id, int flags, | ||
| 634 | struct old_itimerspec32 __user *new, | ||
| 635 | struct old_itimerspec32 __user *old); | ||
| 636 | asmlinkage long sys_clock_settime32(clockid_t which_clock, | ||
| 637 | struct old_timespec32 __user *tp); | ||
| 638 | asmlinkage long sys_clock_gettime32(clockid_t which_clock, | ||
| 639 | struct old_timespec32 __user *tp); | ||
| 640 | asmlinkage long sys_clock_getres_time32(clockid_t which_clock, | ||
| 641 | struct old_timespec32 __user *tp); | ||
| 642 | asmlinkage long sys_clock_nanosleep_time32(clockid_t which_clock, int flags, | ||
| 643 | struct old_timespec32 __user *rqtp, | ||
| 644 | struct old_timespec32 __user *rmtp); | ||
| 605 | 645 | ||
| 606 | /* kernel/printk.c */ | 646 | /* kernel/printk.c */ |
| 607 | asmlinkage long sys_syslog(int type, char __user *buf, int len); | 647 | asmlinkage long sys_syslog(int type, char __user *buf, int len); |
| @@ -627,6 +667,8 @@ asmlinkage long sys_sched_get_priority_max(int policy); | |||
| 627 | asmlinkage long sys_sched_get_priority_min(int policy); | 667 | asmlinkage long sys_sched_get_priority_min(int policy); |
| 628 | asmlinkage long sys_sched_rr_get_interval(pid_t pid, | 668 | asmlinkage long sys_sched_rr_get_interval(pid_t pid, |
| 629 | struct __kernel_timespec __user *interval); | 669 | struct __kernel_timespec __user *interval); |
| 670 | asmlinkage long sys_sched_rr_get_interval_time32(pid_t pid, | ||
| 671 | struct old_timespec32 __user *interval); | ||
| 630 | 672 | ||
| 631 | /* kernel/signal.c */ | 673 | /* kernel/signal.c */ |
| 632 | asmlinkage long sys_restart_syscall(void); | 674 | asmlinkage long sys_restart_syscall(void); |
| @@ -695,7 +737,8 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, | |||
| 695 | struct timezone __user *tz); | 737 | struct timezone __user *tz); |
| 696 | asmlinkage long sys_settimeofday(struct timeval __user *tv, | 738 | asmlinkage long sys_settimeofday(struct timeval __user *tv, |
| 697 | struct timezone __user *tz); | 739 | struct timezone __user *tz); |
| 698 | asmlinkage long sys_adjtimex(struct timex __user *txc_p); | 740 | asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p); |
| 741 | asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p); | ||
| 699 | 742 | ||
| 700 | /* kernel/timer.c */ | 743 | /* kernel/timer.c */ |
| 701 | asmlinkage long sys_getpid(void); | 744 | asmlinkage long sys_getpid(void); |
| @@ -714,9 +757,18 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *msg_ptr, size_t | |||
| 714 | asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct __kernel_timespec __user *abs_timeout); | 757 | asmlinkage long sys_mq_timedreceive(mqd_t mqdes, char __user *msg_ptr, size_t msg_len, unsigned int __user *msg_prio, const struct __kernel_timespec __user *abs_timeout); |
| 715 | asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); | 758 | asmlinkage long sys_mq_notify(mqd_t mqdes, const struct sigevent __user *notification); |
| 716 | asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); | 759 | asmlinkage long sys_mq_getsetattr(mqd_t mqdes, const struct mq_attr __user *mqstat, struct mq_attr __user *omqstat); |
| 760 | asmlinkage long sys_mq_timedreceive_time32(mqd_t mqdes, | ||
| 761 | char __user *u_msg_ptr, | ||
| 762 | unsigned int msg_len, unsigned int __user *u_msg_prio, | ||
| 763 | const struct old_timespec32 __user *u_abs_timeout); | ||
| 764 | asmlinkage long sys_mq_timedsend_time32(mqd_t mqdes, | ||
| 765 | const char __user *u_msg_ptr, | ||
| 766 | unsigned int msg_len, unsigned int msg_prio, | ||
| 767 | const struct old_timespec32 __user *u_abs_timeout); | ||
| 717 | 768 | ||
| 718 | /* ipc/msg.c */ | 769 | /* ipc/msg.c */ |
| 719 | asmlinkage long sys_msgget(key_t key, int msgflg); | 770 | asmlinkage long sys_msgget(key_t key, int msgflg); |
| 771 | asmlinkage long sys_old_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); | ||
| 720 | asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); | 772 | asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf); |
| 721 | asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, | 773 | asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, |
| 722 | size_t msgsz, long msgtyp, int msgflg); | 774 | size_t msgsz, long msgtyp, int msgflg); |
| @@ -726,14 +778,19 @@ asmlinkage long sys_msgsnd(int msqid, struct msgbuf __user *msgp, | |||
| 726 | /* ipc/sem.c */ | 778 | /* ipc/sem.c */ |
| 727 | asmlinkage long sys_semget(key_t key, int nsems, int semflg); | 779 | asmlinkage long sys_semget(key_t key, int nsems, int semflg); |
| 728 | asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg); | 780 | asmlinkage long sys_semctl(int semid, int semnum, int cmd, unsigned long arg); |
| 781 | asmlinkage long sys_old_semctl(int semid, int semnum, int cmd, unsigned long arg); | ||
| 729 | asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, | 782 | asmlinkage long sys_semtimedop(int semid, struct sembuf __user *sops, |
| 730 | unsigned nsops, | 783 | unsigned nsops, |
| 731 | const struct __kernel_timespec __user *timeout); | 784 | const struct __kernel_timespec __user *timeout); |
| 785 | asmlinkage long sys_semtimedop_time32(int semid, struct sembuf __user *sops, | ||
| 786 | unsigned nsops, | ||
| 787 | const struct old_timespec32 __user *timeout); | ||
| 732 | asmlinkage long sys_semop(int semid, struct sembuf __user *sops, | 788 | asmlinkage long sys_semop(int semid, struct sembuf __user *sops, |
| 733 | unsigned nsops); | 789 | unsigned nsops); |
| 734 | 790 | ||
| 735 | /* ipc/shm.c */ | 791 | /* ipc/shm.c */ |
| 736 | asmlinkage long sys_shmget(key_t key, size_t size, int flag); | 792 | asmlinkage long sys_shmget(key_t key, size_t size, int flag); |
| 793 | asmlinkage long sys_old_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); | ||
| 737 | asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); | 794 | asmlinkage long sys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf); |
| 738 | asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); | 795 | asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg); |
| 739 | asmlinkage long sys_shmdt(char __user *shmaddr); | 796 | asmlinkage long sys_shmdt(char __user *shmaddr); |
| @@ -867,7 +924,9 @@ asmlinkage long sys_open_by_handle_at(int mountdirfd, | |||
| 867 | struct file_handle __user *handle, | 924 | struct file_handle __user *handle, |
| 868 | int flags); | 925 | int flags); |
| 869 | asmlinkage long sys_clock_adjtime(clockid_t which_clock, | 926 | asmlinkage long sys_clock_adjtime(clockid_t which_clock, |
| 870 | struct timex __user *tx); | 927 | struct __kernel_timex __user *tx); |
| 928 | asmlinkage long sys_clock_adjtime32(clockid_t which_clock, | ||
| 929 | struct old_timex32 __user *tx); | ||
| 871 | asmlinkage long sys_syncfs(int fd); | 930 | asmlinkage long sys_syncfs(int fd); |
| 872 | asmlinkage long sys_setns(int fd, int nstype); | 931 | asmlinkage long sys_setns(int fd, int nstype); |
| 873 | asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, | 932 | asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg, |
| @@ -926,6 +985,9 @@ asmlinkage long sys_statx(int dfd, const char __user *path, unsigned flags, | |||
| 926 | unsigned mask, struct statx __user *buffer); | 985 | unsigned mask, struct statx __user *buffer); |
| 927 | asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len, | 986 | asmlinkage long sys_rseq(struct rseq __user *rseq, uint32_t rseq_len, |
| 928 | int flags, uint32_t sig); | 987 | int flags, uint32_t sig); |
| 988 | asmlinkage long sys_pidfd_send_signal(int pidfd, int sig, | ||
| 989 | siginfo_t __user *info, | ||
| 990 | unsigned int flags); | ||
| 929 | 991 | ||
| 930 | /* | 992 | /* |
| 931 | * Architecture-specific system calls | 993 | * Architecture-specific system calls |
| @@ -1003,6 +1065,7 @@ asmlinkage long sys_alarm(unsigned int seconds); | |||
| 1003 | asmlinkage long sys_getpgrp(void); | 1065 | asmlinkage long sys_getpgrp(void); |
| 1004 | asmlinkage long sys_pause(void); | 1066 | asmlinkage long sys_pause(void); |
| 1005 | asmlinkage long sys_time(time_t __user *tloc); | 1067 | asmlinkage long sys_time(time_t __user *tloc); |
| 1068 | asmlinkage long sys_time32(old_time32_t __user *tloc); | ||
| 1006 | #ifdef __ARCH_WANT_SYS_UTIME | 1069 | #ifdef __ARCH_WANT_SYS_UTIME |
| 1007 | asmlinkage long sys_utime(char __user *filename, | 1070 | asmlinkage long sys_utime(char __user *filename, |
| 1008 | struct utimbuf __user *times); | 1071 | struct utimbuf __user *times); |
| @@ -1011,6 +1074,13 @@ asmlinkage long sys_utimes(char __user *filename, | |||
| 1011 | asmlinkage long sys_futimesat(int dfd, const char __user *filename, | 1074 | asmlinkage long sys_futimesat(int dfd, const char __user *filename, |
| 1012 | struct timeval __user *utimes); | 1075 | struct timeval __user *utimes); |
| 1013 | #endif | 1076 | #endif |
| 1077 | asmlinkage long sys_futimesat_time32(unsigned int dfd, | ||
| 1078 | const char __user *filename, | ||
| 1079 | struct old_timeval32 __user *t); | ||
| 1080 | asmlinkage long sys_utime32(const char __user *filename, | ||
| 1081 | struct old_utimbuf32 __user *t); | ||
| 1082 | asmlinkage long sys_utimes_time32(const char __user *filename, | ||
| 1083 | struct old_timeval32 __user *t); | ||
| 1014 | asmlinkage long sys_creat(const char __user *pathname, umode_t mode); | 1084 | asmlinkage long sys_creat(const char __user *pathname, umode_t mode); |
| 1015 | asmlinkage long sys_getdents(unsigned int fd, | 1085 | asmlinkage long sys_getdents(unsigned int fd, |
| 1016 | struct linux_dirent __user *dirent, | 1086 | struct linux_dirent __user *dirent, |
| @@ -1035,6 +1105,7 @@ asmlinkage long sys_fork(void); | |||
| 1035 | 1105 | ||
| 1036 | /* obsolete: kernel/time/time.c */ | 1106 | /* obsolete: kernel/time/time.c */ |
| 1037 | asmlinkage long sys_stime(time_t __user *tptr); | 1107 | asmlinkage long sys_stime(time_t __user *tptr); |
| 1108 | asmlinkage long sys_stime32(old_time32_t __user *tptr); | ||
| 1038 | 1109 | ||
| 1039 | /* obsolete: kernel/signal.c */ | 1110 | /* obsolete: kernel/signal.c */ |
| 1040 | asmlinkage long sys_sigpending(old_sigset_t __user *uset); | 1111 | asmlinkage long sys_sigpending(old_sigset_t __user *uset); |
| @@ -1185,6 +1256,10 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len, | |||
| 1185 | unsigned long prot, unsigned long flags, | 1256 | unsigned long prot, unsigned long flags, |
| 1186 | unsigned long fd, unsigned long pgoff); | 1257 | unsigned long fd, unsigned long pgoff); |
| 1187 | ssize_t ksys_readahead(int fd, loff_t offset, size_t count); | 1258 | ssize_t ksys_readahead(int fd, loff_t offset, size_t count); |
| 1259 | int ksys_ipc(unsigned int call, int first, unsigned long second, | ||
| 1260 | unsigned long third, void __user * ptr, long fifth); | ||
| 1261 | int compat_ksys_ipc(u32 call, int first, int second, | ||
| 1262 | u32 third, u32 ptr, u32 fifth); | ||
| 1188 | 1263 | ||
| 1189 | /* | 1264 | /* |
| 1190 | * The following kernel syscall equivalents are just wrappers to fs-internal | 1265 | * The following kernel syscall equivalents are just wrappers to fs-internal |
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h index 6cfe05893a76..4a49f80e7f71 100644 --- a/include/linux/tee_drv.h +++ b/include/linux/tee_drv.h | |||
| @@ -15,11 +15,14 @@ | |||
| 15 | #ifndef __TEE_DRV_H | 15 | #ifndef __TEE_DRV_H |
| 16 | #define __TEE_DRV_H | 16 | #define __TEE_DRV_H |
| 17 | 17 | ||
| 18 | #include <linux/types.h> | 18 | #include <linux/device.h> |
| 19 | #include <linux/idr.h> | 19 | #include <linux/idr.h> |
| 20 | #include <linux/kref.h> | 20 | #include <linux/kref.h> |
| 21 | #include <linux/list.h> | 21 | #include <linux/list.h> |
| 22 | #include <linux/mod_devicetable.h> | ||
| 22 | #include <linux/tee.h> | 23 | #include <linux/tee.h> |
| 24 | #include <linux/types.h> | ||
| 25 | #include <linux/uuid.h> | ||
| 23 | 26 | ||
| 24 | /* | 27 | /* |
| 25 | * The file describes the API provided by the generic TEE driver to the | 28 | * The file describes the API provided by the generic TEE driver to the |
| @@ -47,6 +50,11 @@ struct tee_shm_pool; | |||
| 47 | * @releasing: flag that indicates if context is being released right now. | 50 | * @releasing: flag that indicates if context is being released right now. |
| 48 | * It is needed to break circular dependency on context during | 51 | * It is needed to break circular dependency on context during |
| 49 | * shared memory release. | 52 | * shared memory release. |
| 53 | * @supp_nowait: flag that indicates that requests in this context should not | ||
| 54 | * wait for tee-supplicant daemon to be started if not present | ||
| 55 | * and just return with an error code. It is needed for requests | ||
| 56 | * that arises from TEE based kernel drivers that should be | ||
| 57 | * non-blocking in nature. | ||
| 50 | */ | 58 | */ |
| 51 | struct tee_context { | 59 | struct tee_context { |
| 52 | struct tee_device *teedev; | 60 | struct tee_device *teedev; |
| @@ -54,6 +62,7 @@ struct tee_context { | |||
| 54 | void *data; | 62 | void *data; |
| 55 | struct kref refcount; | 63 | struct kref refcount; |
| 56 | bool releasing; | 64 | bool releasing; |
| 65 | bool supp_nowait; | ||
| 57 | }; | 66 | }; |
| 58 | 67 | ||
| 59 | struct tee_param_memref { | 68 | struct tee_param_memref { |
| @@ -526,6 +535,18 @@ int tee_client_invoke_func(struct tee_context *ctx, | |||
| 526 | struct tee_ioctl_invoke_arg *arg, | 535 | struct tee_ioctl_invoke_arg *arg, |
| 527 | struct tee_param *param); | 536 | struct tee_param *param); |
| 528 | 537 | ||
| 538 | /** | ||
| 539 | * tee_client_cancel_req() - Request cancellation of the previous open-session | ||
| 540 | * or invoke-command operations in a Trusted Application | ||
| 541 | * @ctx: TEE Context | ||
| 542 | * @arg: Cancellation arguments, see description of | ||
| 543 | * struct tee_ioctl_cancel_arg | ||
| 544 | * | ||
| 545 | * Returns < 0 on error else 0 if the cancellation was successfully requested. | ||
| 546 | */ | ||
| 547 | int tee_client_cancel_req(struct tee_context *ctx, | ||
| 548 | struct tee_ioctl_cancel_arg *arg); | ||
| 549 | |||
| 529 | static inline bool tee_param_is_memref(struct tee_param *param) | 550 | static inline bool tee_param_is_memref(struct tee_param *param) |
| 530 | { | 551 | { |
| 531 | switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { | 552 | switch (param->attr & TEE_IOCTL_PARAM_ATTR_TYPE_MASK) { |
| @@ -538,4 +559,31 @@ static inline bool tee_param_is_memref(struct tee_param *param) | |||
| 538 | } | 559 | } |
| 539 | } | 560 | } |
| 540 | 561 | ||
| 562 | extern struct bus_type tee_bus_type; | ||
| 563 | |||
| 564 | /** | ||
| 565 | * struct tee_client_device - tee based device | ||
| 566 | * @id: device identifier | ||
| 567 | * @dev: device structure | ||
| 568 | */ | ||
| 569 | struct tee_client_device { | ||
| 570 | struct tee_client_device_id id; | ||
| 571 | struct device dev; | ||
| 572 | }; | ||
| 573 | |||
| 574 | #define to_tee_client_device(d) container_of(d, struct tee_client_device, dev) | ||
| 575 | |||
| 576 | /** | ||
| 577 | * struct tee_client_driver - tee client driver | ||
| 578 | * @id_table: device id table supported by this driver | ||
| 579 | * @driver: driver structure | ||
| 580 | */ | ||
| 581 | struct tee_client_driver { | ||
| 582 | const struct tee_client_device_id *id_table; | ||
| 583 | struct device_driver driver; | ||
| 584 | }; | ||
| 585 | |||
| 586 | #define to_tee_client_driver(d) \ | ||
| 587 | container_of(d, struct tee_client_driver, driver) | ||
| 588 | |||
| 541 | #endif /*__TEE_DRV_H*/ | 589 | #endif /*__TEE_DRV_H*/ |
diff --git a/include/linux/time32.h b/include/linux/time32.h index 118b9977080c..0a1f302a1753 100644 --- a/include/linux/time32.h +++ b/include/linux/time32.h | |||
| @@ -10,6 +10,7 @@ | |||
| 10 | */ | 10 | */ |
| 11 | 11 | ||
| 12 | #include <linux/time64.h> | 12 | #include <linux/time64.h> |
| 13 | #include <linux/timex.h> | ||
| 13 | 14 | ||
| 14 | #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) | 15 | #define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1) |
| 15 | 16 | ||
| @@ -35,13 +36,42 @@ struct old_utimbuf32 { | |||
| 35 | old_time32_t modtime; | 36 | old_time32_t modtime; |
| 36 | }; | 37 | }; |
| 37 | 38 | ||
| 39 | struct old_timex32 { | ||
| 40 | u32 modes; | ||
| 41 | s32 offset; | ||
| 42 | s32 freq; | ||
| 43 | s32 maxerror; | ||
| 44 | s32 esterror; | ||
| 45 | s32 status; | ||
| 46 | s32 constant; | ||
| 47 | s32 precision; | ||
| 48 | s32 tolerance; | ||
| 49 | struct old_timeval32 time; | ||
| 50 | s32 tick; | ||
| 51 | s32 ppsfreq; | ||
| 52 | s32 jitter; | ||
| 53 | s32 shift; | ||
| 54 | s32 stabil; | ||
| 55 | s32 jitcnt; | ||
| 56 | s32 calcnt; | ||
| 57 | s32 errcnt; | ||
| 58 | s32 stbcnt; | ||
| 59 | s32 tai; | ||
| 60 | |||
| 61 | s32:32; s32:32; s32:32; s32:32; | ||
| 62 | s32:32; s32:32; s32:32; s32:32; | ||
| 63 | s32:32; s32:32; s32:32; | ||
| 64 | }; | ||
| 65 | |||
| 38 | extern int get_old_timespec32(struct timespec64 *, const void __user *); | 66 | extern int get_old_timespec32(struct timespec64 *, const void __user *); |
| 39 | extern int put_old_timespec32(const struct timespec64 *, void __user *); | 67 | extern int put_old_timespec32(const struct timespec64 *, void __user *); |
| 40 | extern int get_old_itimerspec32(struct itimerspec64 *its, | 68 | extern int get_old_itimerspec32(struct itimerspec64 *its, |
| 41 | const struct old_itimerspec32 __user *uits); | 69 | const struct old_itimerspec32 __user *uits); |
| 42 | extern int put_old_itimerspec32(const struct itimerspec64 *its, | 70 | extern int put_old_itimerspec32(const struct itimerspec64 *its, |
| 43 | struct old_itimerspec32 __user *uits); | 71 | struct old_itimerspec32 __user *uits); |
| 44 | 72 | struct __kernel_timex; | |
| 73 | int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *); | ||
| 74 | int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *); | ||
| 45 | 75 | ||
| 46 | #if __BITS_PER_LONG == 64 | 76 | #if __BITS_PER_LONG == 64 |
| 47 | 77 | ||
diff --git a/include/linux/time64.h b/include/linux/time64.h index 05634afba0db..f38d382ffec1 100644 --- a/include/linux/time64.h +++ b/include/linux/time64.h | |||
| @@ -7,14 +7,6 @@ | |||
| 7 | typedef __s64 time64_t; | 7 | typedef __s64 time64_t; |
| 8 | typedef __u64 timeu64_t; | 8 | typedef __u64 timeu64_t; |
| 9 | 9 | ||
| 10 | /* CONFIG_64BIT_TIME enables new 64 bit time_t syscalls in the compat path | ||
| 11 | * and 32-bit emulation. | ||
| 12 | */ | ||
| 13 | #ifndef CONFIG_64BIT_TIME | ||
| 14 | #define __kernel_timespec timespec | ||
| 15 | #define __kernel_itimerspec itimerspec | ||
| 16 | #endif | ||
| 17 | |||
| 18 | #include <uapi/linux/time.h> | 10 | #include <uapi/linux/time.h> |
| 19 | 11 | ||
| 20 | struct timespec64 { | 12 | struct timespec64 { |
diff --git a/include/linux/timex.h b/include/linux/timex.h index 39c25dbebfe8..ce0859763670 100644 --- a/include/linux/timex.h +++ b/include/linux/timex.h | |||
| @@ -151,7 +151,9 @@ extern unsigned long tick_nsec; /* SHIFTED_HZ period (nsec) */ | |||
| 151 | #define NTP_INTERVAL_FREQ (HZ) | 151 | #define NTP_INTERVAL_FREQ (HZ) |
| 152 | #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) | 152 | #define NTP_INTERVAL_LENGTH (NSEC_PER_SEC/NTP_INTERVAL_FREQ) |
| 153 | 153 | ||
| 154 | extern int do_adjtimex(struct timex *); | 154 | extern int do_adjtimex(struct __kernel_timex *); |
| 155 | extern int do_clock_adjtime(const clockid_t which_clock, struct __kernel_timex * ktx); | ||
| 156 | |||
| 155 | extern void hardpps(const struct timespec64 *, const struct timespec64 *); | 157 | extern void hardpps(const struct timespec64 *, const struct timespec64 *); |
| 156 | 158 | ||
| 157 | int read_current_timer(unsigned long *timer_val); | 159 | int read_current_timer(unsigned long *timer_val); |
diff --git a/include/linux/torture.h b/include/linux/torture.h index 48fad21109fc..23d80db426d7 100644 --- a/include/linux/torture.h +++ b/include/linux/torture.h | |||
| @@ -1,23 +1,10 @@ | |||
| 1 | /* SPDX-License-Identifier: GPL-2.0+ */ | ||
| 1 | /* | 2 | /* |
| 2 | * Common functions for in-kernel torture tests. | 3 | * Common functions for in-kernel torture tests. |
| 3 | * | 4 | * |
| 4 | * This program is free software; you can redistribute it and/or modify | ||
| 5 | * it under the terms of the GNU General Public License as published by | ||
| 6 | * the Free Software Foundation; either version 2 of the License, or | ||
| 7 | * (at your option) any later version. | ||
| 8 | * | ||
| 9 | * This program is distributed in the hope that it will be useful, | ||
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
| 12 | * GNU General Public License for more details. | ||
| 13 | * | ||
| 14 | * You should have received a copy of the GNU General Public License | ||
| 15 | * along with this program; if not, you can access it online at | ||
| 16 | * http://www.gnu.org/licenses/gpl-2.0.html. | ||
| 17 | * | ||
| 18 | * Copyright IBM Corporation, 2014 | 5 | * Copyright IBM Corporation, 2014 |
| 19 | * | 6 | * |
| 20 | * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com> | 7 | * Author: Paul E. McKenney <paulmck@linux.ibm.com> |
| 21 | */ | 8 | */ |
| 22 | 9 | ||
| 23 | #ifndef __LINUX_TORTURE_H | 10 | #ifndef __LINUX_TORTURE_H |
| @@ -50,11 +37,12 @@ | |||
| 50 | do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) | 37 | do { if (verbose) pr_alert("%s" TORTURE_FLAG "!!! %s\n", torture_type, s); } while (0) |
| 51 | 38 | ||
| 52 | /* Definitions for online/offline exerciser. */ | 39 | /* Definitions for online/offline exerciser. */ |
| 40 | typedef void torture_ofl_func(void); | ||
| 53 | bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, | 41 | bool torture_offline(int cpu, long *n_onl_attempts, long *n_onl_successes, |
| 54 | unsigned long *sum_offl, int *min_onl, int *max_onl); | 42 | unsigned long *sum_offl, int *min_onl, int *max_onl); |
| 55 | bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, | 43 | bool torture_online(int cpu, long *n_onl_attempts, long *n_onl_successes, |
| 56 | unsigned long *sum_onl, int *min_onl, int *max_onl); | 44 | unsigned long *sum_onl, int *min_onl, int *max_onl); |
| 57 | int torture_onoff_init(long ooholdoff, long oointerval); | 45 | int torture_onoff_init(long ooholdoff, long oointerval, torture_ofl_func *f); |
| 58 | void torture_onoff_stats(void); | 46 | void torture_onoff_stats(void); |
| 59 | bool torture_onoff_failures(void); | 47 | bool torture_onoff_failures(void); |
| 60 | 48 | ||
diff --git a/include/linux/tpm.h b/include/linux/tpm.h index b49a55cf775f..1b5436b213a2 100644 --- a/include/linux/tpm.h +++ b/include/linux/tpm.h | |||
| @@ -22,12 +22,41 @@ | |||
| 22 | #ifndef __LINUX_TPM_H__ | 22 | #ifndef __LINUX_TPM_H__ |
| 23 | #define __LINUX_TPM_H__ | 23 | #define __LINUX_TPM_H__ |
| 24 | 24 | ||
| 25 | #include <linux/hw_random.h> | ||
| 26 | #include <linux/acpi.h> | ||
| 27 | #include <linux/cdev.h> | ||
| 28 | #include <linux/fs.h> | ||
| 29 | #include <crypto/hash_info.h> | ||
| 30 | |||
| 25 | #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ | 31 | #define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */ |
| 32 | #define TPM_MAX_DIGEST_SIZE SHA512_DIGEST_SIZE | ||
| 26 | 33 | ||
| 27 | struct tpm_chip; | 34 | struct tpm_chip; |
| 28 | struct trusted_key_payload; | 35 | struct trusted_key_payload; |
| 29 | struct trusted_key_options; | 36 | struct trusted_key_options; |
| 30 | 37 | ||
| 38 | enum tpm_algorithms { | ||
| 39 | TPM_ALG_ERROR = 0x0000, | ||
| 40 | TPM_ALG_SHA1 = 0x0004, | ||
| 41 | TPM_ALG_KEYEDHASH = 0x0008, | ||
| 42 | TPM_ALG_SHA256 = 0x000B, | ||
| 43 | TPM_ALG_SHA384 = 0x000C, | ||
| 44 | TPM_ALG_SHA512 = 0x000D, | ||
| 45 | TPM_ALG_NULL = 0x0010, | ||
| 46 | TPM_ALG_SM3_256 = 0x0012, | ||
| 47 | }; | ||
| 48 | |||
| 49 | struct tpm_digest { | ||
| 50 | u16 alg_id; | ||
| 51 | u8 digest[TPM_MAX_DIGEST_SIZE]; | ||
| 52 | } __packed; | ||
| 53 | |||
| 54 | struct tpm_bank_info { | ||
| 55 | u16 alg_id; | ||
| 56 | u16 digest_size; | ||
| 57 | u16 crypto_id; | ||
| 58 | }; | ||
| 59 | |||
| 31 | enum TPM_OPS_FLAGS { | 60 | enum TPM_OPS_FLAGS { |
| 32 | TPM_OPS_AUTO_STARTUP = BIT(0), | 61 | TPM_OPS_AUTO_STARTUP = BIT(0), |
| 33 | }; | 62 | }; |
| @@ -41,7 +70,7 @@ struct tpm_class_ops { | |||
| 41 | int (*send) (struct tpm_chip *chip, u8 *buf, size_t len); | 70 | int (*send) (struct tpm_chip *chip, u8 *buf, size_t len); |
| 42 | void (*cancel) (struct tpm_chip *chip); | 71 | void (*cancel) (struct tpm_chip *chip); |
| 43 | u8 (*status) (struct tpm_chip *chip); | 72 | u8 (*status) (struct tpm_chip *chip); |
| 44 | bool (*update_timeouts)(struct tpm_chip *chip, | 73 | void (*update_timeouts)(struct tpm_chip *chip, |
| 45 | unsigned long *timeout_cap); | 74 | unsigned long *timeout_cap); |
| 46 | int (*go_idle)(struct tpm_chip *chip); | 75 | int (*go_idle)(struct tpm_chip *chip); |
| 47 | int (*cmd_ready)(struct tpm_chip *chip); | 76 | int (*cmd_ready)(struct tpm_chip *chip); |
| @@ -50,11 +79,100 @@ struct tpm_class_ops { | |||
| 50 | void (*clk_enable)(struct tpm_chip *chip, bool value); | 79 | void (*clk_enable)(struct tpm_chip *chip, bool value); |
| 51 | }; | 80 | }; |
| 52 | 81 | ||
| 82 | #define TPM_NUM_EVENT_LOG_FILES 3 | ||
| 83 | |||
| 84 | /* Indexes the duration array */ | ||
| 85 | enum tpm_duration { | ||
| 86 | TPM_SHORT = 0, | ||
| 87 | TPM_MEDIUM = 1, | ||
| 88 | TPM_LONG = 2, | ||
| 89 | TPM_LONG_LONG = 3, | ||
| 90 | TPM_UNDEFINED, | ||
| 91 | TPM_NUM_DURATIONS = TPM_UNDEFINED, | ||
| 92 | }; | ||
| 93 | |||
| 94 | #define TPM_PPI_VERSION_LEN 3 | ||
| 95 | |||
| 96 | struct tpm_space { | ||
| 97 | u32 context_tbl[3]; | ||
| 98 | u8 *context_buf; | ||
| 99 | u32 session_tbl[3]; | ||
| 100 | u8 *session_buf; | ||
| 101 | }; | ||
| 102 | |||
| 103 | struct tpm_bios_log { | ||
| 104 | void *bios_event_log; | ||
| 105 | void *bios_event_log_end; | ||
| 106 | }; | ||
| 107 | |||
| 108 | struct tpm_chip_seqops { | ||
| 109 | struct tpm_chip *chip; | ||
| 110 | const struct seq_operations *seqops; | ||
| 111 | }; | ||
| 112 | |||
| 113 | struct tpm_chip { | ||
| 114 | struct device dev; | ||
| 115 | struct device devs; | ||
| 116 | struct cdev cdev; | ||
| 117 | struct cdev cdevs; | ||
| 118 | |||
| 119 | /* A driver callback under ops cannot be run unless ops_sem is held | ||
| 120 | * (sometimes implicitly, eg for the sysfs code). ops becomes null | ||
| 121 | * when the driver is unregistered, see tpm_try_get_ops. | ||
| 122 | */ | ||
| 123 | struct rw_semaphore ops_sem; | ||
| 124 | const struct tpm_class_ops *ops; | ||
| 125 | |||
| 126 | struct tpm_bios_log log; | ||
| 127 | struct tpm_chip_seqops bin_log_seqops; | ||
| 128 | struct tpm_chip_seqops ascii_log_seqops; | ||
| 129 | |||
| 130 | unsigned int flags; | ||
| 131 | |||
| 132 | int dev_num; /* /dev/tpm# */ | ||
| 133 | unsigned long is_open; /* only one allowed */ | ||
| 134 | |||
| 135 | char hwrng_name[64]; | ||
| 136 | struct hwrng hwrng; | ||
| 137 | |||
| 138 | struct mutex tpm_mutex; /* tpm is processing */ | ||
| 139 | |||
| 140 | unsigned long timeout_a; /* jiffies */ | ||
| 141 | unsigned long timeout_b; /* jiffies */ | ||
| 142 | unsigned long timeout_c; /* jiffies */ | ||
| 143 | unsigned long timeout_d; /* jiffies */ | ||
| 144 | bool timeout_adjusted; | ||
| 145 | unsigned long duration[TPM_NUM_DURATIONS]; /* jiffies */ | ||
| 146 | bool duration_adjusted; | ||
| 147 | |||
| 148 | struct dentry *bios_dir[TPM_NUM_EVENT_LOG_FILES]; | ||
| 149 | |||
| 150 | const struct attribute_group *groups[3]; | ||
| 151 | unsigned int groups_cnt; | ||
| 152 | |||
| 153 | u32 nr_allocated_banks; | ||
| 154 | struct tpm_bank_info *allocated_banks; | ||
| 155 | #ifdef CONFIG_ACPI | ||
| 156 | acpi_handle acpi_dev_handle; | ||
| 157 | char ppi_version[TPM_PPI_VERSION_LEN + 1]; | ||
| 158 | #endif /* CONFIG_ACPI */ | ||
| 159 | |||
| 160 | struct tpm_space work_space; | ||
| 161 | u32 last_cc; | ||
| 162 | u32 nr_commands; | ||
| 163 | u32 *cc_attrs_tbl; | ||
| 164 | |||
| 165 | /* active locality */ | ||
| 166 | int locality; | ||
| 167 | }; | ||
| 168 | |||
| 53 | #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) | 169 | #if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE) |
| 54 | 170 | ||
| 55 | extern int tpm_is_tpm2(struct tpm_chip *chip); | 171 | extern int tpm_is_tpm2(struct tpm_chip *chip); |
| 56 | extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf); | 172 | extern int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, |
| 57 | extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, const u8 *hash); | 173 | struct tpm_digest *digest); |
| 174 | extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, | ||
| 175 | struct tpm_digest *digests); | ||
| 58 | extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); | 176 | extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen); |
| 59 | extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); | 177 | extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max); |
| 60 | extern int tpm_seal_trusted(struct tpm_chip *chip, | 178 | extern int tpm_seal_trusted(struct tpm_chip *chip, |
| @@ -70,13 +188,14 @@ static inline int tpm_is_tpm2(struct tpm_chip *chip) | |||
| 70 | return -ENODEV; | 188 | return -ENODEV; |
| 71 | } | 189 | } |
| 72 | 190 | ||
| 73 | static inline int tpm_pcr_read(struct tpm_chip *chip, u32 pcr_idx, u8 *res_buf) | 191 | static inline int tpm_pcr_read(struct tpm_chip *chip, int pcr_idx, |
| 192 | struct tpm_digest *digest) | ||
| 74 | { | 193 | { |
| 75 | return -ENODEV; | 194 | return -ENODEV; |
| 76 | } | 195 | } |
| 77 | 196 | ||
| 78 | static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, | 197 | static inline int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx, |
| 79 | const u8 *hash) | 198 | struct tpm_digest *digests) |
| 80 | { | 199 | { |
| 81 | return -ENODEV; | 200 | return -ENODEV; |
| 82 | } | 201 | } |
diff --git a/include/linux/tpm_eventlog.h b/include/linux/tpm_eventlog.h index 20d9da77fc11..81519f163211 100644 --- a/include/linux/tpm_eventlog.h +++ b/include/linux/tpm_eventlog.h | |||
| @@ -3,12 +3,11 @@ | |||
| 3 | #ifndef __LINUX_TPM_EVENTLOG_H__ | 3 | #ifndef __LINUX_TPM_EVENTLOG_H__ |
| 4 | #define __LINUX_TPM_EVENTLOG_H__ | 4 | #define __LINUX_TPM_EVENTLOG_H__ |
| 5 | 5 | ||
| 6 | #include <crypto/hash_info.h> | 6 | #include <linux/tpm.h> |
| 7 | 7 | ||
| 8 | #define TCG_EVENT_NAME_LEN_MAX 255 | 8 | #define TCG_EVENT_NAME_LEN_MAX 255 |
| 9 | #define MAX_TEXT_EVENT 1000 /* Max event string length */ | 9 | #define MAX_TEXT_EVENT 1000 /* Max event string length */ |
| 10 | #define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */ | 10 | #define ACPI_TCPA_SIG "TCPA" /* 0x41504354 /'TCPA' */ |
| 11 | #define TPM2_ACTIVE_PCR_BANKS 3 | ||
| 12 | 11 | ||
| 13 | #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1 | 12 | #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_1_2 0x1 |
| 14 | #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2 | 13 | #define EFI_TCG2_EVENT_LOG_FORMAT_TCG_2 0x2 |
| @@ -82,7 +81,7 @@ struct tcg_efi_specid_event_algs { | |||
| 82 | u16 digest_size; | 81 | u16 digest_size; |
| 83 | } __packed; | 82 | } __packed; |
| 84 | 83 | ||
| 85 | struct tcg_efi_specid_event { | 84 | struct tcg_efi_specid_event_head { |
| 86 | u8 signature[16]; | 85 | u8 signature[16]; |
| 87 | u32 platform_class; | 86 | u32 platform_class; |
| 88 | u8 spec_version_minor; | 87 | u8 spec_version_minor; |
| @@ -90,9 +89,7 @@ struct tcg_efi_specid_event { | |||
| 90 | u8 spec_errata; | 89 | u8 spec_errata; |
| 91 | u8 uintnsize; | 90 | u8 uintnsize; |
| 92 | u32 num_algs; | 91 | u32 num_algs; |
| 93 | struct tcg_efi_specid_event_algs digest_sizes[TPM2_ACTIVE_PCR_BANKS]; | 92 | struct tcg_efi_specid_event_algs digest_sizes[]; |
| 94 | u8 vendor_info_size; | ||
| 95 | u8 vendor_info[0]; | ||
| 96 | } __packed; | 93 | } __packed; |
| 97 | 94 | ||
| 98 | struct tcg_pcr_event { | 95 | struct tcg_pcr_event { |
| @@ -108,17 +105,11 @@ struct tcg_event_field { | |||
| 108 | u8 event[0]; | 105 | u8 event[0]; |
| 109 | } __packed; | 106 | } __packed; |
| 110 | 107 | ||
| 111 | struct tpm2_digest { | 108 | struct tcg_pcr_event2_head { |
| 112 | u16 alg_id; | ||
| 113 | u8 digest[SHA512_DIGEST_SIZE]; | ||
| 114 | } __packed; | ||
| 115 | |||
| 116 | struct tcg_pcr_event2 { | ||
| 117 | u32 pcr_idx; | 109 | u32 pcr_idx; |
| 118 | u32 event_type; | 110 | u32 event_type; |
| 119 | u32 count; | 111 | u32 count; |
| 120 | struct tpm2_digest digests[TPM2_ACTIVE_PCR_BANKS]; | 112 | struct tpm_digest digests[]; |
| 121 | struct tcg_event_field event; | ||
| 122 | } __packed; | 113 | } __packed; |
| 123 | 114 | ||
| 124 | #endif | 115 | #endif |
diff --git a/include/linux/types.h b/include/linux/types.h index c2615d6a019e..cc0dbbe551d5 100644 --- a/include/linux/types.h +++ b/include/linux/types.h | |||
| @@ -155,9 +155,9 @@ typedef u64 dma_addr_t; | |||
| 155 | typedef u32 dma_addr_t; | 155 | typedef u32 dma_addr_t; |
| 156 | #endif | 156 | #endif |
| 157 | 157 | ||
| 158 | typedef unsigned __bitwise gfp_t; | 158 | typedef unsigned int __bitwise gfp_t; |
| 159 | typedef unsigned __bitwise slab_flags_t; | 159 | typedef unsigned int __bitwise slab_flags_t; |
| 160 | typedef unsigned __bitwise fmode_t; | 160 | typedef unsigned int __bitwise fmode_t; |
| 161 | 161 | ||
| 162 | #ifdef CONFIG_PHYS_ADDR_T_64BIT | 162 | #ifdef CONFIG_PHYS_ADDR_T_64BIT |
| 163 | typedef u64 phys_addr_t; | 163 | typedef u64 phys_addr_t; |
diff --git a/include/linux/uio.h b/include/linux/uio.h index ecf584f6b82d..2d0131ad4604 100644 --- a/include/linux/uio.h +++ b/include/linux/uio.h | |||
| @@ -23,14 +23,23 @@ struct kvec { | |||
| 23 | }; | 23 | }; |
| 24 | 24 | ||
| 25 | enum iter_type { | 25 | enum iter_type { |
| 26 | ITER_IOVEC = 0, | 26 | /* set if ITER_BVEC doesn't hold a bv_page ref */ |
| 27 | ITER_KVEC = 2, | 27 | ITER_BVEC_FLAG_NO_REF = 2, |
| 28 | ITER_BVEC = 4, | 28 | |
| 29 | ITER_PIPE = 8, | 29 | /* iter types */ |
| 30 | ITER_DISCARD = 16, | 30 | ITER_IOVEC = 4, |
| 31 | ITER_KVEC = 8, | ||
| 32 | ITER_BVEC = 16, | ||
| 33 | ITER_PIPE = 32, | ||
| 34 | ITER_DISCARD = 64, | ||
| 31 | }; | 35 | }; |
| 32 | 36 | ||
| 33 | struct iov_iter { | 37 | struct iov_iter { |
| 38 | /* | ||
| 39 | * Bit 0 is the read/write bit, set if we're writing. | ||
| 40 | * Bit 1 is the BVEC_FLAG_NO_REF bit, set if type is a bvec and | ||
| 41 | * the caller isn't expecting to drop a page reference when done. | ||
| 42 | */ | ||
| 34 | unsigned int type; | 43 | unsigned int type; |
| 35 | size_t iov_offset; | 44 | size_t iov_offset; |
| 36 | size_t count; | 45 | size_t count; |
| @@ -51,7 +60,7 @@ struct iov_iter { | |||
| 51 | 60 | ||
| 52 | static inline enum iter_type iov_iter_type(const struct iov_iter *i) | 61 | static inline enum iter_type iov_iter_type(const struct iov_iter *i) |
| 53 | { | 62 | { |
| 54 | return i->type & ~(READ | WRITE); | 63 | return i->type & ~(READ | WRITE | ITER_BVEC_FLAG_NO_REF); |
| 55 | } | 64 | } |
| 56 | 65 | ||
| 57 | static inline bool iter_is_iovec(const struct iov_iter *i) | 66 | static inline bool iter_is_iovec(const struct iov_iter *i) |
| @@ -84,6 +93,11 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i) | |||
| 84 | return i->type & (READ | WRITE); | 93 | return i->type & (READ | WRITE); |
| 85 | } | 94 | } |
| 86 | 95 | ||
| 96 | static inline bool iov_iter_bvec_no_ref(const struct iov_iter *i) | ||
| 97 | { | ||
| 98 | return (i->type & ITER_BVEC_FLAG_NO_REF) != 0; | ||
| 99 | } | ||
| 100 | |||
| 87 | /* | 101 | /* |
| 88 | * Total number of bytes covered by an iovec. | 102 | * Total number of bytes covered by an iovec. |
| 89 | * | 103 | * |
| @@ -110,14 +124,6 @@ static inline struct iovec iov_iter_iovec(const struct iov_iter *iter) | |||
| 110 | }; | 124 | }; |
| 111 | } | 125 | } |
| 112 | 126 | ||
| 113 | #define iov_for_each(iov, iter, start) \ | ||
| 114 | if (iov_iter_type(start) == ITER_IOVEC || \ | ||
| 115 | iov_iter_type(start) == ITER_KVEC) \ | ||
| 116 | for (iter = (start); \ | ||
| 117 | (iter).count && \ | ||
| 118 | ((iov = iov_iter_iovec(&(iter))), 1); \ | ||
| 119 | iov_iter_advance(&(iter), (iov).iov_len)) | ||
| 120 | |||
| 121 | size_t iov_iter_copy_from_user_atomic(struct page *page, | 127 | size_t iov_iter_copy_from_user_atomic(struct page *page, |
| 122 | struct iov_iter *i, unsigned long offset, size_t bytes); | 128 | struct iov_iter *i, unsigned long offset, size_t bytes); |
| 123 | void iov_iter_advance(struct iov_iter *i, size_t bytes); | 129 | void iov_iter_advance(struct iov_iter *i, size_t bytes); |
diff --git a/include/linux/umh.h b/include/linux/umh.h index 235f51b62c71..0c08de356d0d 100644 --- a/include/linux/umh.h +++ b/include/linux/umh.h | |||
| @@ -47,6 +47,8 @@ struct umh_info { | |||
| 47 | const char *cmdline; | 47 | const char *cmdline; |
| 48 | struct file *pipe_to_umh; | 48 | struct file *pipe_to_umh; |
| 49 | struct file *pipe_from_umh; | 49 | struct file *pipe_from_umh; |
| 50 | struct list_head list; | ||
| 51 | void (*cleanup)(struct umh_info *info); | ||
| 50 | pid_t pid; | 52 | pid_t pid; |
| 51 | }; | 53 | }; |
| 52 | int fork_usermode_blob(void *data, size_t len, struct umh_info *info); | 54 | int fork_usermode_blob(void *data, size_t len, struct umh_info *info); |
diff --git a/include/linux/usb.h b/include/linux/usb.h index 5e49e82c4368..ff010d1fd1c7 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h | |||
| @@ -200,7 +200,6 @@ usb_find_last_int_out_endpoint(struct usb_host_interface *alt, | |||
| 200 | * @dev: driver model's view of this device | 200 | * @dev: driver model's view of this device |
| 201 | * @usb_dev: if an interface is bound to the USB major, this will point | 201 | * @usb_dev: if an interface is bound to the USB major, this will point |
| 202 | * to the sysfs representation for that device. | 202 | * to the sysfs representation for that device. |
| 203 | * @pm_usage_cnt: PM usage counter for this interface | ||
| 204 | * @reset_ws: Used for scheduling resets from atomic context. | 203 | * @reset_ws: Used for scheduling resets from atomic context. |
| 205 | * @resetting_device: USB core reset the device, so use alt setting 0 as | 204 | * @resetting_device: USB core reset the device, so use alt setting 0 as |
| 206 | * current; needs bandwidth alloc after reset. | 205 | * current; needs bandwidth alloc after reset. |
| @@ -257,7 +256,6 @@ struct usb_interface { | |||
| 257 | 256 | ||
| 258 | struct device dev; /* interface specific device info */ | 257 | struct device dev; /* interface specific device info */ |
| 259 | struct device *usb_dev; | 258 | struct device *usb_dev; |
| 260 | atomic_t pm_usage_cnt; /* usage counter for autosuspend */ | ||
| 261 | struct work_struct reset_ws; /* for resets in atomic context */ | 259 | struct work_struct reset_ws; /* for resets in atomic context */ |
| 262 | }; | 260 | }; |
| 263 | #define to_usb_interface(d) container_of(d, struct usb_interface, dev) | 261 | #define to_usb_interface(d) container_of(d, struct usb_interface, dev) |
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h index 7dc3a411bece..695931b03684 100644 --- a/include/linux/usb/hcd.h +++ b/include/linux/usb/hcd.h | |||
| @@ -72,6 +72,12 @@ struct giveback_urb_bh { | |||
| 72 | struct usb_host_endpoint *completing_ep; | 72 | struct usb_host_endpoint *completing_ep; |
| 73 | }; | 73 | }; |
| 74 | 74 | ||
| 75 | enum usb_dev_authorize_policy { | ||
| 76 | USB_DEVICE_AUTHORIZE_NONE = 0, | ||
| 77 | USB_DEVICE_AUTHORIZE_ALL = 1, | ||
| 78 | USB_DEVICE_AUTHORIZE_INTERNAL = 2, | ||
| 79 | }; | ||
| 80 | |||
| 75 | struct usb_hcd { | 81 | struct usb_hcd { |
| 76 | 82 | ||
| 77 | /* | 83 | /* |
| @@ -117,7 +123,6 @@ struct usb_hcd { | |||
| 117 | #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ | 123 | #define HCD_FLAG_RH_RUNNING 5 /* root hub is running? */ |
| 118 | #define HCD_FLAG_DEAD 6 /* controller has died? */ | 124 | #define HCD_FLAG_DEAD 6 /* controller has died? */ |
| 119 | #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ | 125 | #define HCD_FLAG_INTF_AUTHORIZED 7 /* authorize interfaces? */ |
| 120 | #define HCD_FLAG_DEV_AUTHORIZED 8 /* authorize devices? */ | ||
| 121 | 126 | ||
| 122 | /* The flags can be tested using these macros; they are likely to | 127 | /* The flags can be tested using these macros; they are likely to |
| 123 | * be slightly faster than test_bit(). | 128 | * be slightly faster than test_bit(). |
| @@ -142,8 +147,7 @@ struct usb_hcd { | |||
| 142 | * or they require explicit user space authorization; this bit is | 147 | * or they require explicit user space authorization; this bit is |
| 143 | * settable through /sys/class/usb_host/X/authorized_default | 148 | * settable through /sys/class/usb_host/X/authorized_default |
| 144 | */ | 149 | */ |
| 145 | #define HCD_DEV_AUTHORIZED(hcd) \ | 150 | enum usb_dev_authorize_policy dev_policy; |
| 146 | ((hcd)->flags & (1U << HCD_FLAG_DEV_AUTHORIZED)) | ||
| 147 | 151 | ||
| 148 | /* Flags that get set only during HCD registration or removal. */ | 152 | /* Flags that get set only during HCD registration or removal. */ |
| 149 | unsigned rh_registered:1;/* is root hub registered? */ | 153 | unsigned rh_registered:1;/* is root hub registered? */ |
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h index edc51be4a77c..c05ffa6abda9 100644 --- a/include/linux/usb/role.h +++ b/include/linux/usb/role.h | |||
| @@ -18,6 +18,7 @@ typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev); | |||
| 18 | 18 | ||
| 19 | /** | 19 | /** |
| 20 | * struct usb_role_switch_desc - USB Role Switch Descriptor | 20 | * struct usb_role_switch_desc - USB Role Switch Descriptor |
| 21 | * @fwnode: The device node to be associated with the role switch | ||
| 21 | * @usb2_port: Optional reference to the host controller port device (USB2) | 22 | * @usb2_port: Optional reference to the host controller port device (USB2) |
| 22 | * @usb3_port: Optional reference to the host controller port device (USB3) | 23 | * @usb3_port: Optional reference to the host controller port device (USB3) |
| 23 | * @udc: Optional reference to the peripheral controller device | 24 | * @udc: Optional reference to the peripheral controller device |
| @@ -32,6 +33,7 @@ typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev); | |||
| 32 | * usb_role_switch_register() before registering the switch. | 33 | * usb_role_switch_register() before registering the switch. |
| 33 | */ | 34 | */ |
| 34 | struct usb_role_switch_desc { | 35 | struct usb_role_switch_desc { |
| 36 | struct fwnode_handle *fwnode; | ||
| 35 | struct device *usb2_port; | 37 | struct device *usb2_port; |
| 36 | struct device *usb3_port; | 38 | struct device *usb3_port; |
| 37 | struct device *udc; | 39 | struct device *udc; |
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 50c74a77db55..0c532ca3f079 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h | |||
| @@ -159,12 +159,6 @@ struct tcpm_port; | |||
| 159 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); | 159 | struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc); |
| 160 | void tcpm_unregister_port(struct tcpm_port *port); | 160 | void tcpm_unregister_port(struct tcpm_port *port); |
| 161 | 161 | ||
| 162 | int tcpm_update_source_capabilities(struct tcpm_port *port, const u32 *pdo, | ||
| 163 | unsigned int nr_pdo); | ||
| 164 | int tcpm_update_sink_capabilities(struct tcpm_port *port, const u32 *pdo, | ||
| 165 | unsigned int nr_pdo, | ||
| 166 | unsigned int operating_snk_mw); | ||
| 167 | |||
| 168 | void tcpm_vbus_change(struct tcpm_port *port); | 162 | void tcpm_vbus_change(struct tcpm_port *port); |
| 169 | void tcpm_cc_change(struct tcpm_port *port); | 163 | void tcpm_cc_change(struct tcpm_port *port); |
| 170 | void tcpm_pd_receive(struct tcpm_port *port, | 164 | void tcpm_pd_receive(struct tcpm_port *port, |
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h index 55ae781d60a9..7fa12ef8d09a 100644 --- a/include/linux/usb/typec_dp.h +++ b/include/linux/usb/typec_dp.h | |||
| @@ -92,4 +92,8 @@ enum { | |||
| 92 | #define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8 | 92 | #define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8 |
| 93 | #define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8) | 93 | #define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8) |
| 94 | 94 | ||
| 95 | /* Helper for setting/getting the pin assignement value to the configuration */ | ||
| 96 | #define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8) | ||
| 97 | #define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8) | ||
| 98 | |||
| 95 | #endif /* __USB_TYPEC_DP_H */ | 99 | #endif /* __USB_TYPEC_DP_H */ |
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h index 79293f630ee1..43f40685e53c 100644 --- a/include/linux/usb/typec_mux.h +++ b/include/linux/usb/typec_mux.h | |||
| @@ -47,7 +47,8 @@ void typec_switch_put(struct typec_switch *sw); | |||
| 47 | int typec_switch_register(struct typec_switch *sw); | 47 | int typec_switch_register(struct typec_switch *sw); |
| 48 | void typec_switch_unregister(struct typec_switch *sw); | 48 | void typec_switch_unregister(struct typec_switch *sw); |
| 49 | 49 | ||
| 50 | struct typec_mux *typec_mux_get(struct device *dev, const char *name); | 50 | struct typec_mux * |
| 51 | typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc); | ||
| 51 | void typec_mux_put(struct typec_mux *mux); | 52 | void typec_mux_put(struct typec_mux *mux); |
| 52 | int typec_mux_register(struct typec_mux *mux); | 53 | int typec_mux_register(struct typec_mux *mux); |
| 53 | void typec_mux_unregister(struct typec_mux *mux); | 54 | void typec_mux_unregister(struct typec_mux *mux); |
diff --git a/include/linux/usb/wusb.h b/include/linux/usb/wusb.h index 9e4a3213f2c2..65adee629106 100644 --- a/include/linux/usb/wusb.h +++ b/include/linux/usb/wusb.h | |||
| @@ -236,22 +236,6 @@ enum { | |||
| 236 | WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ | 236 | WUSB_TRUST_TIMEOUT_MS = 4000, /* [WUSB] section 4.15.1 */ |
| 237 | }; | 237 | }; |
| 238 | 238 | ||
| 239 | static inline size_t ckhdid_printf(char *pr_ckhdid, size_t size, | ||
| 240 | const struct wusb_ckhdid *ckhdid) | ||
| 241 | { | ||
| 242 | return scnprintf(pr_ckhdid, size, | ||
| 243 | "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx " | ||
| 244 | "%02hx %02hx %02hx %02hx %02hx %02hx %02hx %02hx", | ||
| 245 | ckhdid->data[0], ckhdid->data[1], | ||
| 246 | ckhdid->data[2], ckhdid->data[3], | ||
| 247 | ckhdid->data[4], ckhdid->data[5], | ||
| 248 | ckhdid->data[6], ckhdid->data[7], | ||
| 249 | ckhdid->data[8], ckhdid->data[9], | ||
| 250 | ckhdid->data[10], ckhdid->data[11], | ||
| 251 | ckhdid->data[12], ckhdid->data[13], | ||
| 252 | ckhdid->data[14], ckhdid->data[15]); | ||
| 253 | } | ||
| 254 | |||
| 255 | /* | 239 | /* |
| 256 | * WUSB Crypto stuff (WUSB1.0[6]) | 240 | * WUSB Crypto stuff (WUSB1.0[6]) |
| 257 | */ | 241 | */ |
diff --git a/include/linux/vbox_utils.h b/include/linux/vbox_utils.h index a240ed2a0372..ff56c443180c 100644 --- a/include/linux/vbox_utils.h +++ b/include/linux/vbox_utils.h | |||
| @@ -24,15 +24,17 @@ __printf(1, 2) void vbg_debug(const char *fmt, ...); | |||
| 24 | #define vbg_debug pr_debug | 24 | #define vbg_debug pr_debug |
| 25 | #endif | 25 | #endif |
| 26 | 26 | ||
| 27 | int vbg_hgcm_connect(struct vbg_dev *gdev, | 27 | int vbg_hgcm_connect(struct vbg_dev *gdev, u32 requestor, |
| 28 | struct vmmdev_hgcm_service_location *loc, | 28 | struct vmmdev_hgcm_service_location *loc, |
| 29 | u32 *client_id, int *vbox_status); | 29 | u32 *client_id, int *vbox_status); |
| 30 | 30 | ||
| 31 | int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 client_id, int *vbox_status); | 31 | int vbg_hgcm_disconnect(struct vbg_dev *gdev, u32 requestor, |
| 32 | u32 client_id, int *vbox_status); | ||
| 32 | 33 | ||
| 33 | int vbg_hgcm_call(struct vbg_dev *gdev, u32 client_id, u32 function, | 34 | int vbg_hgcm_call(struct vbg_dev *gdev, u32 requestor, u32 client_id, |
| 34 | u32 timeout_ms, struct vmmdev_hgcm_function_parameter *parms, | 35 | u32 function, u32 timeout_ms, |
| 35 | u32 parm_count, int *vbox_status); | 36 | struct vmmdev_hgcm_function_parameter *parms, u32 parm_count, |
| 37 | int *vbox_status); | ||
| 36 | 38 | ||
| 37 | /** | 39 | /** |
| 38 | * Convert a VirtualBox status code to a standard Linux kernel return value. | 40 | * Convert a VirtualBox status code to a standard Linux kernel return value. |
diff --git a/include/linux/verification.h b/include/linux/verification.h index cfa4730d607a..018fb5f13d44 100644 --- a/include/linux/verification.h +++ b/include/linux/verification.h | |||
| @@ -17,6 +17,7 @@ | |||
| 17 | * should be used. | 17 | * should be used. |
| 18 | */ | 18 | */ |
| 19 | #define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) | 19 | #define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL) |
| 20 | #define VERIFY_USE_PLATFORM_KEYRING ((struct key *)2UL) | ||
| 20 | 21 | ||
| 21 | /* | 22 | /* |
| 22 | * The use to which an asymmetric key is being put. | 23 | * The use to which an asymmetric key is being put. |
diff --git a/include/linux/vgaarb.h b/include/linux/vgaarb.h index ee162e3e879b..553b34c8b5f7 100644 --- a/include/linux/vgaarb.h +++ b/include/linux/vgaarb.h | |||
| @@ -125,9 +125,11 @@ extern void vga_put(struct pci_dev *pdev, unsigned int rsrc); | |||
| 125 | #ifdef CONFIG_VGA_ARB | 125 | #ifdef CONFIG_VGA_ARB |
| 126 | extern struct pci_dev *vga_default_device(void); | 126 | extern struct pci_dev *vga_default_device(void); |
| 127 | extern void vga_set_default_device(struct pci_dev *pdev); | 127 | extern void vga_set_default_device(struct pci_dev *pdev); |
| 128 | extern int vga_remove_vgacon(struct pci_dev *pdev); | ||
| 128 | #else | 129 | #else |
| 129 | static inline struct pci_dev *vga_default_device(void) { return NULL; }; | 130 | static inline struct pci_dev *vga_default_device(void) { return NULL; }; |
| 130 | static inline void vga_set_default_device(struct pci_dev *pdev) { }; | 131 | static inline void vga_set_default_device(struct pci_dev *pdev) { }; |
| 132 | static inline int vga_remove_vgacon(struct pci_dev *pdev) { return 0; }; | ||
| 131 | #endif | 133 | #endif |
| 132 | 134 | ||
| 133 | /* | 135 | /* |
diff --git a/include/linux/virtio.h b/include/linux/virtio.h index fa1b5da2804e..673fe3ef3607 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h | |||
| @@ -157,6 +157,8 @@ int virtio_device_freeze(struct virtio_device *dev); | |||
| 157 | int virtio_device_restore(struct virtio_device *dev); | 157 | int virtio_device_restore(struct virtio_device *dev); |
| 158 | #endif | 158 | #endif |
| 159 | 159 | ||
| 160 | size_t virtio_max_dma_size(struct virtio_device *vdev); | ||
| 161 | |||
| 160 | #define virtio_device_for_each_vq(vdev, vq) \ | 162 | #define virtio_device_for_each_vq(vdev, vq) \ |
| 161 | list_for_each_entry(vq, &vdev->vqs, list) | 163 | list_for_each_entry(vq, &vdev->vqs, list) |
| 162 | 164 | ||
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h index 32baf8e26735..bb4cc4910750 100644 --- a/include/linux/virtio_config.h +++ b/include/linux/virtio_config.h | |||
| @@ -12,6 +12,11 @@ struct irq_affinity; | |||
| 12 | 12 | ||
| 13 | /** | 13 | /** |
| 14 | * virtio_config_ops - operations for configuring a virtio device | 14 | * virtio_config_ops - operations for configuring a virtio device |
| 15 | * Note: Do not assume that a transport implements all of the operations | ||
| 16 | * getting/setting a value as a simple read/write! Generally speaking, | ||
| 17 | * any of @get/@set, @get_status/@set_status, or @get_features/ | ||
| 18 | * @finalize_features are NOT safe to be called from an atomic | ||
| 19 | * context. | ||
| 15 | * @get: read the value of a configuration field | 20 | * @get: read the value of a configuration field |
| 16 | * vdev: the virtio_device | 21 | * vdev: the virtio_device |
| 17 | * offset: the offset of the configuration field | 22 | * offset: the offset of the configuration field |
| @@ -22,7 +27,7 @@ struct irq_affinity; | |||
| 22 | * offset: the offset of the configuration field | 27 | * offset: the offset of the configuration field |
| 23 | * buf: the buffer to read the field value from. | 28 | * buf: the buffer to read the field value from. |
| 24 | * len: the length of the buffer | 29 | * len: the length of the buffer |
| 25 | * @generation: config generation counter | 30 | * @generation: config generation counter (optional) |
| 26 | * vdev: the virtio_device | 31 | * vdev: the virtio_device |
| 27 | * Returns the config generation counter | 32 | * Returns the config generation counter |
| 28 | * @get_status: read the status byte | 33 | * @get_status: read the status byte |
| @@ -48,17 +53,17 @@ struct irq_affinity; | |||
| 48 | * @del_vqs: free virtqueues found by find_vqs(). | 53 | * @del_vqs: free virtqueues found by find_vqs(). |
| 49 | * @get_features: get the array of feature bits for this device. | 54 | * @get_features: get the array of feature bits for this device. |
| 50 | * vdev: the virtio_device | 55 | * vdev: the virtio_device |
| 51 | * Returns the first 32 feature bits (all we currently need). | 56 | * Returns the first 64 feature bits (all we currently need). |
| 52 | * @finalize_features: confirm what device features we'll be using. | 57 | * @finalize_features: confirm what device features we'll be using. |
| 53 | * vdev: the virtio_device | 58 | * vdev: the virtio_device |
| 54 | * This gives the final feature bits for the device: it can change | 59 | * This gives the final feature bits for the device: it can change |
| 55 | * the dev->feature bits if it wants. | 60 | * the dev->feature bits if it wants. |
| 56 | * Returns 0 on success or error status | 61 | * Returns 0 on success or error status |
| 57 | * @bus_name: return the bus name associated with the device | 62 | * @bus_name: return the bus name associated with the device (optional) |
| 58 | * vdev: the virtio_device | 63 | * vdev: the virtio_device |
| 59 | * This returns a pointer to the bus name a la pci_name from which | 64 | * This returns a pointer to the bus name a la pci_name from which |
| 60 | * the caller can then copy. | 65 | * the caller can then copy. |
| 61 | * @set_vq_affinity: set the affinity for a virtqueue. | 66 | * @set_vq_affinity: set the affinity for a virtqueue (optional). |
| 62 | * @get_vq_affinity: get the affinity for a virtqueue (optional). | 67 | * @get_vq_affinity: get the affinity for a virtqueue (optional). |
| 63 | */ | 68 | */ |
| 64 | typedef void vq_callback_t(struct virtqueue *); | 69 | typedef void vq_callback_t(struct virtqueue *); |
| @@ -285,6 +290,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) | |||
| 285 | /* Config space accessors. */ | 290 | /* Config space accessors. */ |
| 286 | #define virtio_cread(vdev, structname, member, ptr) \ | 291 | #define virtio_cread(vdev, structname, member, ptr) \ |
| 287 | do { \ | 292 | do { \ |
| 293 | might_sleep(); \ | ||
| 288 | /* Must match the member's type, and be integer */ \ | 294 | /* Must match the member's type, and be integer */ \ |
| 289 | if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ | 295 | if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ |
| 290 | (*ptr) = 1; \ | 296 | (*ptr) = 1; \ |
| @@ -314,6 +320,7 @@ static inline __virtio64 cpu_to_virtio64(struct virtio_device *vdev, u64 val) | |||
| 314 | /* Config space accessors. */ | 320 | /* Config space accessors. */ |
| 315 | #define virtio_cwrite(vdev, structname, member, ptr) \ | 321 | #define virtio_cwrite(vdev, structname, member, ptr) \ |
| 316 | do { \ | 322 | do { \ |
| 323 | might_sleep(); \ | ||
| 317 | /* Must match the member's type, and be integer */ \ | 324 | /* Must match the member's type, and be integer */ \ |
| 318 | if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ | 325 | if (!typecheck(typeof((((structname*)0)->member)), *(ptr))) \ |
| 319 | BUG_ON((*ptr) == 1); \ | 326 | BUG_ON((*ptr) == 1); \ |
| @@ -353,6 +360,7 @@ static inline void __virtio_cread_many(struct virtio_device *vdev, | |||
| 353 | vdev->config->generation(vdev) : 0; | 360 | vdev->config->generation(vdev) : 0; |
| 354 | int i; | 361 | int i; |
| 355 | 362 | ||
| 363 | might_sleep(); | ||
| 356 | do { | 364 | do { |
| 357 | old = gen; | 365 | old = gen; |
| 358 | 366 | ||
| @@ -375,6 +383,8 @@ static inline void virtio_cread_bytes(struct virtio_device *vdev, | |||
| 375 | static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) | 383 | static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) |
| 376 | { | 384 | { |
| 377 | u8 ret; | 385 | u8 ret; |
| 386 | |||
| 387 | might_sleep(); | ||
| 378 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 388 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
| 379 | return ret; | 389 | return ret; |
| 380 | } | 390 | } |
| @@ -382,6 +392,7 @@ static inline u8 virtio_cread8(struct virtio_device *vdev, unsigned int offset) | |||
| 382 | static inline void virtio_cwrite8(struct virtio_device *vdev, | 392 | static inline void virtio_cwrite8(struct virtio_device *vdev, |
| 383 | unsigned int offset, u8 val) | 393 | unsigned int offset, u8 val) |
| 384 | { | 394 | { |
| 395 | might_sleep(); | ||
| 385 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 396 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
| 386 | } | 397 | } |
| 387 | 398 | ||
| @@ -389,6 +400,8 @@ static inline u16 virtio_cread16(struct virtio_device *vdev, | |||
| 389 | unsigned int offset) | 400 | unsigned int offset) |
| 390 | { | 401 | { |
| 391 | u16 ret; | 402 | u16 ret; |
| 403 | |||
| 404 | might_sleep(); | ||
| 392 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 405 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
| 393 | return virtio16_to_cpu(vdev, (__force __virtio16)ret); | 406 | return virtio16_to_cpu(vdev, (__force __virtio16)ret); |
| 394 | } | 407 | } |
| @@ -396,6 +409,7 @@ static inline u16 virtio_cread16(struct virtio_device *vdev, | |||
| 396 | static inline void virtio_cwrite16(struct virtio_device *vdev, | 409 | static inline void virtio_cwrite16(struct virtio_device *vdev, |
| 397 | unsigned int offset, u16 val) | 410 | unsigned int offset, u16 val) |
| 398 | { | 411 | { |
| 412 | might_sleep(); | ||
| 399 | val = (__force u16)cpu_to_virtio16(vdev, val); | 413 | val = (__force u16)cpu_to_virtio16(vdev, val); |
| 400 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 414 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
| 401 | } | 415 | } |
| @@ -404,6 +418,8 @@ static inline u32 virtio_cread32(struct virtio_device *vdev, | |||
| 404 | unsigned int offset) | 418 | unsigned int offset) |
| 405 | { | 419 | { |
| 406 | u32 ret; | 420 | u32 ret; |
| 421 | |||
| 422 | might_sleep(); | ||
| 407 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); | 423 | vdev->config->get(vdev, offset, &ret, sizeof(ret)); |
| 408 | return virtio32_to_cpu(vdev, (__force __virtio32)ret); | 424 | return virtio32_to_cpu(vdev, (__force __virtio32)ret); |
| 409 | } | 425 | } |
| @@ -411,6 +427,7 @@ static inline u32 virtio_cread32(struct virtio_device *vdev, | |||
| 411 | static inline void virtio_cwrite32(struct virtio_device *vdev, | 427 | static inline void virtio_cwrite32(struct virtio_device *vdev, |
| 412 | unsigned int offset, u32 val) | 428 | unsigned int offset, u32 val) |
| 413 | { | 429 | { |
| 430 | might_sleep(); | ||
| 414 | val = (__force u32)cpu_to_virtio32(vdev, val); | 431 | val = (__force u32)cpu_to_virtio32(vdev, val); |
| 415 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 432 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
| 416 | } | 433 | } |
| @@ -426,6 +443,7 @@ static inline u64 virtio_cread64(struct virtio_device *vdev, | |||
| 426 | static inline void virtio_cwrite64(struct virtio_device *vdev, | 443 | static inline void virtio_cwrite64(struct virtio_device *vdev, |
| 427 | unsigned int offset, u64 val) | 444 | unsigned int offset, u64 val) |
| 428 | { | 445 | { |
| 446 | might_sleep(); | ||
| 429 | val = (__force u64)cpu_to_virtio64(vdev, val); | 447 | val = (__force u64)cpu_to_virtio64(vdev, val); |
| 430 | vdev->config->set(vdev, offset, &val, sizeof(val)); | 448 | vdev->config->set(vdev, offset, &val, sizeof(val)); |
| 431 | } | 449 | } |
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index cb462f9ab7dd..0d1fe9297ac6 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h | |||
| @@ -57,6 +57,25 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, | |||
| 57 | 57 | ||
| 58 | if (!skb_partial_csum_set(skb, start, off)) | 58 | if (!skb_partial_csum_set(skb, start, off)) |
| 59 | return -EINVAL; | 59 | return -EINVAL; |
| 60 | } else { | ||
| 61 | /* gso packets without NEEDS_CSUM do not set transport_offset. | ||
| 62 | * probe and drop if does not match one of the above types. | ||
| 63 | */ | ||
| 64 | if (gso_type && skb->network_header) { | ||
| 65 | if (!skb->protocol) | ||
| 66 | virtio_net_hdr_set_proto(skb, hdr); | ||
| 67 | retry: | ||
| 68 | skb_probe_transport_header(skb); | ||
| 69 | if (!skb_transport_header_was_set(skb)) { | ||
| 70 | /* UFO does not specify ipv4 or 6: try both */ | ||
| 71 | if (gso_type & SKB_GSO_UDP && | ||
| 72 | skb->protocol == htons(ETH_P_IP)) { | ||
| 73 | skb->protocol = htons(ETH_P_IPV6); | ||
| 74 | goto retry; | ||
| 75 | } | ||
| 76 | return -EINVAL; | ||
| 77 | } | ||
| 78 | } | ||
| 60 | } | 79 | } |
| 61 | 80 | ||
| 62 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { | 81 | if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) { |
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h index fab02133a919..3dc70adfe5f5 100644 --- a/include/linux/virtio_ring.h +++ b/include/linux/virtio_ring.h | |||
| @@ -63,7 +63,7 @@ struct virtqueue; | |||
| 63 | /* | 63 | /* |
| 64 | * Creates a virtqueue and allocates the descriptor ring. If | 64 | * Creates a virtqueue and allocates the descriptor ring. If |
| 65 | * may_reduce_num is set, then this may allocate a smaller ring than | 65 | * may_reduce_num is set, then this may allocate a smaller ring than |
| 66 | * expected. The caller should query virtqueue_get_ring_size to learn | 66 | * expected. The caller should query virtqueue_get_vring_size to learn |
| 67 | * the actual size of the ring. | 67 | * the actual size of the ring. |
| 68 | */ | 68 | */ |
| 69 | struct virtqueue *vring_create_virtqueue(unsigned int index, | 69 | struct virtqueue *vring_create_virtqueue(unsigned int index, |
diff --git a/include/linux/vmw_vmci_defs.h b/include/linux/vmw_vmci_defs.h index b724ef7005de..eaa1e762bf06 100644 --- a/include/linux/vmw_vmci_defs.h +++ b/include/linux/vmw_vmci_defs.h | |||
| @@ -45,6 +45,7 @@ | |||
| 45 | #define VMCI_CAPS_GUESTCALL 0x2 | 45 | #define VMCI_CAPS_GUESTCALL 0x2 |
| 46 | #define VMCI_CAPS_DATAGRAM 0x4 | 46 | #define VMCI_CAPS_DATAGRAM 0x4 |
| 47 | #define VMCI_CAPS_NOTIFICATIONS 0x8 | 47 | #define VMCI_CAPS_NOTIFICATIONS 0x8 |
| 48 | #define VMCI_CAPS_PPN64 0x10 | ||
| 48 | 49 | ||
| 49 | /* Interrupt Cause register bits. */ | 50 | /* Interrupt Cause register bits. */ |
| 50 | #define VMCI_ICR_DATAGRAM 0x1 | 51 | #define VMCI_ICR_DATAGRAM 0x1 |
| @@ -569,8 +570,10 @@ struct vmci_resource_query_msg { | |||
| 569 | */ | 570 | */ |
| 570 | struct vmci_notify_bm_set_msg { | 571 | struct vmci_notify_bm_set_msg { |
| 571 | struct vmci_datagram hdr; | 572 | struct vmci_datagram hdr; |
| 572 | u32 bitmap_ppn; | 573 | union { |
| 573 | u32 _pad; | 574 | u32 bitmap_ppn32; |
| 575 | u64 bitmap_ppn64; | ||
| 576 | }; | ||
| 574 | }; | 577 | }; |
| 575 | 578 | ||
| 576 | /* | 579 | /* |
diff --git a/include/linux/wait.h b/include/linux/wait.h index ed7c122cb31f..5f3efabc36f4 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h | |||
| @@ -308,7 +308,7 @@ do { \ | |||
| 308 | 308 | ||
| 309 | #define __wait_event_freezable(wq_head, condition) \ | 309 | #define __wait_event_freezable(wq_head, condition) \ |
| 310 | ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ | 310 | ___wait_event(wq_head, condition, TASK_INTERRUPTIBLE, 0, 0, \ |
| 311 | schedule(); try_to_freeze()) | 311 | freezable_schedule()) |
| 312 | 312 | ||
| 313 | /** | 313 | /** |
| 314 | * wait_event_freezable - sleep (or freeze) until a condition gets true | 314 | * wait_event_freezable - sleep (or freeze) until a condition gets true |
| @@ -367,7 +367,7 @@ do { \ | |||
| 367 | #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ | 367 | #define __wait_event_freezable_timeout(wq_head, condition, timeout) \ |
| 368 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ | 368 | ___wait_event(wq_head, ___wait_cond_timeout(condition), \ |
| 369 | TASK_INTERRUPTIBLE, 0, timeout, \ | 369 | TASK_INTERRUPTIBLE, 0, timeout, \ |
| 370 | __ret = schedule_timeout(__ret); try_to_freeze()) | 370 | __ret = freezable_schedule_timeout(__ret)) |
| 371 | 371 | ||
| 372 | /* | 372 | /* |
| 373 | * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid | 373 | * like wait_event_timeout() -- except it uses TASK_INTERRUPTIBLE to avoid |
| @@ -588,7 +588,7 @@ do { \ | |||
| 588 | 588 | ||
| 589 | #define __wait_event_freezable_exclusive(wq, condition) \ | 589 | #define __wait_event_freezable_exclusive(wq, condition) \ |
| 590 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ | 590 | ___wait_event(wq, condition, TASK_INTERRUPTIBLE, 1, 0, \ |
| 591 | schedule(); try_to_freeze()) | 591 | freezable_schedule()) |
| 592 | 592 | ||
| 593 | #define wait_event_freezable_exclusive(wq, condition) \ | 593 | #define wait_event_freezable_exclusive(wq, condition) \ |
| 594 | ({ \ | 594 | ({ \ |
diff --git a/include/linux/wmi.h b/include/linux/wmi.h index 4757cb5077e5..592f81afecbb 100644 --- a/include/linux/wmi.h +++ b/include/linux/wmi.h | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | 18 | ||
| 19 | #include <linux/device.h> | 19 | #include <linux/device.h> |
| 20 | #include <linux/acpi.h> | 20 | #include <linux/acpi.h> |
| 21 | #include <linux/mod_devicetable.h> | ||
| 21 | #include <uapi/linux/wmi.h> | 22 | #include <uapi/linux/wmi.h> |
| 22 | 23 | ||
| 23 | struct wmi_device { | 24 | struct wmi_device { |
| @@ -39,10 +40,6 @@ extern union acpi_object *wmidev_block_query(struct wmi_device *wdev, | |||
| 39 | 40 | ||
| 40 | extern int set_required_buffer_size(struct wmi_device *wdev, u64 length); | 41 | extern int set_required_buffer_size(struct wmi_device *wdev, u64 length); |
| 41 | 42 | ||
| 42 | struct wmi_device_id { | ||
| 43 | const char *guid_string; | ||
| 44 | }; | ||
| 45 | |||
| 46 | struct wmi_driver { | 43 | struct wmi_driver { |
| 47 | struct device_driver driver; | 44 | struct device_driver driver; |
| 48 | const struct wmi_device_id *id_table; | 45 | const struct wmi_device_id *id_table; |
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h index 60d673e15632..d59525fca4d3 100644 --- a/include/linux/workqueue.h +++ b/include/linux/workqueue.h | |||
| @@ -390,43 +390,23 @@ extern struct workqueue_struct *system_freezable_wq; | |||
| 390 | extern struct workqueue_struct *system_power_efficient_wq; | 390 | extern struct workqueue_struct *system_power_efficient_wq; |
| 391 | extern struct workqueue_struct *system_freezable_power_efficient_wq; | 391 | extern struct workqueue_struct *system_freezable_power_efficient_wq; |
| 392 | 392 | ||
| 393 | extern struct workqueue_struct * | ||
| 394 | __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active, | ||
| 395 | struct lock_class_key *key, const char *lock_name, ...) __printf(1, 6); | ||
| 396 | |||
| 397 | /** | 393 | /** |
| 398 | * alloc_workqueue - allocate a workqueue | 394 | * alloc_workqueue - allocate a workqueue |
| 399 | * @fmt: printf format for the name of the workqueue | 395 | * @fmt: printf format for the name of the workqueue |
| 400 | * @flags: WQ_* flags | 396 | * @flags: WQ_* flags |
| 401 | * @max_active: max in-flight work items, 0 for default | 397 | * @max_active: max in-flight work items, 0 for default |
| 402 | * @args...: args for @fmt | 398 | * remaining args: args for @fmt |
| 403 | * | 399 | * |
| 404 | * Allocate a workqueue with the specified parameters. For detailed | 400 | * Allocate a workqueue with the specified parameters. For detailed |
| 405 | * information on WQ_* flags, please refer to | 401 | * information on WQ_* flags, please refer to |
| 406 | * Documentation/core-api/workqueue.rst. | 402 | * Documentation/core-api/workqueue.rst. |
| 407 | * | 403 | * |
| 408 | * The __lock_name macro dance is to guarantee that single lock_class_key | ||
| 409 | * doesn't end up with different namesm, which isn't allowed by lockdep. | ||
| 410 | * | ||
| 411 | * RETURNS: | 404 | * RETURNS: |
| 412 | * Pointer to the allocated workqueue on success, %NULL on failure. | 405 | * Pointer to the allocated workqueue on success, %NULL on failure. |
| 413 | */ | 406 | */ |
| 414 | #ifdef CONFIG_LOCKDEP | 407 | struct workqueue_struct *alloc_workqueue(const char *fmt, |
| 415 | #define alloc_workqueue(fmt, flags, max_active, args...) \ | 408 | unsigned int flags, |
| 416 | ({ \ | 409 | int max_active, ...); |
| 417 | static struct lock_class_key __key; \ | ||
| 418 | const char *__lock_name; \ | ||
| 419 | \ | ||
| 420 | __lock_name = "(wq_completion)"#fmt#args; \ | ||
| 421 | \ | ||
| 422 | __alloc_workqueue_key((fmt), (flags), (max_active), \ | ||
| 423 | &__key, __lock_name, ##args); \ | ||
| 424 | }) | ||
| 425 | #else | ||
| 426 | #define alloc_workqueue(fmt, flags, max_active, args...) \ | ||
| 427 | __alloc_workqueue_key((fmt), (flags), (max_active), \ | ||
| 428 | NULL, NULL, ##args) | ||
| 429 | #endif | ||
| 430 | 410 | ||
| 431 | /** | 411 | /** |
| 432 | * alloc_ordered_workqueue - allocate an ordered workqueue | 412 | * alloc_ordered_workqueue - allocate an ordered workqueue |
| @@ -463,6 +443,8 @@ int workqueue_set_unbound_cpumask(cpumask_var_t cpumask); | |||
| 463 | 443 | ||
| 464 | extern bool queue_work_on(int cpu, struct workqueue_struct *wq, | 444 | extern bool queue_work_on(int cpu, struct workqueue_struct *wq, |
| 465 | struct work_struct *work); | 445 | struct work_struct *work); |
| 446 | extern bool queue_work_node(int node, struct workqueue_struct *wq, | ||
| 447 | struct work_struct *work); | ||
| 466 | extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, | 448 | extern bool queue_delayed_work_on(int cpu, struct workqueue_struct *wq, |
| 467 | struct delayed_work *work, unsigned long delay); | 449 | struct delayed_work *work, unsigned long delay); |
| 468 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, | 450 | extern bool mod_delayed_work_on(int cpu, struct workqueue_struct *wq, |
diff --git a/include/linux/xarray.h b/include/linux/xarray.h index f492e21c4aa2..0e01e6129145 100644 --- a/include/linux/xarray.h +++ b/include/linux/xarray.h | |||
| @@ -131,6 +131,12 @@ static inline unsigned int xa_pointer_tag(void *entry) | |||
| 131 | * xa_mk_internal() - Create an internal entry. | 131 | * xa_mk_internal() - Create an internal entry. |
| 132 | * @v: Value to turn into an internal entry. | 132 | * @v: Value to turn into an internal entry. |
| 133 | * | 133 | * |
| 134 | * Internal entries are used for a number of purposes. Entries 0-255 are | ||
| 135 | * used for sibling entries (only 0-62 are used by the current code). 256 | ||
| 136 | * is used for the retry entry. 257 is used for the reserved / zero entry. | ||
| 137 | * Negative internal entries are used to represent errnos. Node pointers | ||
| 138 | * are also tagged as internal entries in some situations. | ||
| 139 | * | ||
| 134 | * Context: Any context. | 140 | * Context: Any context. |
| 135 | * Return: An XArray internal entry corresponding to this value. | 141 | * Return: An XArray internal entry corresponding to this value. |
| 136 | */ | 142 | */ |
| @@ -163,6 +169,22 @@ static inline bool xa_is_internal(const void *entry) | |||
| 163 | return ((unsigned long)entry & 3) == 2; | 169 | return ((unsigned long)entry & 3) == 2; |
| 164 | } | 170 | } |
| 165 | 171 | ||
| 172 | #define XA_ZERO_ENTRY xa_mk_internal(257) | ||
| 173 | |||
| 174 | /** | ||
| 175 | * xa_is_zero() - Is the entry a zero entry? | ||
| 176 | * @entry: Entry retrieved from the XArray | ||
| 177 | * | ||
| 178 | * The normal API will return NULL as the contents of a slot containing | ||
| 179 | * a zero entry. You can only see zero entries by using the advanced API. | ||
| 180 | * | ||
| 181 | * Return: %true if the entry is a zero entry. | ||
| 182 | */ | ||
| 183 | static inline bool xa_is_zero(const void *entry) | ||
| 184 | { | ||
| 185 | return unlikely(entry == XA_ZERO_ENTRY); | ||
| 186 | } | ||
| 187 | |||
| 166 | /** | 188 | /** |
| 167 | * xa_is_err() - Report whether an XArray operation returned an error | 189 | * xa_is_err() - Report whether an XArray operation returned an error |
| 168 | * @entry: Result from calling an XArray function | 190 | * @entry: Result from calling an XArray function |
| @@ -176,7 +198,8 @@ static inline bool xa_is_internal(const void *entry) | |||
| 176 | */ | 198 | */ |
| 177 | static inline bool xa_is_err(const void *entry) | 199 | static inline bool xa_is_err(const void *entry) |
| 178 | { | 200 | { |
| 179 | return unlikely(xa_is_internal(entry)); | 201 | return unlikely(xa_is_internal(entry) && |
| 202 | entry >= xa_mk_internal(-MAX_ERRNO)); | ||
| 180 | } | 203 | } |
| 181 | 204 | ||
| 182 | /** | 205 | /** |
| @@ -199,6 +222,27 @@ static inline int xa_err(void *entry) | |||
| 199 | return 0; | 222 | return 0; |
| 200 | } | 223 | } |
| 201 | 224 | ||
| 225 | /** | ||
| 226 | * struct xa_limit - Represents a range of IDs. | ||
| 227 | * @min: The lowest ID to allocate (inclusive). | ||
| 228 | * @max: The maximum ID to allocate (inclusive). | ||
| 229 | * | ||
| 230 | * This structure is used either directly or via the XA_LIMIT() macro | ||
| 231 | * to communicate the range of IDs that are valid for allocation. | ||
| 232 | * Two common ranges are predefined for you: | ||
| 233 | * * xa_limit_32b - [0 - UINT_MAX] | ||
| 234 | * * xa_limit_31b - [0 - INT_MAX] | ||
| 235 | */ | ||
| 236 | struct xa_limit { | ||
| 237 | u32 max; | ||
| 238 | u32 min; | ||
| 239 | }; | ||
| 240 | |||
| 241 | #define XA_LIMIT(_min, _max) (struct xa_limit) { .min = _min, .max = _max } | ||
| 242 | |||
| 243 | #define xa_limit_32b XA_LIMIT(0, UINT_MAX) | ||
| 244 | #define xa_limit_31b XA_LIMIT(0, INT_MAX) | ||
| 245 | |||
| 202 | typedef unsigned __bitwise xa_mark_t; | 246 | typedef unsigned __bitwise xa_mark_t; |
| 203 | #define XA_MARK_0 ((__force xa_mark_t)0U) | 247 | #define XA_MARK_0 ((__force xa_mark_t)0U) |
| 204 | #define XA_MARK_1 ((__force xa_mark_t)1U) | 248 | #define XA_MARK_1 ((__force xa_mark_t)1U) |
| @@ -219,10 +263,14 @@ enum xa_lock_type { | |||
| 219 | #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) | 263 | #define XA_FLAGS_LOCK_IRQ ((__force gfp_t)XA_LOCK_IRQ) |
| 220 | #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) | 264 | #define XA_FLAGS_LOCK_BH ((__force gfp_t)XA_LOCK_BH) |
| 221 | #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) | 265 | #define XA_FLAGS_TRACK_FREE ((__force gfp_t)4U) |
| 266 | #define XA_FLAGS_ZERO_BUSY ((__force gfp_t)8U) | ||
| 267 | #define XA_FLAGS_ALLOC_WRAPPED ((__force gfp_t)16U) | ||
| 222 | #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ | 268 | #define XA_FLAGS_MARK(mark) ((__force gfp_t)((1U << __GFP_BITS_SHIFT) << \ |
| 223 | (__force unsigned)(mark))) | 269 | (__force unsigned)(mark))) |
| 224 | 270 | ||
| 271 | /* ALLOC is for a normal 0-based alloc. ALLOC1 is for an 1-based alloc */ | ||
| 225 | #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) | 272 | #define XA_FLAGS_ALLOC (XA_FLAGS_TRACK_FREE | XA_FLAGS_MARK(XA_FREE_MARK)) |
| 273 | #define XA_FLAGS_ALLOC1 (XA_FLAGS_TRACK_FREE | XA_FLAGS_ZERO_BUSY) | ||
| 226 | 274 | ||
| 227 | /** | 275 | /** |
| 228 | * struct xarray - The anchor of the XArray. | 276 | * struct xarray - The anchor of the XArray. |
| @@ -278,7 +326,7 @@ struct xarray { | |||
| 278 | #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) | 326 | #define DEFINE_XARRAY(name) DEFINE_XARRAY_FLAGS(name, 0) |
| 279 | 327 | ||
| 280 | /** | 328 | /** |
| 281 | * DEFINE_XARRAY_ALLOC() - Define an XArray which can allocate IDs. | 329 | * DEFINE_XARRAY_ALLOC() - Define an XArray which allocates IDs starting at 0. |
| 282 | * @name: A string that names your XArray. | 330 | * @name: A string that names your XArray. |
| 283 | * | 331 | * |
| 284 | * This is intended for file scope definitions of allocating XArrays. | 332 | * This is intended for file scope definitions of allocating XArrays. |
| @@ -286,7 +334,15 @@ struct xarray { | |||
| 286 | */ | 334 | */ |
| 287 | #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) | 335 | #define DEFINE_XARRAY_ALLOC(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC) |
| 288 | 336 | ||
| 289 | void xa_init_flags(struct xarray *, gfp_t flags); | 337 | /** |
| 338 | * DEFINE_XARRAY_ALLOC1() - Define an XArray which allocates IDs starting at 1. | ||
| 339 | * @name: A string that names your XArray. | ||
| 340 | * | ||
| 341 | * This is intended for file scope definitions of allocating XArrays. | ||
| 342 | * See also DEFINE_XARRAY(). | ||
| 343 | */ | ||
| 344 | #define DEFINE_XARRAY_ALLOC1(name) DEFINE_XARRAY_FLAGS(name, XA_FLAGS_ALLOC1) | ||
| 345 | |||
| 290 | void *xa_load(struct xarray *, unsigned long index); | 346 | void *xa_load(struct xarray *, unsigned long index); |
| 291 | void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); | 347 | void *xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); |
| 292 | void *xa_erase(struct xarray *, unsigned long index); | 348 | void *xa_erase(struct xarray *, unsigned long index); |
| @@ -304,6 +360,24 @@ unsigned int xa_extract(struct xarray *, void **dst, unsigned long start, | |||
| 304 | void xa_destroy(struct xarray *); | 360 | void xa_destroy(struct xarray *); |
| 305 | 361 | ||
| 306 | /** | 362 | /** |
| 363 | * xa_init_flags() - Initialise an empty XArray with flags. | ||
| 364 | * @xa: XArray. | ||
| 365 | * @flags: XA_FLAG values. | ||
| 366 | * | ||
| 367 | * If you need to initialise an XArray with special flags (eg you need | ||
| 368 | * to take the lock from interrupt context), use this function instead | ||
| 369 | * of xa_init(). | ||
| 370 | * | ||
| 371 | * Context: Any context. | ||
| 372 | */ | ||
| 373 | static inline void xa_init_flags(struct xarray *xa, gfp_t flags) | ||
| 374 | { | ||
| 375 | spin_lock_init(&xa->xa_lock); | ||
| 376 | xa->xa_flags = flags; | ||
| 377 | xa->xa_head = NULL; | ||
| 378 | } | ||
| 379 | |||
| 380 | /** | ||
| 307 | * xa_init() - Initialise an empty XArray. | 381 | * xa_init() - Initialise an empty XArray. |
| 308 | * @xa: XArray. | 382 | * @xa: XArray. |
| 309 | * | 383 | * |
| @@ -342,20 +416,45 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) | |||
| 342 | } | 416 | } |
| 343 | 417 | ||
| 344 | /** | 418 | /** |
| 345 | * xa_for_each() - Iterate over a portion of an XArray. | 419 | * xa_for_each_start() - Iterate over a portion of an XArray. |
| 346 | * @xa: XArray. | 420 | * @xa: XArray. |
| 421 | * @index: Index of @entry. | ||
| 347 | * @entry: Entry retrieved from array. | 422 | * @entry: Entry retrieved from array. |
| 423 | * @start: First index to retrieve from array. | ||
| 424 | * | ||
| 425 | * During the iteration, @entry will have the value of the entry stored | ||
| 426 | * in @xa at @index. You may modify @index during the iteration if you | ||
| 427 | * want to skip or reprocess indices. It is safe to modify the array | ||
| 428 | * during the iteration. At the end of the iteration, @entry will be set | ||
| 429 | * to NULL and @index will have a value less than or equal to max. | ||
| 430 | * | ||
| 431 | * xa_for_each_start() is O(n.log(n)) while xas_for_each() is O(n). You have | ||
| 432 | * to handle your own locking with xas_for_each(), and if you have to unlock | ||
| 433 | * after each iteration, it will also end up being O(n.log(n)). | ||
| 434 | * xa_for_each_start() will spin if it hits a retry entry; if you intend to | ||
| 435 | * see retry entries, you should use the xas_for_each() iterator instead. | ||
| 436 | * The xas_for_each() iterator will expand into more inline code than | ||
| 437 | * xa_for_each_start(). | ||
| 438 | * | ||
| 439 | * Context: Any context. Takes and releases the RCU lock. | ||
| 440 | */ | ||
| 441 | #define xa_for_each_start(xa, index, entry, start) \ | ||
| 442 | for (index = start, \ | ||
| 443 | entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \ | ||
| 444 | entry; \ | ||
| 445 | entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)) | ||
| 446 | |||
| 447 | /** | ||
| 448 | * xa_for_each() - Iterate over present entries in an XArray. | ||
| 449 | * @xa: XArray. | ||
| 348 | * @index: Index of @entry. | 450 | * @index: Index of @entry. |
| 349 | * @max: Maximum index to retrieve from array. | 451 | * @entry: Entry retrieved from array. |
| 350 | * @filter: Selection criterion. | ||
| 351 | * | 452 | * |
| 352 | * Initialise @index to the lowest index you want to retrieve from the | 453 | * During the iteration, @entry will have the value of the entry stored |
| 353 | * array. During the iteration, @entry will have the value of the entry | 454 | * in @xa at @index. You may modify @index during the iteration if you want |
| 354 | * stored in @xa at @index. The iteration will skip all entries in the | 455 | * to skip or reprocess indices. It is safe to modify the array during the |
| 355 | * array which do not match @filter. You may modify @index during the | 456 | * iteration. At the end of the iteration, @entry will be set to NULL and |
| 356 | * iteration if you want to skip or reprocess indices. It is safe to modify | 457 | * @index will have a value less than or equal to max. |
| 357 | * the array during the iteration. At the end of the iteration, @entry will | ||
| 358 | * be set to NULL and @index will have a value less than or equal to max. | ||
| 359 | * | 458 | * |
| 360 | * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have | 459 | * xa_for_each() is O(n.log(n)) while xas_for_each() is O(n). You have |
| 361 | * to handle your own locking with xas_for_each(), and if you have to unlock | 460 | * to handle your own locking with xas_for_each(), and if you have to unlock |
| @@ -366,9 +465,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark) | |||
| 366 | * | 465 | * |
| 367 | * Context: Any context. Takes and releases the RCU lock. | 466 | * Context: Any context. Takes and releases the RCU lock. |
| 368 | */ | 467 | */ |
| 369 | #define xa_for_each(xa, entry, index, max, filter) \ | 468 | #define xa_for_each(xa, index, entry) \ |
| 370 | for (entry = xa_find(xa, &index, max, filter); entry; \ | 469 | xa_for_each_start(xa, index, entry, 0) |
| 371 | entry = xa_find_after(xa, &index, max, filter)) | 470 | |
| 471 | /** | ||
| 472 | * xa_for_each_marked() - Iterate over marked entries in an XArray. | ||
| 473 | * @xa: XArray. | ||
| 474 | * @index: Index of @entry. | ||
| 475 | * @entry: Entry retrieved from array. | ||
| 476 | * @filter: Selection criterion. | ||
| 477 | * | ||
| 478 | * During the iteration, @entry will have the value of the entry stored | ||
| 479 | * in @xa at @index. The iteration will skip all entries in the array | ||
| 480 | * which do not match @filter. You may modify @index during the iteration | ||
| 481 | * if you want to skip or reprocess indices. It is safe to modify the array | ||
| 482 | * during the iteration. At the end of the iteration, @entry will be set to | ||
| 483 | * NULL and @index will have a value less than or equal to max. | ||
| 484 | * | ||
| 485 | * xa_for_each_marked() is O(n.log(n)) while xas_for_each_marked() is O(n). | ||
| 486 | * You have to handle your own locking with xas_for_each(), and if you have | ||
| 487 | * to unlock after each iteration, it will also end up being O(n.log(n)). | ||
| 488 | * xa_for_each_marked() will spin if it hits a retry entry; if you intend to | ||
| 489 | * see retry entries, you should use the xas_for_each_marked() iterator | ||
| 490 | * instead. The xas_for_each_marked() iterator will expand into more inline | ||
| 491 | * code than xa_for_each_marked(). | ||
| 492 | * | ||
| 493 | * Context: Any context. Takes and releases the RCU lock. | ||
| 494 | */ | ||
| 495 | #define xa_for_each_marked(xa, index, entry, filter) \ | ||
| 496 | for (index = 0, entry = xa_find(xa, &index, ULONG_MAX, filter); \ | ||
| 497 | entry; entry = xa_find_after(xa, &index, ULONG_MAX, filter)) | ||
| 372 | 498 | ||
| 373 | #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) | 499 | #define xa_trylock(xa) spin_trylock(&(xa)->xa_lock) |
| 374 | #define xa_lock(xa) spin_lock(&(xa)->xa_lock) | 500 | #define xa_lock(xa) spin_lock(&(xa)->xa_lock) |
| @@ -393,40 +519,16 @@ void *__xa_erase(struct xarray *, unsigned long index); | |||
| 393 | void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); | 519 | void *__xa_store(struct xarray *, unsigned long index, void *entry, gfp_t); |
| 394 | void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, | 520 | void *__xa_cmpxchg(struct xarray *, unsigned long index, void *old, |
| 395 | void *entry, gfp_t); | 521 | void *entry, gfp_t); |
| 396 | int __xa_alloc(struct xarray *, u32 *id, u32 max, void *entry, gfp_t); | 522 | int __must_check __xa_insert(struct xarray *, unsigned long index, |
| 397 | int __xa_reserve(struct xarray *, unsigned long index, gfp_t); | 523 | void *entry, gfp_t); |
| 524 | int __must_check __xa_alloc(struct xarray *, u32 *id, void *entry, | ||
| 525 | struct xa_limit, gfp_t); | ||
| 526 | int __must_check __xa_alloc_cyclic(struct xarray *, u32 *id, void *entry, | ||
| 527 | struct xa_limit, u32 *next, gfp_t); | ||
| 398 | void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); | 528 | void __xa_set_mark(struct xarray *, unsigned long index, xa_mark_t); |
| 399 | void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); | 529 | void __xa_clear_mark(struct xarray *, unsigned long index, xa_mark_t); |
| 400 | 530 | ||
| 401 | /** | 531 | /** |
| 402 | * __xa_insert() - Store this entry in the XArray unless another entry is | ||
| 403 | * already present. | ||
| 404 | * @xa: XArray. | ||
| 405 | * @index: Index into array. | ||
| 406 | * @entry: New entry. | ||
| 407 | * @gfp: Memory allocation flags. | ||
| 408 | * | ||
| 409 | * If you would rather see the existing entry in the array, use __xa_cmpxchg(). | ||
| 410 | * This function is for users who don't care what the entry is, only that | ||
| 411 | * one is present. | ||
| 412 | * | ||
| 413 | * Context: Any context. Expects xa_lock to be held on entry. May | ||
| 414 | * release and reacquire xa_lock if the @gfp flags permit. | ||
| 415 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. | ||
| 416 | * -ENOMEM if memory could not be allocated. | ||
| 417 | */ | ||
| 418 | static inline int __xa_insert(struct xarray *xa, unsigned long index, | ||
| 419 | void *entry, gfp_t gfp) | ||
| 420 | { | ||
| 421 | void *curr = __xa_cmpxchg(xa, index, NULL, entry, gfp); | ||
| 422 | if (!curr) | ||
| 423 | return 0; | ||
| 424 | if (xa_is_err(curr)) | ||
| 425 | return xa_err(curr); | ||
| 426 | return -EEXIST; | ||
| 427 | } | ||
| 428 | |||
| 429 | /** | ||
| 430 | * xa_store_bh() - Store this entry in the XArray. | 532 | * xa_store_bh() - Store this entry in the XArray. |
| 431 | * @xa: XArray. | 533 | * @xa: XArray. |
| 432 | * @index: Index into array. | 534 | * @index: Index into array. |
| @@ -453,7 +555,7 @@ static inline void *xa_store_bh(struct xarray *xa, unsigned long index, | |||
| 453 | } | 555 | } |
| 454 | 556 | ||
| 455 | /** | 557 | /** |
| 456 | * xa_store_irq() - Erase this entry from the XArray. | 558 | * xa_store_irq() - Store this entry in the XArray. |
| 457 | * @xa: XArray. | 559 | * @xa: XArray. |
| 458 | * @index: Index into array. | 560 | * @index: Index into array. |
| 459 | * @entry: New entry. | 561 | * @entry: New entry. |
| @@ -483,9 +585,9 @@ static inline void *xa_store_irq(struct xarray *xa, unsigned long index, | |||
| 483 | * @xa: XArray. | 585 | * @xa: XArray. |
| 484 | * @index: Index of entry. | 586 | * @index: Index of entry. |
| 485 | * | 587 | * |
| 486 | * This function is the equivalent of calling xa_store() with %NULL as | 588 | * After this function returns, loading from @index will return %NULL. |
| 487 | * the third argument. The XArray does not need to allocate memory, so | 589 | * If the index is part of a multi-index entry, all indices will be erased |
| 488 | * the user does not need to provide GFP flags. | 590 | * and none of the entries will be part of a multi-index entry. |
| 489 | * | 591 | * |
| 490 | * Context: Any context. Takes and releases the xa_lock while | 592 | * Context: Any context. Takes and releases the xa_lock while |
| 491 | * disabling softirqs. | 593 | * disabling softirqs. |
| @@ -507,9 +609,9 @@ static inline void *xa_erase_bh(struct xarray *xa, unsigned long index) | |||
| 507 | * @xa: XArray. | 609 | * @xa: XArray. |
| 508 | * @index: Index of entry. | 610 | * @index: Index of entry. |
| 509 | * | 611 | * |
| 510 | * This function is the equivalent of calling xa_store() with %NULL as | 612 | * After this function returns, loading from @index will return %NULL. |
| 511 | * the third argument. The XArray does not need to allocate memory, so | 613 | * If the index is part of a multi-index entry, all indices will be erased |
| 512 | * the user does not need to provide GFP flags. | 614 | * and none of the entries will be part of a multi-index entry. |
| 513 | * | 615 | * |
| 514 | * Context: Process context. Takes and releases the xa_lock while | 616 | * Context: Process context. Takes and releases the xa_lock while |
| 515 | * disabling interrupts. | 617 | * disabling interrupts. |
| @@ -615,50 +717,109 @@ static inline void *xa_cmpxchg_irq(struct xarray *xa, unsigned long index, | |||
| 615 | * @entry: New entry. | 717 | * @entry: New entry. |
| 616 | * @gfp: Memory allocation flags. | 718 | * @gfp: Memory allocation flags. |
| 617 | * | 719 | * |
| 618 | * If you would rather see the existing entry in the array, use xa_cmpxchg(). | 720 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) |
| 619 | * This function is for users who don't care what the entry is, only that | 721 | * if no entry is present. Inserting will fail if a reserved entry is |
| 620 | * one is present. | 722 | * present, even though loading from this index will return NULL. |
| 621 | * | 723 | * |
| 622 | * Context: Process context. Takes and releases the xa_lock. | 724 | * Context: Any context. Takes and releases the xa_lock. May sleep if |
| 623 | * May sleep if the @gfp flags permit. | 725 | * the @gfp flags permit. |
| 624 | * Return: 0 if the store succeeded. -EEXIST if another entry was present. | 726 | * Return: 0 if the store succeeded. -EBUSY if another entry was present. |
| 625 | * -ENOMEM if memory could not be allocated. | 727 | * -ENOMEM if memory could not be allocated. |
| 626 | */ | 728 | */ |
| 627 | static inline int xa_insert(struct xarray *xa, unsigned long index, | 729 | static inline int __must_check xa_insert(struct xarray *xa, |
| 628 | void *entry, gfp_t gfp) | 730 | unsigned long index, void *entry, gfp_t gfp) |
| 629 | { | 731 | { |
| 630 | void *curr = xa_cmpxchg(xa, index, NULL, entry, gfp); | 732 | int err; |
| 631 | if (!curr) | 733 | |
| 632 | return 0; | 734 | xa_lock(xa); |
| 633 | if (xa_is_err(curr)) | 735 | err = __xa_insert(xa, index, entry, gfp); |
| 634 | return xa_err(curr); | 736 | xa_unlock(xa); |
| 635 | return -EEXIST; | 737 | |
| 738 | return err; | ||
| 739 | } | ||
| 740 | |||
| 741 | /** | ||
| 742 | * xa_insert_bh() - Store this entry in the XArray unless another entry is | ||
| 743 | * already present. | ||
| 744 | * @xa: XArray. | ||
| 745 | * @index: Index into array. | ||
| 746 | * @entry: New entry. | ||
| 747 | * @gfp: Memory allocation flags. | ||
| 748 | * | ||
| 749 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) | ||
| 750 | * if no entry is present. Inserting will fail if a reserved entry is | ||
| 751 | * present, even though loading from this index will return NULL. | ||
| 752 | * | ||
| 753 | * Context: Any context. Takes and releases the xa_lock while | ||
| 754 | * disabling softirqs. May sleep if the @gfp flags permit. | ||
| 755 | * Return: 0 if the store succeeded. -EBUSY if another entry was present. | ||
| 756 | * -ENOMEM if memory could not be allocated. | ||
| 757 | */ | ||
| 758 | static inline int __must_check xa_insert_bh(struct xarray *xa, | ||
| 759 | unsigned long index, void *entry, gfp_t gfp) | ||
| 760 | { | ||
| 761 | int err; | ||
| 762 | |||
| 763 | xa_lock_bh(xa); | ||
| 764 | err = __xa_insert(xa, index, entry, gfp); | ||
| 765 | xa_unlock_bh(xa); | ||
| 766 | |||
| 767 | return err; | ||
| 768 | } | ||
| 769 | |||
| 770 | /** | ||
| 771 | * xa_insert_irq() - Store this entry in the XArray unless another entry is | ||
| 772 | * already present. | ||
| 773 | * @xa: XArray. | ||
| 774 | * @index: Index into array. | ||
| 775 | * @entry: New entry. | ||
| 776 | * @gfp: Memory allocation flags. | ||
| 777 | * | ||
| 778 | * Inserting a NULL entry will store a reserved entry (like xa_reserve()) | ||
| 779 | * if no entry is present. Inserting will fail if a reserved entry is | ||
| 780 | * present, even though loading from this index will return NULL. | ||
| 781 | * | ||
| 782 | * Context: Process context. Takes and releases the xa_lock while | ||
| 783 | * disabling interrupts. May sleep if the @gfp flags permit. | ||
| 784 | * Return: 0 if the store succeeded. -EBUSY if another entry was present. | ||
| 785 | * -ENOMEM if memory could not be allocated. | ||
| 786 | */ | ||
| 787 | static inline int __must_check xa_insert_irq(struct xarray *xa, | ||
| 788 | unsigned long index, void *entry, gfp_t gfp) | ||
| 789 | { | ||
| 790 | int err; | ||
| 791 | |||
| 792 | xa_lock_irq(xa); | ||
| 793 | err = __xa_insert(xa, index, entry, gfp); | ||
| 794 | xa_unlock_irq(xa); | ||
| 795 | |||
| 796 | return err; | ||
| 636 | } | 797 | } |
| 637 | 798 | ||
| 638 | /** | 799 | /** |
| 639 | * xa_alloc() - Find somewhere to store this entry in the XArray. | 800 | * xa_alloc() - Find somewhere to store this entry in the XArray. |
| 640 | * @xa: XArray. | 801 | * @xa: XArray. |
| 641 | * @id: Pointer to ID. | 802 | * @id: Pointer to ID. |
| 642 | * @max: Maximum ID to allocate (inclusive). | ||
| 643 | * @entry: New entry. | 803 | * @entry: New entry. |
| 804 | * @limit: Range of ID to allocate. | ||
| 644 | * @gfp: Memory allocation flags. | 805 | * @gfp: Memory allocation flags. |
| 645 | * | 806 | * |
| 646 | * Allocates an unused ID in the range specified by @id and @max. | 807 | * Finds an empty entry in @xa between @limit.min and @limit.max, |
| 647 | * Updates the @id pointer with the index, then stores the entry at that | 808 | * stores the index into the @id pointer, then stores the entry at |
| 648 | * index. A concurrent lookup will not see an uninitialised @id. | 809 | * that index. A concurrent lookup will not see an uninitialised @id. |
| 649 | * | 810 | * |
| 650 | * Context: Process context. Takes and releases the xa_lock. May sleep if | 811 | * Context: Any context. Takes and releases the xa_lock. May sleep if |
| 651 | * the @gfp flags permit. | 812 | * the @gfp flags permit. |
| 652 | * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if | 813 | * Return: 0 on success, -ENOMEM if memory could not be allocated or |
| 653 | * there is no more space in the XArray. | 814 | * -EBUSY if there are no free entries in @limit. |
| 654 | */ | 815 | */ |
| 655 | static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, | 816 | static inline __must_check int xa_alloc(struct xarray *xa, u32 *id, |
| 656 | gfp_t gfp) | 817 | void *entry, struct xa_limit limit, gfp_t gfp) |
| 657 | { | 818 | { |
| 658 | int err; | 819 | int err; |
| 659 | 820 | ||
| 660 | xa_lock(xa); | 821 | xa_lock(xa); |
| 661 | err = __xa_alloc(xa, id, max, entry, gfp); | 822 | err = __xa_alloc(xa, id, entry, limit, gfp); |
| 662 | xa_unlock(xa); | 823 | xa_unlock(xa); |
| 663 | 824 | ||
| 664 | return err; | 825 | return err; |
| @@ -668,26 +829,26 @@ static inline int xa_alloc(struct xarray *xa, u32 *id, u32 max, void *entry, | |||
| 668 | * xa_alloc_bh() - Find somewhere to store this entry in the XArray. | 829 | * xa_alloc_bh() - Find somewhere to store this entry in the XArray. |
| 669 | * @xa: XArray. | 830 | * @xa: XArray. |
| 670 | * @id: Pointer to ID. | 831 | * @id: Pointer to ID. |
| 671 | * @max: Maximum ID to allocate (inclusive). | ||
| 672 | * @entry: New entry. | 832 | * @entry: New entry. |
| 833 | * @limit: Range of ID to allocate. | ||
| 673 | * @gfp: Memory allocation flags. | 834 | * @gfp: Memory allocation flags. |
| 674 | * | 835 | * |
| 675 | * Allocates an unused ID in the range specified by @id and @max. | 836 | * Finds an empty entry in @xa between @limit.min and @limit.max, |
| 676 | * Updates the @id pointer with the index, then stores the entry at that | 837 | * stores the index into the @id pointer, then stores the entry at |
| 677 | * index. A concurrent lookup will not see an uninitialised @id. | 838 | * that index. A concurrent lookup will not see an uninitialised @id. |
| 678 | * | 839 | * |
| 679 | * Context: Any context. Takes and releases the xa_lock while | 840 | * Context: Any context. Takes and releases the xa_lock while |
| 680 | * disabling softirqs. May sleep if the @gfp flags permit. | 841 | * disabling softirqs. May sleep if the @gfp flags permit. |
| 681 | * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if | 842 | * Return: 0 on success, -ENOMEM if memory could not be allocated or |
| 682 | * there is no more space in the XArray. | 843 | * -EBUSY if there are no free entries in @limit. |
| 683 | */ | 844 | */ |
| 684 | static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, | 845 | static inline int __must_check xa_alloc_bh(struct xarray *xa, u32 *id, |
| 685 | gfp_t gfp) | 846 | void *entry, struct xa_limit limit, gfp_t gfp) |
| 686 | { | 847 | { |
| 687 | int err; | 848 | int err; |
| 688 | 849 | ||
| 689 | xa_lock_bh(xa); | 850 | xa_lock_bh(xa); |
| 690 | err = __xa_alloc(xa, id, max, entry, gfp); | 851 | err = __xa_alloc(xa, id, entry, limit, gfp); |
| 691 | xa_unlock_bh(xa); | 852 | xa_unlock_bh(xa); |
| 692 | 853 | ||
| 693 | return err; | 854 | return err; |
| @@ -697,26 +858,125 @@ static inline int xa_alloc_bh(struct xarray *xa, u32 *id, u32 max, void *entry, | |||
| 697 | * xa_alloc_irq() - Find somewhere to store this entry in the XArray. | 858 | * xa_alloc_irq() - Find somewhere to store this entry in the XArray. |
| 698 | * @xa: XArray. | 859 | * @xa: XArray. |
| 699 | * @id: Pointer to ID. | 860 | * @id: Pointer to ID. |
| 700 | * @max: Maximum ID to allocate (inclusive). | ||
| 701 | * @entry: New entry. | 861 | * @entry: New entry. |
| 862 | * @limit: Range of ID to allocate. | ||
| 863 | * @gfp: Memory allocation flags. | ||
| 864 | * | ||
| 865 | * Finds an empty entry in @xa between @limit.min and @limit.max, | ||
| 866 | * stores the index into the @id pointer, then stores the entry at | ||
| 867 | * that index. A concurrent lookup will not see an uninitialised @id. | ||
| 868 | * | ||
| 869 | * Context: Process context. Takes and releases the xa_lock while | ||
| 870 | * disabling interrupts. May sleep if the @gfp flags permit. | ||
| 871 | * Return: 0 on success, -ENOMEM if memory could not be allocated or | ||
| 872 | * -EBUSY if there are no free entries in @limit. | ||
| 873 | */ | ||
| 874 | static inline int __must_check xa_alloc_irq(struct xarray *xa, u32 *id, | ||
| 875 | void *entry, struct xa_limit limit, gfp_t gfp) | ||
| 876 | { | ||
| 877 | int err; | ||
| 878 | |||
| 879 | xa_lock_irq(xa); | ||
| 880 | err = __xa_alloc(xa, id, entry, limit, gfp); | ||
| 881 | xa_unlock_irq(xa); | ||
| 882 | |||
| 883 | return err; | ||
| 884 | } | ||
| 885 | |||
| 886 | /** | ||
| 887 | * xa_alloc_cyclic() - Find somewhere to store this entry in the XArray. | ||
| 888 | * @xa: XArray. | ||
| 889 | * @id: Pointer to ID. | ||
| 890 | * @entry: New entry. | ||
| 891 | * @limit: Range of allocated ID. | ||
| 892 | * @next: Pointer to next ID to allocate. | ||
| 893 | * @gfp: Memory allocation flags. | ||
| 894 | * | ||
| 895 | * Finds an empty entry in @xa between @limit.min and @limit.max, | ||
| 896 | * stores the index into the @id pointer, then stores the entry at | ||
| 897 | * that index. A concurrent lookup will not see an uninitialised @id. | ||
| 898 | * The search for an empty entry will start at @next and will wrap | ||
| 899 | * around if necessary. | ||
| 900 | * | ||
| 901 | * Context: Any context. Takes and releases the xa_lock. May sleep if | ||
| 902 | * the @gfp flags permit. | ||
| 903 | * Return: 0 if the allocation succeeded without wrapping. 1 if the | ||
| 904 | * allocation succeeded after wrapping, -ENOMEM if memory could not be | ||
| 905 | * allocated or -EBUSY if there are no free entries in @limit. | ||
| 906 | */ | ||
| 907 | static inline int xa_alloc_cyclic(struct xarray *xa, u32 *id, void *entry, | ||
| 908 | struct xa_limit limit, u32 *next, gfp_t gfp) | ||
| 909 | { | ||
| 910 | int err; | ||
| 911 | |||
| 912 | xa_lock(xa); | ||
| 913 | err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); | ||
| 914 | xa_unlock(xa); | ||
| 915 | |||
| 916 | return err; | ||
| 917 | } | ||
| 918 | |||
| 919 | /** | ||
| 920 | * xa_alloc_cyclic_bh() - Find somewhere to store this entry in the XArray. | ||
| 921 | * @xa: XArray. | ||
| 922 | * @id: Pointer to ID. | ||
| 923 | * @entry: New entry. | ||
| 924 | * @limit: Range of allocated ID. | ||
| 925 | * @next: Pointer to next ID to allocate. | ||
| 926 | * @gfp: Memory allocation flags. | ||
| 927 | * | ||
| 928 | * Finds an empty entry in @xa between @limit.min and @limit.max, | ||
| 929 | * stores the index into the @id pointer, then stores the entry at | ||
| 930 | * that index. A concurrent lookup will not see an uninitialised @id. | ||
| 931 | * The search for an empty entry will start at @next and will wrap | ||
| 932 | * around if necessary. | ||
| 933 | * | ||
| 934 | * Context: Any context. Takes and releases the xa_lock while | ||
| 935 | * disabling softirqs. May sleep if the @gfp flags permit. | ||
| 936 | * Return: 0 if the allocation succeeded without wrapping. 1 if the | ||
| 937 | * allocation succeeded after wrapping, -ENOMEM if memory could not be | ||
| 938 | * allocated or -EBUSY if there are no free entries in @limit. | ||
| 939 | */ | ||
| 940 | static inline int xa_alloc_cyclic_bh(struct xarray *xa, u32 *id, void *entry, | ||
| 941 | struct xa_limit limit, u32 *next, gfp_t gfp) | ||
| 942 | { | ||
| 943 | int err; | ||
| 944 | |||
| 945 | xa_lock_bh(xa); | ||
| 946 | err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); | ||
| 947 | xa_unlock_bh(xa); | ||
| 948 | |||
| 949 | return err; | ||
| 950 | } | ||
| 951 | |||
| 952 | /** | ||
| 953 | * xa_alloc_cyclic_irq() - Find somewhere to store this entry in the XArray. | ||
| 954 | * @xa: XArray. | ||
| 955 | * @id: Pointer to ID. | ||
| 956 | * @entry: New entry. | ||
| 957 | * @limit: Range of allocated ID. | ||
| 958 | * @next: Pointer to next ID to allocate. | ||
| 702 | * @gfp: Memory allocation flags. | 959 | * @gfp: Memory allocation flags. |
| 703 | * | 960 | * |
| 704 | * Allocates an unused ID in the range specified by @id and @max. | 961 | * Finds an empty entry in @xa between @limit.min and @limit.max, |
| 705 | * Updates the @id pointer with the index, then stores the entry at that | 962 | * stores the index into the @id pointer, then stores the entry at |
| 706 | * index. A concurrent lookup will not see an uninitialised @id. | 963 | * that index. A concurrent lookup will not see an uninitialised @id. |
| 964 | * The search for an empty entry will start at @next and will wrap | ||
| 965 | * around if necessary. | ||
| 707 | * | 966 | * |
| 708 | * Context: Process context. Takes and releases the xa_lock while | 967 | * Context: Process context. Takes and releases the xa_lock while |
| 709 | * disabling interrupts. May sleep if the @gfp flags permit. | 968 | * disabling interrupts. May sleep if the @gfp flags permit. |
| 710 | * Return: 0 on success, -ENOMEM if memory allocation fails or -ENOSPC if | 969 | * Return: 0 if the allocation succeeded without wrapping. 1 if the |
| 711 | * there is no more space in the XArray. | 970 | * allocation succeeded after wrapping, -ENOMEM if memory could not be |
| 971 | * allocated or -EBUSY if there are no free entries in @limit. | ||
| 712 | */ | 972 | */ |
| 713 | static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, | 973 | static inline int xa_alloc_cyclic_irq(struct xarray *xa, u32 *id, void *entry, |
| 714 | gfp_t gfp) | 974 | struct xa_limit limit, u32 *next, gfp_t gfp) |
| 715 | { | 975 | { |
| 716 | int err; | 976 | int err; |
| 717 | 977 | ||
| 718 | xa_lock_irq(xa); | 978 | xa_lock_irq(xa); |
| 719 | err = __xa_alloc(xa, id, max, entry, gfp); | 979 | err = __xa_alloc_cyclic(xa, id, entry, limit, next, gfp); |
| 720 | xa_unlock_irq(xa); | 980 | xa_unlock_irq(xa); |
| 721 | 981 | ||
| 722 | return err; | 982 | return err; |
| @@ -740,16 +1000,10 @@ static inline int xa_alloc_irq(struct xarray *xa, u32 *id, u32 max, void *entry, | |||
| 740 | * May sleep if the @gfp flags permit. | 1000 | * May sleep if the @gfp flags permit. |
| 741 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. | 1001 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. |
| 742 | */ | 1002 | */ |
| 743 | static inline | 1003 | static inline __must_check |
| 744 | int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) | 1004 | int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) |
| 745 | { | 1005 | { |
| 746 | int ret; | 1006 | return xa_err(xa_cmpxchg(xa, index, NULL, XA_ZERO_ENTRY, gfp)); |
| 747 | |||
| 748 | xa_lock(xa); | ||
| 749 | ret = __xa_reserve(xa, index, gfp); | ||
| 750 | xa_unlock(xa); | ||
| 751 | |||
| 752 | return ret; | ||
| 753 | } | 1007 | } |
| 754 | 1008 | ||
| 755 | /** | 1009 | /** |
| @@ -764,16 +1018,10 @@ int xa_reserve(struct xarray *xa, unsigned long index, gfp_t gfp) | |||
| 764 | * disabling softirqs. | 1018 | * disabling softirqs. |
| 765 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. | 1019 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. |
| 766 | */ | 1020 | */ |
| 767 | static inline | 1021 | static inline __must_check |
| 768 | int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) | 1022 | int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) |
| 769 | { | 1023 | { |
| 770 | int ret; | 1024 | return xa_err(xa_cmpxchg_bh(xa, index, NULL, XA_ZERO_ENTRY, gfp)); |
| 771 | |||
| 772 | xa_lock_bh(xa); | ||
| 773 | ret = __xa_reserve(xa, index, gfp); | ||
| 774 | xa_unlock_bh(xa); | ||
| 775 | |||
| 776 | return ret; | ||
| 777 | } | 1025 | } |
| 778 | 1026 | ||
| 779 | /** | 1027 | /** |
| @@ -788,16 +1036,10 @@ int xa_reserve_bh(struct xarray *xa, unsigned long index, gfp_t gfp) | |||
| 788 | * disabling interrupts. | 1036 | * disabling interrupts. |
| 789 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. | 1037 | * Return: 0 if the reservation succeeded or -ENOMEM if it failed. |
| 790 | */ | 1038 | */ |
| 791 | static inline | 1039 | static inline __must_check |
| 792 | int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) | 1040 | int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) |
| 793 | { | 1041 | { |
| 794 | int ret; | 1042 | return xa_err(xa_cmpxchg_irq(xa, index, NULL, XA_ZERO_ENTRY, gfp)); |
| 795 | |||
| 796 | xa_lock_irq(xa); | ||
| 797 | ret = __xa_reserve(xa, index, gfp); | ||
| 798 | xa_unlock_irq(xa); | ||
| 799 | |||
| 800 | return ret; | ||
| 801 | } | 1043 | } |
| 802 | 1044 | ||
| 803 | /** | 1045 | /** |
| @@ -811,7 +1053,7 @@ int xa_reserve_irq(struct xarray *xa, unsigned long index, gfp_t gfp) | |||
| 811 | */ | 1053 | */ |
| 812 | static inline void xa_release(struct xarray *xa, unsigned long index) | 1054 | static inline void xa_release(struct xarray *xa, unsigned long index) |
| 813 | { | 1055 | { |
| 814 | xa_cmpxchg(xa, index, NULL, NULL, 0); | 1056 | xa_cmpxchg(xa, index, XA_ZERO_ENTRY, NULL, 0); |
| 815 | } | 1057 | } |
| 816 | 1058 | ||
| 817 | /* Everything below here is the Advanced API. Proceed with caution. */ | 1059 | /* Everything below here is the Advanced API. Proceed with caution. */ |
| @@ -970,29 +1212,28 @@ static inline bool xa_is_sibling(const void *entry) | |||
| 970 | (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); | 1212 | (entry < xa_mk_sibling(XA_CHUNK_SIZE - 1)); |
| 971 | } | 1213 | } |
| 972 | 1214 | ||
| 973 | #define XA_ZERO_ENTRY xa_mk_internal(256) | 1215 | #define XA_RETRY_ENTRY xa_mk_internal(256) |
| 974 | #define XA_RETRY_ENTRY xa_mk_internal(257) | ||
| 975 | 1216 | ||
| 976 | /** | 1217 | /** |
| 977 | * xa_is_zero() - Is the entry a zero entry? | 1218 | * xa_is_retry() - Is the entry a retry entry? |
| 978 | * @entry: Entry retrieved from the XArray | 1219 | * @entry: Entry retrieved from the XArray |
| 979 | * | 1220 | * |
| 980 | * Return: %true if the entry is a zero entry. | 1221 | * Return: %true if the entry is a retry entry. |
| 981 | */ | 1222 | */ |
| 982 | static inline bool xa_is_zero(const void *entry) | 1223 | static inline bool xa_is_retry(const void *entry) |
| 983 | { | 1224 | { |
| 984 | return unlikely(entry == XA_ZERO_ENTRY); | 1225 | return unlikely(entry == XA_RETRY_ENTRY); |
| 985 | } | 1226 | } |
| 986 | 1227 | ||
| 987 | /** | 1228 | /** |
| 988 | * xa_is_retry() - Is the entry a retry entry? | 1229 | * xa_is_advanced() - Is the entry only permitted for the advanced API? |
| 989 | * @entry: Entry retrieved from the XArray | 1230 | * @entry: Entry to be stored in the XArray. |
| 990 | * | 1231 | * |
| 991 | * Return: %true if the entry is a retry entry. | 1232 | * Return: %true if the entry cannot be stored by the normal API. |
| 992 | */ | 1233 | */ |
| 993 | static inline bool xa_is_retry(const void *entry) | 1234 | static inline bool xa_is_advanced(const void *entry) |
| 994 | { | 1235 | { |
| 995 | return unlikely(entry == XA_RETRY_ENTRY); | 1236 | return xa_is_internal(entry) && (entry <= XA_RETRY_ENTRY); |
| 996 | } | 1237 | } |
| 997 | 1238 | ||
| 998 | /** | 1239 | /** |
