Diffstat (limited to 'include/linux')
 include/linux/acpi.h           |  25
 include/linux/bitmap.h         |  18
 include/linux/can/dev.h        |   3
 include/linux/cpu.h            |   6
 include/linux/cpuhotplug.h     |   2
 include/linux/devfreq-event.h  |   5
 include/linux/dma-mapping.h    |   2
 include/linux/efi.h            |  47
 include/linux/hypervisor.h     |  17
 include/linux/jump_label.h     |  18
 include/linux/kernel.h         |   9
 include/linux/lglock.h         |  81
 include/linux/list.h           |   7
 include/linux/mroute.h         |   2
 include/linux/mroute6.h        |   2
 include/linux/pci.h            |   4
 include/linux/percpu-rwsem.h   | 108
 include/linux/perf/arm_pmu.h   |  13
 include/linux/perf_event.h     |  24
 include/linux/pm_domain.h      |  74
 include/linux/property.h       |   2
 include/linux/rcu_sync.h       |   1
 include/linux/rcupdate.h       |   1
 include/linux/sched.h          |  43
 include/linux/smp.h            |   3
 include/linux/suspend.h        |   2
 include/linux/swap.h           |   2
 include/linux/torture.h        |   2
 include/linux/u64_stats_sync.h |  45
 include/linux/wait.h           |  17
 30 files changed, 380 insertions(+), 205 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index c5eaf2f80a4c..19e650c940b6 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -85,6 +85,8 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
 	return dev_name(&adev->dev);
 }
 
+struct device *acpi_get_first_physical_node(struct acpi_device *adev);
+
 enum acpi_irq_model_id {
 	ACPI_IRQ_MODEL_PIC = 0,
 	ACPI_IRQ_MODEL_IOAPIC,
@@ -267,12 +269,18 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
 	return phys_id == PHYS_CPUID_INVALID;
 }
 
+/* Validate the processor object's proc_id */
+bool acpi_processor_validate_proc_id(int proc_id);
+
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 /* Arch dependent functions for cpu hotplug support */
 int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, int *pcpu);
 int acpi_unmap_cpu(int cpu);
+int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid);
 #endif /* CONFIG_ACPI_HOTPLUG_CPU */
 
+void acpi_set_processor_mapping(void);
+
 #ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
 int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr);
 #endif
@@ -634,6 +642,11 @@ static inline const char *acpi_dev_name(struct acpi_device *adev)
 	return NULL;
 }
 
+static inline struct device *acpi_get_first_physical_node(struct acpi_device *adev)
+{
+	return NULL;
+}
+
 static inline void acpi_early_init(void) { }
 static inline void acpi_subsystem_init(void) { }
 
@@ -751,6 +764,12 @@ static inline int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
 
 #endif	/* !CONFIG_ACPI */
 
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_ioapic_add(acpi_handle root);
+#else
+static inline int acpi_ioapic_add(acpi_handle root) { return 0; }
+#endif
+
 #ifdef CONFIG_ACPI
 void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
 			       u32 pm1a_ctrl, u32 pm1b_ctrl));
@@ -1074,4 +1093,10 @@ void acpi_table_upgrade(void);
 static inline void acpi_table_upgrade(void) { }
 #endif
 
+#if defined(CONFIG_ACPI) && defined(CONFIG_ACPI_WATCHDOG)
+extern bool acpi_has_watchdog(void);
+#else
+static inline bool acpi_has_watchdog(void) { return false; }
+#endif
+
 #endif	/*_LINUX_ACPI_H*/
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 598bc999f4c2..3b77588a9360 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -339,6 +339,24 @@ static inline int bitmap_parse(const char *buf, unsigned int buflen,
 	return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
 }
 
+/*
+ * bitmap_from_u64 - Check and swap words within u64.
+ *  @mask: source bitmap
+ *  @dst: destination bitmap
+ *
+ * In 32-bit Big Endian kernel, when using (u32 *)(&val)[*]
+ * to read u64 mask, we will get the wrong word.
+ * That is "(u32 *)(&val)[0]" gets the upper 32 bits,
+ * but we expect the lower 32-bits of u64.
+ */
+static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
+{
+	dst[0] = mask & ULONG_MAX;
+
+	if (sizeof(mask) > sizeof(unsigned long))
+		dst[1] = mask >> 32;
+}
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* __LINUX_BITMAP_H */
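
The new bitmap_from_u64() above exists because on a 32-bit big-endian kernel the two halves of a u64 land in the wrong order when the value is reinterpreted as an array of unsigned long. A minimal usage sketch; the mask value and the pr_info() reporting are illustrative assumptions, not part of the patch:

#include <linux/bitmap.h>
#include <linux/printk.h>

static void report_set_bits(void)
{
	/* BITS_TO_LONGS(64) is 2 on 32-bit and 1 on 64-bit kernels */
	unsigned long bits[BITS_TO_LONGS(64)];
	u64 mask = 0x00000001000000ffULL;	/* arbitrary example mask */
	int bit;

	bitmap_from_u64(bits, mask);
	for_each_set_bit(bit, bits, 64)
		pr_info("bit %d is set\n", bit);
}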
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5261751f6bd4..5f5270941ba0 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -32,6 +32,7 @@ enum can_mode {
  * CAN common private data
  */
 struct can_priv {
+	struct net_device *dev;
 	struct can_device_stats can_stats;
 
 	struct can_bittiming bittiming, data_bittiming;
@@ -47,7 +48,7 @@ struct can_priv {
 	u32 ctrlmode_static;	/* static enabled options for driver/hardware */
 
 	int restart_ms;
-	struct timer_list restart_timer;
+	struct delayed_work restart_work;
 
 	int (*do_set_bittiming)(struct net_device *dev);
 	int (*do_set_data_bittiming)(struct net_device *dev);
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 797d9c8e9a1b..ad4f1f33a74e 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -228,7 +228,11 @@ static inline void cpu_hotplug_done(void) {}
 #endif /* CONFIG_HOTPLUG_CPU */
 
 #ifdef CONFIG_PM_SLEEP_SMP
-extern int disable_nonboot_cpus(void);
+extern int freeze_secondary_cpus(int primary);
+static inline int disable_nonboot_cpus(void)
+{
+	return freeze_secondary_cpus(0);
+}
 extern void enable_nonboot_cpus(void);
 #else /* !CONFIG_PM_SLEEP_SMP */
 static inline int disable_nonboot_cpus(void) { return 0; }
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 34bd80512a0c..eb445a4e2a83 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -47,6 +47,8 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_METAG_STARTING,
 	CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
 	CPUHP_AP_ARM_VFP_STARTING,
+	CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
+	CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
 	CPUHP_AP_PERF_ARM_STARTING,
 	CPUHP_AP_ARM_L2X0_STARTING,
 	CPUHP_AP_ARM_ARCH_TIMER_STARTING,
diff --git a/include/linux/devfreq-event.h b/include/linux/devfreq-event.h
index 0a83a1e648b0..4db00b02ca3f 100644
--- a/include/linux/devfreq-event.h
+++ b/include/linux/devfreq-event.h
@@ -148,11 +148,6 @@ static inline int devfreq_event_reset_event(struct devfreq_event_dev *edev)
 	return -EINVAL;
 }
 
-static inline void *devfreq_event_get_drvdata(struct devfreq_event_dev *edev)
-{
-	return ERR_PTR(-EINVAL);
-}
-
 static inline struct devfreq_event_dev *devfreq_event_get_edev_by_phandle(
 					struct device *dev, int index)
 {
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 66533e18276c..dc69df04abc1 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -718,7 +718,7 @@ static inline int dma_mmap_wc(struct device *dev,
 #define dma_mmap_writecombine dma_mmap_wc
 #endif
 
-#ifdef CONFIG_NEED_DMA_MAP_STATE
+#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
 #define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
 #define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
 #define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 0148a3046b48..2d089487d2da 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -20,6 +20,7 @@
 #include <linux/ioport.h>
 #include <linux/pfn.h>
 #include <linux/pstore.h>
+#include <linux/range.h>
 #include <linux/reboot.h>
 #include <linux/uuid.h>
 #include <linux/screen_info.h>
@@ -37,6 +38,7 @@
 #define EFI_WRITE_PROTECTED	( 8 | (1UL << (BITS_PER_LONG-1)))
 #define EFI_OUT_OF_RESOURCES	( 9 | (1UL << (BITS_PER_LONG-1)))
 #define EFI_NOT_FOUND		(14 | (1UL << (BITS_PER_LONG-1)))
+#define EFI_ABORTED		(21 | (1UL << (BITS_PER_LONG-1)))
 #define EFI_SECURITY_VIOLATION	(26 | (1UL << (BITS_PER_LONG-1)))
 
 typedef unsigned long efi_status_t;
@@ -678,6 +680,18 @@ typedef struct {
 	unsigned long tables;
 } efi_system_table_t;
 
+/*
+ * Architecture independent structure for describing a memory map for the
+ * benefit of efi_memmap_init_early(), saving us the need to pass four
+ * parameters.
+ */
+struct efi_memory_map_data {
+	phys_addr_t phys_map;
+	unsigned long size;
+	unsigned long desc_version;
+	unsigned long desc_size;
+};
+
 struct efi_memory_map {
 	phys_addr_t phys_map;
 	void *map;
@@ -685,6 +699,12 @@ struct efi_memory_map {
 	int nr_map;
 	unsigned long desc_version;
 	unsigned long desc_size;
+	bool late;
+};
+
+struct efi_mem_range {
+	struct range range;
+	u64 attribute;
 };
 
 struct efi_fdt_params {
@@ -909,6 +929,16 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
 }
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+
+extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
+extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
+extern void __init efi_memmap_unmap(void);
+extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
+extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
+					 struct range *range);
+extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
+				     void *buf, struct efi_mem_range *mem);
+
 extern int efi_config_init(efi_config_table_type_t *arch_tables);
 #ifdef CONFIG_EFI_ESRT
 extern void __init efi_esrt_init(void);
@@ -924,6 +954,7 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int __init efi_uart_console_only (void);
 extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
 extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
+extern void efi_mem_reserve(phys_addr_t addr, u64 size);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
 		struct resource *data_resource, struct resource *bss_resource);
 extern void efi_reserve_boot_services(void);
@@ -1136,12 +1167,6 @@ struct efivar_operations {
 };
 
 struct efivars {
-	/*
-	 * ->lock protects two things:
-	 * 1) efivarfs_list and efivars_sysfs_list
-	 * 2) ->ops calls
-	 */
-	spinlock_t lock;
 	struct kset *kset;
 	struct kobject *kobject;
 	const struct efivar_operations *ops;
@@ -1282,8 +1307,8 @@ struct kobject *efivars_kobject(void);
 int efivar_init(int (*func)(efi_char16_t *, efi_guid_t, unsigned long, void *),
 		void *data, bool duplicates, struct list_head *head);
 
-void efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
-void efivar_entry_remove(struct efivar_entry *entry);
+int efivar_entry_add(struct efivar_entry *entry, struct list_head *head);
+int efivar_entry_remove(struct efivar_entry *entry);
 
 int __efivar_entry_delete(struct efivar_entry *entry);
 int efivar_entry_delete(struct efivar_entry *entry);
@@ -1300,7 +1325,7 @@ int efivar_entry_set_get_size(struct efivar_entry *entry, u32 attributes,
 int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
 			  bool block, unsigned long size, void *data);
 
-void efivar_entry_iter_begin(void);
+int efivar_entry_iter_begin(void);
 void efivar_entry_iter_end(void);
 
 int __efivar_entry_iter(int (*func)(struct efivar_entry *, void *),
@@ -1336,7 +1361,6 @@ extern int efi_capsule_update(efi_capsule_header_t *capsule,
 
 #ifdef CONFIG_EFI_RUNTIME_MAP
 int efi_runtime_map_init(struct kobject *);
-void efi_runtime_map_setup(void *, int, u32);
 int efi_get_runtime_map_size(void);
 int efi_get_runtime_map_desc_size(void);
 int efi_runtime_map_copy(void *buf, size_t bufsz);
@@ -1346,9 +1370,6 @@ static inline int efi_runtime_map_init(struct kobject *kobj)
 	return 0;
 }
 
-static inline void
-efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {}
-
 static inline int efi_get_runtime_map_size(void)
 {
 	return 0;
diff --git a/include/linux/hypervisor.h b/include/linux/hypervisor.h
new file mode 100644
index 000000000000..3fa5ef2b3759
--- /dev/null
+++ b/include/linux/hypervisor.h
@@ -0,0 +1,17 @@
+#ifndef __LINUX_HYPEVISOR_H
+#define __LINUX_HYPEVISOR_H
+
+/*
+ *	Generic Hypervisor support
+ *		Juergen Gross <jgross@suse.com>
+ */
+
+#ifdef CONFIG_HYPERVISOR_GUEST
+#include <asm/hypervisor.h>
+#else
+static inline void hypervisor_pin_vcpu(int cpu)
+{
+}
+#endif
+
+#endif /* __LINUX_HYPEVISOR_H */
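
Because the !CONFIG_HYPERVISOR_GUEST branch supplies an empty stub, callers can invoke the hook unconditionally; when the architecture provides <asm/hypervisor.h>, the call reaches the platform implementation instead. A hypothetical caller, purely to illustrate the intended use:

#include <linux/hypervisor.h>
#include <linux/smp.h>

static void pin_this_vcpu(void)
{
	/* no-op unless the architecture implements hypervisor_pin_vcpu() */
	hypervisor_pin_vcpu(smp_processor_id());
}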
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index 661af564fae8..a0547c571800 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -21,6 +21,8 @@
  *
  * DEFINE_STATIC_KEY_TRUE(key);
  * DEFINE_STATIC_KEY_FALSE(key);
+ * DEFINE_STATIC_KEY_ARRAY_TRUE(keys, count);
+ * DEFINE_STATIC_KEY_ARRAY_FALSE(keys, count);
  * static_branch_likely()
  * static_branch_unlikely()
  *
@@ -267,9 +269,25 @@ struct static_key_false {
 #define DEFINE_STATIC_KEY_TRUE(name)	\
 	struct static_key_true name = STATIC_KEY_TRUE_INIT
 
+#define DECLARE_STATIC_KEY_TRUE(name)	\
+	extern struct static_key_true name
+
 #define DEFINE_STATIC_KEY_FALSE(name)	\
 	struct static_key_false name = STATIC_KEY_FALSE_INIT
 
+#define DECLARE_STATIC_KEY_FALSE(name)	\
+	extern struct static_key_false name
+
+#define DEFINE_STATIC_KEY_ARRAY_TRUE(name, count)		\
+	struct static_key_true name[count] = {			\
+		[0 ... (count) - 1] = STATIC_KEY_TRUE_INIT,	\
+	}
+
+#define DEFINE_STATIC_KEY_ARRAY_FALSE(name, count)		\
+	struct static_key_false name[count] = {			\
+		[0 ... (count) - 1] = STATIC_KEY_FALSE_INIT,	\
+	}
+
 extern bool ____wrong_branch_error(void);
 
 #define static_key_enabled(x)	\
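
The array forms define `count` static keys in one declaration, each filled in with the usual TRUE/FALSE initializer via a GNU range designator. A short sketch of the FALSE variant; the per-queue key array and function names are hypothetical:

#include <linux/jump_label.h>

#define NR_QUEUES 4

static DEFINE_STATIC_KEY_ARRAY_FALSE(queue_keys, NR_QUEUES);

static bool queue_is_instrumented(int q)
{
	/* compiles to a fall-through branch until the key is enabled */
	return static_branch_unlikely(&queue_keys[q]);
}

static void instrument_queue(int q)
{
	static_branch_enable(&queue_keys[q]);
}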
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d96a6118d26a..74fd6f05bc5b 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -259,17 +259,14 @@ static inline void might_fault(void) { }
 extern struct atomic_notifier_head panic_notifier_list;
 extern long (*panic_blink)(int state);
 __printf(1, 2)
-void panic(const char *fmt, ...)
-	__noreturn __cold;
+void panic(const char *fmt, ...) __noreturn __cold;
 void nmi_panic(struct pt_regs *regs, const char *msg);
 extern void oops_enter(void);
 extern void oops_exit(void);
 void print_oops_end_marker(void);
 extern int oops_may_print(void);
-void do_exit(long error_code)
-	__noreturn;
-void complete_and_exit(struct completion *, long)
-	__noreturn;
+void do_exit(long error_code) __noreturn;
+void complete_and_exit(struct completion *, long) __noreturn;
 
 /* Internal, do not use. */
 int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
deleted file mode 100644
index c92ebd100d9b..000000000000
--- a/include/linux/lglock.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Specialised local-global spinlock. Can only be declared as global variables
- * to avoid overhead and keep things simple (and we don't want to start using
- * these inside dynamically allocated structures).
- *
- * "local/global locks" (lglocks) can be used to:
- *
- * - Provide fast exclusive access to per-CPU data, with exclusive access to
- *   another CPU's data allowed but possibly subject to contention, and to
- *   provide very slow exclusive access to all per-CPU data.
- * - Or to provide very fast and scalable read serialisation, and to provide
- *   very slow exclusive serialisation of data (not necessarily per-CPU data).
- *
- * Brlocks are also implemented as a short-hand notation for the latter use
- * case.
- *
- * Copyright 2009, 2010, Nick Piggin, Novell Inc.
- */
-#ifndef __LINUX_LGLOCK_H
-#define __LINUX_LGLOCK_H
-
-#include <linux/spinlock.h>
-#include <linux/lockdep.h>
-#include <linux/percpu.h>
-#include <linux/cpu.h>
-#include <linux/notifier.h>
-
-#ifdef CONFIG_SMP
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-#define LOCKDEP_INIT_MAP lockdep_init_map
-#else
-#define LOCKDEP_INIT_MAP(a, b, c, d)
-#endif
-
-struct lglock {
-	arch_spinlock_t __percpu *lock;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lock_class_key lock_key;
-	struct lockdep_map lock_dep_map;
-#endif
-};
-
-#define DEFINE_LGLOCK(name)						\
-	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
-	= __ARCH_SPIN_LOCK_UNLOCKED;					\
-	struct lglock name = { .lock = &name ## _lock }
-
-#define DEFINE_STATIC_LGLOCK(name)					\
-	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
-	= __ARCH_SPIN_LOCK_UNLOCKED;					\
-	static struct lglock name = { .lock = &name ## _lock }
-
-void lg_lock_init(struct lglock *lg, char *name);
-
-void lg_local_lock(struct lglock *lg);
-void lg_local_unlock(struct lglock *lg);
-void lg_local_lock_cpu(struct lglock *lg, int cpu);
-void lg_local_unlock_cpu(struct lglock *lg, int cpu);
-
-void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
-void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
-
-void lg_global_lock(struct lglock *lg);
-void lg_global_unlock(struct lglock *lg);
-
-#else
-/* When !CONFIG_SMP, map lglock to spinlock */
-#define lglock spinlock
-#define DEFINE_LGLOCK(name) DEFINE_SPINLOCK(name)
-#define DEFINE_STATIC_LGLOCK(name) static DEFINE_SPINLOCK(name)
-#define lg_lock_init(lg, name) spin_lock_init(lg)
-#define lg_local_lock spin_lock
-#define lg_local_unlock spin_unlock
-#define lg_local_lock_cpu(lg, cpu) spin_lock(lg)
-#define lg_local_unlock_cpu(lg, cpu) spin_unlock(lg)
-#define lg_global_lock spin_lock
-#define lg_global_unlock spin_unlock
-#endif
-
-#endif
diff --git a/include/linux/list.h b/include/linux/list.h
index 5183138aa932..5809e9a2de5b 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -381,8 +381,11 @@ static inline void list_splice_tail_init(struct list_head *list,
  *
  * Note that if the list is empty, it returns NULL.
  */
-#define list_first_entry_or_null(ptr, type, member) \
-	(!list_empty(ptr) ? list_first_entry(ptr, type, member) : NULL)
+#define list_first_entry_or_null(ptr, type, member) ({ \
+	struct list_head *head__ = (ptr); \
+	struct list_head *pos__ = READ_ONCE(head__->next); \
+	pos__ != head__ ? list_entry(pos__, type, member) : NULL; \
+})
 
 /**
  * list_next_entry - get the next element in list
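
The rework matters for lockless readers: the old macro tested list_empty() and then re-read head->next inside list_first_entry(), so a concurrent removal between the two reads could hand back a stale pointer. The single READ_ONCE() snapshot returns either NULL or the entry that was first at the moment of the read. A hypothetical caller:

#include <linux/list.h>

struct request {
	struct list_head queue_node;
	int id;
};

/* Peek at the head of a queue without taking the queue lock. */
static struct request *peek_first_request(struct list_head *queue)
{
	return list_first_entry_or_null(queue, struct request, queue_node);
}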
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index d351fd3e1049..e5fb81376e92 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -120,5 +120,5 @@ struct mfc_cache {
 struct rtmsg;
 int ipmr_get_route(struct net *net, struct sk_buff *skb,
 		   __be32 saddr, __be32 daddr,
-		   struct rtmsg *rtm, int nowait);
+		   struct rtmsg *rtm, int nowait, u32 portid);
 #endif
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 3987b64040c5..19a1c0c2993b 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
 
 struct rtmsg;
 extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
-			   struct rtmsg *rtm, int nowait);
+			   struct rtmsg *rtm, int nowait, u32 portid);
 
 #ifdef CONFIG_IPV6_MROUTE
 extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 0ab835965669..a917d4b20554 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1126,6 +1126,7 @@ void pdev_enable_device(struct pci_dev *);
 int pci_enable_resources(struct pci_dev *, int mask);
 void pci_fixup_irqs(u8 (*)(struct pci_dev *, u8 *),
 		    int (*)(const struct pci_dev *, u8, u8));
+struct resource *pci_find_resource(struct pci_dev *dev, struct resource *res);
 #define HAVE_PCI_REQ_REGIONS	2
 int __must_check pci_request_regions(struct pci_dev *, const char *);
 int __must_check pci_request_regions_exclusive(struct pci_dev *, const char *);
@@ -1542,6 +1543,9 @@ static inline int pci_enable_wake(struct pci_dev *dev, pci_power_t state,
 				  int enable)
 { return 0; }
 
+static inline struct resource *pci_find_resource(struct pci_dev *dev,
+						 struct resource *res)
+{ return NULL; }
 static inline int pci_request_regions(struct pci_dev *dev, const char *res_name)
 { return -EIO; }
 static inline void pci_release_regions(struct pci_dev *dev) { }
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index c2fa3ecb0dce..5b2e6159b744 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -10,32 +10,122 @@
 
 struct percpu_rw_semaphore {
 	struct rcu_sync		rss;
-	unsigned int __percpu	*fast_read_ctr;
+	unsigned int __percpu	*read_count;
 	struct rw_semaphore	rw_sem;
-	atomic_t		slow_read_ctr;
-	wait_queue_head_t	write_waitq;
+	wait_queue_head_t	writer;
+	int			readers_block;
 };
 
-extern void percpu_down_read(struct percpu_rw_semaphore *);
-extern int  percpu_down_read_trylock(struct percpu_rw_semaphore *);
-extern void percpu_up_read(struct percpu_rw_semaphore *);
+#define DEFINE_STATIC_PERCPU_RWSEM(name)				\
+static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name);		\
+static struct percpu_rw_semaphore name = {				\
+	.rss = __RCU_SYNC_INITIALIZER(name.rss, RCU_SCHED_SYNC),	\
+	.read_count = &__percpu_rwsem_rc_##name,			\
+	.rw_sem = __RWSEM_INITIALIZER(name.rw_sem),			\
+	.writer = __WAIT_QUEUE_HEAD_INITIALIZER(name.writer),		\
+}
+
+extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
+extern void __percpu_up_read(struct percpu_rw_semaphore *);
+
+static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+{
+	might_sleep();
+
+	rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+
+	preempt_disable();
+	/*
+	 * We are in an RCU-sched read-side critical section, so the writer
+	 * cannot both change sem->state from readers_fast and start checking
+	 * counters while we are here. So if we see !sem->state, we know that
+	 * the writer won't be checking until we're past the preempt_enable()
+	 * and that once the synchronize_sched() is done, the writer will see
+	 * anything we did within this RCU-sched read-side critical section.
+	 */
+	__this_cpu_inc(*sem->read_count);
+	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+		__percpu_down_read(sem, false); /* Unconditional memory barrier */
+	barrier();
+	/*
+	 * The barrier() prevents the compiler from
+	 * bleeding the critical section out.
+	 */
+}
+
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
+{
+	percpu_down_read_preempt_disable(sem);
+	preempt_enable();
+}
+
+static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+{
+	int ret = 1;
+
+	preempt_disable();
+	/*
+	 * Same as in percpu_down_read().
+	 */
+	__this_cpu_inc(*sem->read_count);
+	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
+	preempt_enable();
+	/*
+	 * The barrier() from preempt_enable() prevents the compiler from
+	 * bleeding the critical section out.
+	 */
+
+	if (ret)
+		rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+
+	return ret;
+}
+
+static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+{
+	/*
+	 * The barrier() prevents the compiler from
+	 * bleeding the critical section out.
+	 */
+	barrier();
+	/*
+	 * Same as in percpu_down_read().
+	 */
+	if (likely(rcu_sync_is_idle(&sem->rss)))
+		__this_cpu_dec(*sem->read_count);
+	else
+		__percpu_up_read(sem); /* Unconditional memory barrier */
+	preempt_enable();
+
+	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
+}
+
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
+{
+	preempt_disable();
+	percpu_up_read_preempt_enable(sem);
+}
 
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
 
 extern int __percpu_init_rwsem(struct percpu_rw_semaphore *,
 				const char *, struct lock_class_key *);
+
 extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
 
-#define percpu_init_rwsem(brw)	\
+#define percpu_init_rwsem(sem)					\
 ({								\
 	static struct lock_class_key rwsem_key;			\
-	__percpu_init_rwsem(brw, #brw, &rwsem_key);		\
+	__percpu_init_rwsem(sem, #sem, &rwsem_key);		\
 })
 
-
 #define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
 
+#define percpu_rwsem_assert_held(sem)				\
+	lockdep_assert_held(&(sem)->rw_sem)
+
 static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
 					bool read, unsigned long ip)
 {
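
After this rewrite the read side is fully inline and costs one per-CPU increment while no writer is pending; the out-of-line __percpu_down_read()/__percpu_up_read() slow paths run only when rcu_sync reports a writer. A usage sketch with a hypothetical lock name and critical sections:

#include <linux/percpu-rwsem.h>

DEFINE_STATIC_PERCPU_RWSEM(config_rwsem);

static void reader_fast_path(void)
{
	percpu_down_read(&config_rwsem);  /* per-CPU inc in the common case */
	/* frequent read-side work */
	percpu_up_read(&config_rwsem);
}

static void writer_slow_path(void)
{
	percpu_down_write(&config_rwsem); /* waits for all active readers */
	/* rare exclusive update */
	percpu_up_write(&config_rwsem);
}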
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index e18843809eec..9ff07d3fc8de 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -14,7 +14,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/perf_event.h>
-
+#include <linux/sysfs.h>
 #include <asm/cputype.h>
 
 /*
@@ -77,6 +77,13 @@ struct pmu_hw_events {
 	struct arm_pmu		*percpu_pmu;
 };
 
+enum armpmu_attr_groups {
+	ARMPMU_ATTR_GROUP_COMMON,
+	ARMPMU_ATTR_GROUP_EVENTS,
+	ARMPMU_ATTR_GROUP_FORMATS,
+	ARMPMU_NR_ATTR_GROUPS
+};
+
 struct arm_pmu {
 	struct pmu	pmu;
 	cpumask_t	active_irqs;
@@ -111,6 +118,8 @@ struct arm_pmu {
 	struct pmu_hw_events	__percpu *hw_events;
 	struct list_head	entry;
 	struct notifier_block	cpu_pm_nb;
+	/* the attr_groups array must be NULL-terminated */
+	const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
 };
 
 #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -151,6 +160,8 @@ int arm_pmu_device_probe(struct platform_device *pdev,
 				 const struct of_device_id *of_table,
 				 const struct pmu_probe_info *probe_table);
 
+#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
+
 #endif /* CONFIG_ARM_PMU */
 
 #endif /* __ARM_PMU_H__ */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 2b6b43cc0dd5..5c5362584aba 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -510,9 +510,15 @@ typedef void (*perf_overflow_handler_t)(struct perf_event *,
 					    struct perf_sample_data *,
 					    struct pt_regs *regs);
 
-enum perf_group_flag {
-	PERF_GROUP_SOFTWARE = 0x1,
-};
+/*
+ * Event capabilities. For event_caps and groups caps.
+ *
+ * PERF_EV_CAP_SOFTWARE: Is a software event.
+ * PERF_EV_CAP_READ_ACTIVE_PKG: A CPU event (or cgroup event) that can be read
+ * from any CPU in the package where it is active.
+ */
+#define PERF_EV_CAP_SOFTWARE		BIT(0)
+#define PERF_EV_CAP_READ_ACTIVE_PKG	BIT(1)
 
 #define SWEVENT_HLIST_BITS	8
 #define SWEVENT_HLIST_SIZE	(1 << SWEVENT_HLIST_BITS)
@@ -568,7 +574,12 @@ struct perf_event {
 	struct hlist_node		hlist_entry;
 	struct list_head		active_entry;
 	int				nr_siblings;
-	int				group_flags;
+
+	/* Not serialized. Only written during event initialization. */
+	int				event_caps;
+	/* The cumulative AND of all event_caps for events in this group. */
+	int				group_caps;
+
 	struct perf_event		*group_leader;
 	struct pmu			*pmu;
 	void				*pmu_private;
@@ -774,6 +785,9 @@ struct perf_cpu_context {
 #ifdef CONFIG_CGROUP_PERF
 	struct perf_cgroup		*cgrp;
 #endif
+
+	struct list_head		sched_cb_entry;
+	int				sched_cb_usage;
 };
 
 struct perf_output_handle {
@@ -985,7 +999,7 @@ static inline bool is_sampling_event(struct perf_event *event)
  */
 static inline int is_software_event(struct perf_event *event)
 {
-	return event->pmu->task_ctx_nr == perf_sw_context;
+	return event->event_caps & PERF_EV_CAP_SOFTWARE;
 }
 
 extern struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
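
Both flags live in event->event_caps, which per the new comment is written only during event initialization and never serialized afterwards. A hypothetical driver-side sketch of an event_init() advertising the package-read capability (the driver name and policy are illustrative assumptions, not part of the patch):

static int my_uncore_event_init(struct perf_event *event)
{
	/* written once at init time, before the event is ever scheduled */
	event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG;
	return 0;
}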
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 31fec858088c..a09fe5c009c8 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -51,6 +51,8 @@ struct generic_pm_domain {
 	struct mutex lock;
 	struct dev_power_governor *gov;
 	struct work_struct power_off_work;
+	struct fwnode_handle *provider;	/* Identity of the domain provider */
+	bool has_provider;
 	const char *name;
 	atomic_t sd_count;	/* Number of subdomains with power "on" */
 	enum gpd_status status;	/* Current state of the domain */
@@ -116,7 +118,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
 	return to_gpd_data(dev->power.subsys_data->domain_data);
 }
 
-extern struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev);
 extern int __pm_genpd_add_device(struct generic_pm_domain *genpd,
 				 struct device *dev,
 				 struct gpd_timing_data *td);
@@ -129,6 +130,7 @@ extern int pm_genpd_remove_subdomain(struct generic_pm_domain *genpd,
 				  struct generic_pm_domain *target);
 extern int pm_genpd_init(struct generic_pm_domain *genpd,
 			 struct dev_power_governor *gov, bool is_off);
+extern int pm_genpd_remove(struct generic_pm_domain *genpd);
 
 extern struct dev_power_governor simple_qos_governor;
 extern struct dev_power_governor pm_domain_always_on_gov;
@@ -138,10 +140,6 @@ static inline struct generic_pm_domain_data *dev_gpd_data(struct device *dev)
 {
 	return ERR_PTR(-ENOSYS);
 }
-static inline struct generic_pm_domain *pm_genpd_lookup_dev(struct device *dev)
-{
-	return NULL;
-}
 static inline int __pm_genpd_add_device(struct generic_pm_domain *genpd,
 					struct device *dev,
 					struct gpd_timing_data *td)
@@ -168,6 +166,10 @@ static inline int pm_genpd_init(struct generic_pm_domain *genpd,
 {
 	return -ENOSYS;
 }
+static inline int pm_genpd_remove(struct generic_pm_domain *genpd)
+{
+	return -ENOTSUPP;
+}
 #endif
 
 static inline int pm_genpd_add_device(struct generic_pm_domain *genpd,
@@ -192,57 +194,57 @@ struct genpd_onecell_data {
 	unsigned int num_domains;
 };
 
-typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
-						   void *data);
-
 #ifdef CONFIG_PM_GENERIC_DOMAINS_OF
-int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
-			void *data);
+int of_genpd_add_provider_simple(struct device_node *np,
+				 struct generic_pm_domain *genpd);
+int of_genpd_add_provider_onecell(struct device_node *np,
+				  struct genpd_onecell_data *data);
 void of_genpd_del_provider(struct device_node *np);
-struct generic_pm_domain *of_genpd_get_from_provider(
-			struct of_phandle_args *genpdspec);
-
-struct generic_pm_domain *__of_genpd_xlate_simple(
-					struct of_phandle_args *genpdspec,
-					void *data);
-struct generic_pm_domain *__of_genpd_xlate_onecell(
-					struct of_phandle_args *genpdspec,
-					void *data);
+extern int of_genpd_add_device(struct of_phandle_args *args,
+			       struct device *dev);
+extern int of_genpd_add_subdomain(struct of_phandle_args *parent,
+				  struct of_phandle_args *new_subdomain);
+extern struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
 
 int genpd_dev_pm_attach(struct device *dev);
 #else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
-static inline int __of_genpd_add_provider(struct device_node *np,
-					genpd_xlate_t xlate, void *data)
+static inline int of_genpd_add_provider_simple(struct device_node *np,
+					struct generic_pm_domain *genpd)
 {
-	return 0;
+	return -ENOTSUPP;
 }
-static inline void of_genpd_del_provider(struct device_node *np) {}
 
-static inline struct generic_pm_domain *of_genpd_get_from_provider(
-			struct of_phandle_args *genpdspec)
+static inline int of_genpd_add_provider_onecell(struct device_node *np,
+					struct genpd_onecell_data *data)
 {
-	return NULL;
+	return -ENOTSUPP;
 }
 
-#define __of_genpd_xlate_simple		NULL
-#define __of_genpd_xlate_onecell	NULL
+static inline void of_genpd_del_provider(struct device_node *np) {}
 
-static inline int genpd_dev_pm_attach(struct device *dev)
+static inline int of_genpd_add_device(struct of_phandle_args *args,
+				      struct device *dev)
 {
 	return -ENODEV;
 }
-#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
 
-static inline int of_genpd_add_provider_simple(struct device_node *np,
-					struct generic_pm_domain *genpd)
+static inline int of_genpd_add_subdomain(struct of_phandle_args *parent,
+					 struct of_phandle_args *new_subdomain)
 {
-	return __of_genpd_add_provider(np, __of_genpd_xlate_simple, genpd);
+	return -ENODEV;
 }
-static inline int of_genpd_add_provider_onecell(struct device_node *np,
-					struct genpd_onecell_data *data)
+
+static inline int genpd_dev_pm_attach(struct device *dev)
+{
+	return -ENODEV;
+}
+
+static inline
+struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
 {
-	return __of_genpd_add_provider(np, __of_genpd_xlate_onecell, data);
+	return ERR_PTR(-ENOTSUPP);
 }
+#endif /* CONFIG_PM_GENERIC_DOMAINS_OF */
 
 #ifdef CONFIG_PM
 extern int dev_pm_domain_attach(struct device *dev, bool power_on);
diff --git a/include/linux/property.h b/include/linux/property.h
index 3a2f9ae25c86..856e50b2140c 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -190,7 +190,7 @@ struct property_entry {
 	.length = ARRAY_SIZE(_val_) * sizeof(_type_),	\
 	.is_array = true,				\
 	.is_string = false,				\
-	{ .pointer = { _type_##_data = _val_ } },	\
+	{ .pointer = { ._type_##_data = _val_ } },	\
 }
 
 #define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_)	\
diff --git a/include/linux/rcu_sync.h b/include/linux/rcu_sync.h
index a63a33e6196e..ece7ed9a4a70 100644
--- a/include/linux/rcu_sync.h
+++ b/include/linux/rcu_sync.h
@@ -59,6 +59,7 @@ static inline bool rcu_sync_is_idle(struct rcu_sync *rsp)
 }
 
 extern void rcu_sync_init(struct rcu_sync *, enum rcu_sync_type);
+extern void rcu_sync_enter_start(struct rcu_sync *);
 extern void rcu_sync_enter(struct rcu_sync *);
 extern void rcu_sync_exit(struct rcu_sync *);
 extern void rcu_sync_dtor(struct rcu_sync *);
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 1aa62e1a761b..321f9ed552a9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -334,6 +334,7 @@ void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
+void rcu_cpu_starting(unsigned int cpu);
 
 #ifndef CONFIG_TINY_RCU
 void rcu_end_inkernel_boot(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index abb795afc823..7543a476178b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -448,6 +448,8 @@ static inline void io_schedule(void)
 	io_schedule_timeout(MAX_SCHEDULE_TIMEOUT);
 }
 
+void __noreturn do_task_dead(void);
+
 struct nsproxy;
 struct user_namespace;
 
@@ -1022,7 +1024,8 @@ extern void wake_up_q(struct wake_q_head *head);
 #define SD_BALANCE_FORK		0x0008	/* Balance on fork, clone */
 #define SD_BALANCE_WAKE		0x0010	/* Balance on wakeup */
 #define SD_WAKE_AFFINE		0x0020	/* Wake task to waking CPU */
-#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu power */
+#define SD_ASYM_CPUCAPACITY	0x0040	/* Groups have different max cpu capacities */
+#define SD_SHARE_CPUCAPACITY	0x0080	/* Domain members share cpu capacity */
 #define SD_SHARE_POWERDOMAIN	0x0100	/* Domain members share power domain */
 #define SD_SHARE_PKG_RESOURCES	0x0200	/* Domain members share cpu pkg resources */
 #define SD_SERIALIZE		0x0400	/* Only a single load balancing instance */
@@ -1064,6 +1067,12 @@ extern int sched_domain_level_max;
 
 struct sched_group;
 
+struct sched_domain_shared {
+	atomic_t	ref;
+	atomic_t	nr_busy_cpus;
+	int		has_idle_cores;
+};
+
 struct sched_domain {
 	/* These fields must be setup */
 	struct sched_domain *parent;	/* top domain must be null terminated */
@@ -1094,6 +1103,8 @@ struct sched_domain {
 	u64 max_newidle_lb_cost;
 	unsigned long next_decay_max_lb_cost;
 
+	u64 avg_scan_cost;		/* select_idle_sibling */
+
 #ifdef CONFIG_SCHEDSTATS
 	/* load_balance() stats */
 	unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -1132,6 +1143,7 @@ struct sched_domain {
 		void *private;		/* used during construction */
 		struct rcu_head rcu;	/* used during destruction */
 	};
+	struct sched_domain_shared *shared;
 
 	unsigned int span_weight;
 	/*
@@ -1165,6 +1177,7 @@ typedef int (*sched_domain_flags_f)(void);
 
 struct sd_data {
 	struct sched_domain **__percpu sd;
+	struct sched_domain_shared **__percpu sds;
 	struct sched_group **__percpu sg;
 	struct sched_group_capacity **__percpu sgc;
 };
@@ -2597,7 +2610,7 @@ static inline bool is_idle_task(const struct task_struct *p)
 	return p->pid == 0;
 }
 extern struct task_struct *curr_task(int cpu);
-extern void set_curr_task(int cpu, struct task_struct *p);
+extern void ia64_set_curr_task(int cpu, struct task_struct *p);
 
 void yield(void);
 
@@ -3279,7 +3292,11 @@ static inline int signal_pending_state(long state, struct task_struct *p)
  * cond_resched_lock() will drop the spinlock before scheduling,
  * cond_resched_softirq() will enable bhs before scheduling.
  */
+#ifndef CONFIG_PREEMPT
 extern int _cond_resched(void);
+#else
+static inline int _cond_resched(void) { return 0; }
+#endif
 
 #define cond_resched() ({			\
 	___might_sleep(__FILE__, __LINE__, 0);	\
@@ -3309,6 +3326,15 @@ static inline void cond_resched_rcu(void)
 #endif
 }
 
+static inline unsigned long get_preempt_disable_ip(struct task_struct *p)
+{
+#ifdef CONFIG_DEBUG_PREEMPT
+	return p->preempt_disable_ip;
+#else
+	return 0;
+#endif
+}
+
 /*
  * Does a critical section need to be broken due to another
  * task waiting?: (technically does not depend on CONFIG_PREEMPT,
@@ -3546,15 +3572,20 @@ static inline unsigned long rlimit_max(unsigned int limit)
 	return task_rlimit_max(current, limit);
 }
 
+#define SCHED_CPUFREQ_RT	(1U << 0)
+#define SCHED_CPUFREQ_DL	(1U << 1)
+#define SCHED_CPUFREQ_IOWAIT	(1U << 2)
+
+#define SCHED_CPUFREQ_RT_DL	(SCHED_CPUFREQ_RT | SCHED_CPUFREQ_DL)
+
 #ifdef CONFIG_CPU_FREQ
 struct update_util_data {
-	void (*func)(struct update_util_data *data,
-		     u64 time, unsigned long util, unsigned long max);
+	void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
 };
 
 void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
 			void (*func)(struct update_util_data *data, u64 time,
-				     unsigned long util, unsigned long max));
+				     unsigned int flags));
 void cpufreq_remove_update_util_hook(int cpu);
 #endif /* CONFIG_CPU_FREQ */
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index eccae4690f41..8e0cb7a0f836 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -196,6 +196,9 @@ extern void arch_enable_nonboot_cpus_end(void);
 
 void smp_setup_processor_id(void);
 
+int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par,
+		    bool phys);
+
 /* SMP core functions */
 int smpcfd_prepare_cpu(unsigned int cpu);
 int smpcfd_dead_cpu(unsigned int cpu);
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 7693e39b14fe..d9718378a8be 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -245,6 +245,7 @@ static inline bool idle_should_freeze(void)
 	return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER);
 }
 
+extern void __init pm_states_init(void);
 extern void freeze_set_ops(const struct platform_freeze_ops *ops);
 extern void freeze_wake(void);
 
@@ -279,6 +280,7 @@ static inline bool pm_resume_via_firmware(void) { return false; }
 static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
 static inline bool idle_should_freeze(void) { return false; }
+static inline void __init pm_states_init(void) {}
 static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {}
 static inline void freeze_wake(void) {}
 #endif /* !CONFIG_SUSPEND */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b17cc4830fa6..4a529c984a3f 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -257,6 +257,7 @@ static inline void workingset_node_pages_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_pages_dec(struct radix_tree_node *node)
 {
+	VM_BUG_ON(!workingset_node_pages(node));
 	node->count--;
 }
 
@@ -272,6 +273,7 @@ static inline void workingset_node_shadows_inc(struct radix_tree_node *node)
 
 static inline void workingset_node_shadows_dec(struct radix_tree_node *node)
 {
+	VM_BUG_ON(!workingset_node_shadows(node));
 	node->count -= 1U << RADIX_TREE_COUNT_SHIFT;
 }
 
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 6685a73736a2..a45702eb3e7b 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -43,7 +43,7 @@
 
 #define TORTURE_FLAG "-torture:"
 #define TOROUT_STRING(s) \
-	pr_alert("%s" TORTURE_FLAG s "\n", torture_type)
+	pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s)
 #define VERBOSE_TOROUT_STRING(s) \
 	do { if (verbose) pr_alert("%s" TORTURE_FLAG " %s\n", torture_type, s); } while (0)
 #define VERBOSE_TOROUT_ERRSTRING(s) \
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index d3a2bb712af3..650f3dd6b800 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -103,31 +103,42 @@ static inline void u64_stats_update_end_raw(struct u64_stats_sync *syncp)
 #endif
 }
 
-static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_begin(&syncp->seq);
 #else
-#if BITS_PER_LONG==32
-	preempt_disable();
-#endif
 	return 0;
 #endif
 }
 
-static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+	preempt_disable();
+#endif
+	return __u64_stats_fetch_begin(syncp);
+}
+
+static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
 					 unsigned int start)
 {
 #if BITS_PER_LONG==32 && defined(CONFIG_SMP)
 	return read_seqcount_retry(&syncp->seq, start);
 #else
-#if BITS_PER_LONG==32
-	preempt_enable();
-#endif
 	return false;
 #endif
 }
 
+static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
+					 unsigned int start)
+{
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
+	preempt_enable();
+#endif
+	return __u64_stats_fetch_retry(syncp, start);
+}
+
 /*
  * In case irq handlers can update u64 counters, readers can use following helpers
  * - SMP 32bit arches use seqcount protection, irq safe.
@@ -136,27 +147,19 @@ static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
  */
 static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	return read_seqcount_begin(&syncp->seq);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
 	local_irq_disable();
 #endif
-	return 0;
-#endif
+	return __u64_stats_fetch_begin(syncp);
 }
 
 static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
 					     unsigned int start)
 {
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
-	return read_seqcount_retry(&syncp->seq, start);
-#else
-#if BITS_PER_LONG==32
+#if BITS_PER_LONG==32 && !defined(CONFIG_SMP)
 	local_irq_enable();
 #endif
-	return false;
-#endif
+	return __u64_stats_fetch_retry(syncp, start);
 }
 
 #endif /* _LINUX_U64_STATS_SYNC_H */
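
The split into __u64_stats_fetch_begin()/__u64_stats_fetch_retry() moves the 32-bit UP preempt and irq twiddling into the outer wrappers, but the reader-side pattern is unchanged: begin/retry still brackets the snapshot. The canonical retry loop, with a hypothetical stats structure:

#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void my_stats_read(struct my_stats *s, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*packets = s->packets;	/* snapshot both counters together */
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}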
diff --git a/include/linux/wait.h b/include/linux/wait.h
index c3ff74d764fa..2408e8d5c05c 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -248,6 +248,8 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 		(!__builtin_constant_p(state) ||				\
 			state == TASK_INTERRUPTIBLE || state == TASK_KILLABLE)	\
 
+extern void init_wait_entry(wait_queue_t *__wait, int flags);
+
 /*
  * The below macro ___wait_event() has an explicit shadow of the __ret
  * variable when used from the wait_event_*() macros.
@@ -266,12 +268,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 	wait_queue_t __wait;						\
 	long __ret = ret;	/* explicit shadow */			\
 									\
-	INIT_LIST_HEAD(&__wait.task_list);				\
-	if (exclusive)							\
-		__wait.flags = WQ_FLAG_EXCLUSIVE;			\
-	else								\
-		__wait.flags = 0;					\
-									\
+	init_wait_entry(&__wait, exclusive ? WQ_FLAG_EXCLUSIVE : 0);	\
 	for (;;) {							\
 		long __int = prepare_to_wait_event(&wq, &__wait, state);\
 									\
@@ -280,12 +277,7 @@ wait_queue_head_t *bit_waitqueue(void *, int);
 									\
 		if (___wait_is_interruptible(state) && __int) {		\
 			__ret = __int;					\
-			if (exclusive) {				\
-				abort_exclusive_wait(&wq, &__wait,	\
-						     state, NULL);	\
-				goto __out;				\
-			}						\
-			break;						\
+			goto __out;					\
 		}							\
 									\
 		cmd;							\
@@ -989,7 +981,6 @@ void prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state);
 long prepare_to_wait_event(wait_queue_head_t *q, wait_queue_t *wait, int state);
 void finish_wait(wait_queue_head_t *q, wait_queue_t *wait);
-void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, unsigned int mode, void *key);
 long wait_woken(wait_queue_t *wait, unsigned mode, long timeout);
 int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
 int autoremove_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key);
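
init_wait_entry() replaces the open-coded INIT_LIST_HEAD()/flags setup that ___wait_event() used to carry, and the unconditional "goto __out" error path is what lets abort_exclusive_wait() disappear. A sketch of an open-coded waiter built on the same primitives; the wait queue, the flag condition, and the function name are hypothetical:

static int wait_for_flag(wait_queue_head_t *wq, bool *flag)
{
	wait_queue_t wait;
	long err;

	init_wait_entry(&wait, 0);
	for (;;) {
		err = prepare_to_wait_event(wq, &wait, TASK_INTERRUPTIBLE);
		if (*flag) {
			err = 0;
			break;
		}
		if (err)	/* a signal is pending; stop waiting */
			break;
		schedule();
	}
	finish_wait(wq, &wait);
	return err;
}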