diff options
| author | Ingo Molnar <mingo@kernel.org> | 2014-01-25 03:16:14 -0500 |
|---|---|---|
| committer | Ingo Molnar <mingo@kernel.org> | 2014-01-25 03:16:14 -0500 |
| commit | 2b45e0f9f34f718725e093f4e335600811d7105a (patch) | |
| tree | 3c6d594539eb16fc955906da65b9fa7aacbc9145 /include/linux | |
| parent | a85eba8814631d0d48361c8b9a7ee0984e80c03c (diff) | |
| parent | 15c81026204da897a05424c79263aea861a782cc (diff) | |
Merge branch 'linus' into x86/urgent
Merge in the x86 changes to apply a fix.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'include/linux')
38 files changed, 563 insertions, 851 deletions
diff --git a/include/linux/bottom_half.h b/include/linux/bottom_half.h index 27b1bcffe408..86c12c93e3cf 100644 --- a/include/linux/bottom_half.h +++ b/include/linux/bottom_half.h | |||
| @@ -1,9 +1,35 @@ | |||
| 1 | #ifndef _LINUX_BH_H | 1 | #ifndef _LINUX_BH_H |
| 2 | #define _LINUX_BH_H | 2 | #define _LINUX_BH_H |
| 3 | 3 | ||
| 4 | extern void local_bh_disable(void); | 4 | #include <linux/preempt.h> |
| 5 | #include <linux/preempt_mask.h> | ||
| 6 | |||
| 7 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
| 8 | extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); | ||
| 9 | #else | ||
| 10 | static __always_inline void __local_bh_disable_ip(unsigned long ip, unsigned int cnt) | ||
| 11 | { | ||
| 12 | preempt_count_add(cnt); | ||
| 13 | barrier(); | ||
| 14 | } | ||
| 15 | #endif | ||
| 16 | |||
| 17 | static inline void local_bh_disable(void) | ||
| 18 | { | ||
| 19 | __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); | ||
| 20 | } | ||
| 21 | |||
| 5 | extern void _local_bh_enable(void); | 22 | extern void _local_bh_enable(void); |
| 6 | extern void local_bh_enable(void); | 23 | extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt); |
| 7 | extern void local_bh_enable_ip(unsigned long ip); | 24 | |
| 25 | static inline void local_bh_enable_ip(unsigned long ip) | ||
| 26 | { | ||
| 27 | __local_bh_enable_ip(ip, SOFTIRQ_DISABLE_OFFSET); | ||
| 28 | } | ||
| 29 | |||
| 30 | static inline void local_bh_enable(void) | ||
| 31 | { | ||
| 32 | __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); | ||
| 33 | } | ||
| 8 | 34 | ||
| 9 | #endif /* _LINUX_BH_H */ | 35 | #endif /* _LINUX_BH_H */ |
diff --git a/include/linux/compiler.h b/include/linux/compiler.h index 92669cd182a6..fe7a686dfd8d 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h | |||
| @@ -298,6 +298,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
| 298 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) | 298 | # define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b)) |
| 299 | #endif | 299 | #endif |
| 300 | 300 | ||
| 301 | /* Is this type a native word size -- useful for atomic operations */ | ||
| 302 | #ifndef __native_word | ||
| 303 | # define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) | ||
| 304 | #endif | ||
| 305 | |||
| 301 | /* Compile time object size, -1 for unknown */ | 306 | /* Compile time object size, -1 for unknown */ |
| 302 | #ifndef __compiletime_object_size | 307 | #ifndef __compiletime_object_size |
| 303 | # define __compiletime_object_size(obj) -1 | 308 | # define __compiletime_object_size(obj) -1 |
| @@ -337,6 +342,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect); | |||
| 337 | #define compiletime_assert(condition, msg) \ | 342 | #define compiletime_assert(condition, msg) \ |
| 338 | _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) | 343 | _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) |
| 339 | 344 | ||
| 345 | #define compiletime_assert_atomic_type(t) \ | ||
| 346 | compiletime_assert(__native_word(t), \ | ||
| 347 | "Need native word sized stores/loads for atomicity.") | ||
| 348 | |||
| 340 | /* | 349 | /* |
| 341 | * Prevent the compiler from merging or refetching accesses. The compiler | 350 | * Prevent the compiler from merging or refetching accesses. The compiler |
| 342 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), | 351 | * is also forbidden from reordering successive instances of ACCESS_ONCE(), |
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h index 158158704c30..37b81bd51ec0 100644 --- a/include/linux/context_tracking.h +++ b/include/linux/context_tracking.h | |||
| @@ -17,13 +17,13 @@ extern void __context_tracking_task_switch(struct task_struct *prev, | |||
| 17 | 17 | ||
| 18 | static inline void user_enter(void) | 18 | static inline void user_enter(void) |
| 19 | { | 19 | { |
| 20 | if (static_key_false(&context_tracking_enabled)) | 20 | if (context_tracking_is_enabled()) |
| 21 | context_tracking_user_enter(); | 21 | context_tracking_user_enter(); |
| 22 | 22 | ||
| 23 | } | 23 | } |
| 24 | static inline void user_exit(void) | 24 | static inline void user_exit(void) |
| 25 | { | 25 | { |
| 26 | if (static_key_false(&context_tracking_enabled)) | 26 | if (context_tracking_is_enabled()) |
| 27 | context_tracking_user_exit(); | 27 | context_tracking_user_exit(); |
| 28 | } | 28 | } |
| 29 | 29 | ||
| @@ -31,7 +31,7 @@ static inline enum ctx_state exception_enter(void) | |||
| 31 | { | 31 | { |
| 32 | enum ctx_state prev_ctx; | 32 | enum ctx_state prev_ctx; |
| 33 | 33 | ||
| 34 | if (!static_key_false(&context_tracking_enabled)) | 34 | if (!context_tracking_is_enabled()) |
| 35 | return 0; | 35 | return 0; |
| 36 | 36 | ||
| 37 | prev_ctx = this_cpu_read(context_tracking.state); | 37 | prev_ctx = this_cpu_read(context_tracking.state); |
| @@ -42,7 +42,7 @@ static inline enum ctx_state exception_enter(void) | |||
| 42 | 42 | ||
| 43 | static inline void exception_exit(enum ctx_state prev_ctx) | 43 | static inline void exception_exit(enum ctx_state prev_ctx) |
| 44 | { | 44 | { |
| 45 | if (static_key_false(&context_tracking_enabled)) { | 45 | if (context_tracking_is_enabled()) { |
| 46 | if (prev_ctx == IN_USER) | 46 | if (prev_ctx == IN_USER) |
| 47 | context_tracking_user_enter(); | 47 | context_tracking_user_enter(); |
| 48 | } | 48 | } |
| @@ -51,7 +51,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) | |||
| 51 | static inline void context_tracking_task_switch(struct task_struct *prev, | 51 | static inline void context_tracking_task_switch(struct task_struct *prev, |
| 52 | struct task_struct *next) | 52 | struct task_struct *next) |
| 53 | { | 53 | { |
| 54 | if (static_key_false(&context_tracking_enabled)) | 54 | if (context_tracking_is_enabled()) |
| 55 | __context_tracking_task_switch(prev, next); | 55 | __context_tracking_task_switch(prev, next); |
| 56 | } | 56 | } |
| 57 | #else | 57 | #else |
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h index 0f1979d0674f..97a81225d037 100644 --- a/include/linux/context_tracking_state.h +++ b/include/linux/context_tracking_state.h | |||
| @@ -22,15 +22,20 @@ struct context_tracking { | |||
| 22 | extern struct static_key context_tracking_enabled; | 22 | extern struct static_key context_tracking_enabled; |
| 23 | DECLARE_PER_CPU(struct context_tracking, context_tracking); | 23 | DECLARE_PER_CPU(struct context_tracking, context_tracking); |
| 24 | 24 | ||
| 25 | static inline bool context_tracking_in_user(void) | 25 | static inline bool context_tracking_is_enabled(void) |
| 26 | { | 26 | { |
| 27 | return __this_cpu_read(context_tracking.state) == IN_USER; | 27 | return static_key_false(&context_tracking_enabled); |
| 28 | } | 28 | } |
| 29 | 29 | ||
| 30 | static inline bool context_tracking_active(void) | 30 | static inline bool context_tracking_cpu_is_enabled(void) |
| 31 | { | 31 | { |
| 32 | return __this_cpu_read(context_tracking.active); | 32 | return __this_cpu_read(context_tracking.active); |
| 33 | } | 33 | } |
| 34 | |||
| 35 | static inline bool context_tracking_in_user(void) | ||
| 36 | { | ||
| 37 | return __this_cpu_read(context_tracking.state) == IN_USER; | ||
| 38 | } | ||
| 34 | #else | 39 | #else |
| 35 | static inline bool context_tracking_in_user(void) { return false; } | 40 | static inline bool context_tracking_in_user(void) { return false; } |
| 36 | static inline bool context_tracking_active(void) { return false; } | 41 | static inline bool context_tracking_active(void) { return false; } |
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index fe68a5a98583..7032518f8542 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h | |||
| @@ -6,6 +6,8 @@ | |||
| 6 | #include <linux/proc_fs.h> | 6 | #include <linux/proc_fs.h> |
| 7 | #include <linux/elf.h> | 7 | #include <linux/elf.h> |
| 8 | 8 | ||
| 9 | #include <asm/pgtable.h> /* for pgprot_t */ | ||
| 10 | |||
| 9 | #define ELFCORE_ADDR_MAX (-1ULL) | 11 | #define ELFCORE_ADDR_MAX (-1ULL) |
| 10 | #define ELFCORE_ADDR_ERR (-2ULL) | 12 | #define ELFCORE_ADDR_ERR (-2ULL) |
| 11 | 13 | ||
diff --git a/include/linux/edac.h b/include/linux/edac.h index dbdffe8d4469..8e6c20af11a2 100644 --- a/include/linux/edac.h +++ b/include/linux/edac.h | |||
| @@ -35,6 +35,34 @@ extern void edac_atomic_assert_error(void); | |||
| 35 | extern struct bus_type *edac_get_sysfs_subsys(void); | 35 | extern struct bus_type *edac_get_sysfs_subsys(void); |
| 36 | extern void edac_put_sysfs_subsys(void); | 36 | extern void edac_put_sysfs_subsys(void); |
| 37 | 37 | ||
| 38 | enum { | ||
| 39 | EDAC_REPORTING_ENABLED, | ||
| 40 | EDAC_REPORTING_DISABLED, | ||
| 41 | EDAC_REPORTING_FORCE | ||
| 42 | }; | ||
| 43 | |||
| 44 | extern int edac_report_status; | ||
| 45 | #ifdef CONFIG_EDAC | ||
| 46 | static inline int get_edac_report_status(void) | ||
| 47 | { | ||
| 48 | return edac_report_status; | ||
| 49 | } | ||
| 50 | |||
| 51 | static inline void set_edac_report_status(int new) | ||
| 52 | { | ||
| 53 | edac_report_status = new; | ||
| 54 | } | ||
| 55 | #else | ||
| 56 | static inline int get_edac_report_status(void) | ||
| 57 | { | ||
| 58 | return EDAC_REPORTING_DISABLED; | ||
| 59 | } | ||
| 60 | |||
| 61 | static inline void set_edac_report_status(int new) | ||
| 62 | { | ||
| 63 | } | ||
| 64 | #endif | ||
| 65 | |||
| 38 | static inline void opstate_init(void) | 66 | static inline void opstate_init(void) |
| 39 | { | 67 | { |
| 40 | switch (edac_op_state) { | 68 | switch (edac_op_state) { |
diff --git a/include/linux/efi.h b/include/linux/efi.h index 11ce6784a196..0a819e7a60c9 100644 --- a/include/linux/efi.h +++ b/include/linux/efi.h | |||
| @@ -556,6 +556,9 @@ extern struct efi { | |||
| 556 | unsigned long hcdp; /* HCDP table */ | 556 | unsigned long hcdp; /* HCDP table */ |
| 557 | unsigned long uga; /* UGA table */ | 557 | unsigned long uga; /* UGA table */ |
| 558 | unsigned long uv_systab; /* UV system table */ | 558 | unsigned long uv_systab; /* UV system table */ |
| 559 | unsigned long fw_vendor; /* fw_vendor */ | ||
| 560 | unsigned long runtime; /* runtime table */ | ||
| 561 | unsigned long config_table; /* config tables */ | ||
| 559 | efi_get_time_t *get_time; | 562 | efi_get_time_t *get_time; |
| 560 | efi_set_time_t *set_time; | 563 | efi_set_time_t *set_time; |
| 561 | efi_get_wakeup_time_t *get_wakeup_time; | 564 | efi_get_wakeup_time_t *get_wakeup_time; |
| @@ -653,6 +656,7 @@ extern int __init efi_setup_pcdp_console(char *); | |||
| 653 | #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ | 656 | #define EFI_RUNTIME_SERVICES 3 /* Can we use runtime services? */ |
| 654 | #define EFI_MEMMAP 4 /* Can we use EFI memory map? */ | 657 | #define EFI_MEMMAP 4 /* Can we use EFI memory map? */ |
| 655 | #define EFI_64BIT 5 /* Is the firmware 64-bit? */ | 658 | #define EFI_64BIT 5 /* Is the firmware 64-bit? */ |
| 659 | #define EFI_ARCH_1 6 /* First arch-specific bit */ | ||
| 656 | 660 | ||
| 657 | #ifdef CONFIG_EFI | 661 | #ifdef CONFIG_EFI |
| 658 | # ifdef CONFIG_X86 | 662 | # ifdef CONFIG_X86 |
| @@ -872,4 +876,17 @@ int efivars_sysfs_init(void); | |||
| 872 | 876 | ||
| 873 | #endif /* CONFIG_EFI_VARS */ | 877 | #endif /* CONFIG_EFI_VARS */ |
| 874 | 878 | ||
| 879 | #ifdef CONFIG_EFI_RUNTIME_MAP | ||
| 880 | int efi_runtime_map_init(struct kobject *); | ||
| 881 | void efi_runtime_map_setup(void *, int, u32); | ||
| 882 | #else | ||
| 883 | static inline int efi_runtime_map_init(struct kobject *kobj) | ||
| 884 | { | ||
| 885 | return 0; | ||
| 886 | } | ||
| 887 | |||
| 888 | static inline void | ||
| 889 | efi_runtime_map_setup(void *map, int nr_entries, u32 desc_size) {} | ||
| 890 | #endif | ||
| 891 | |||
| 875 | #endif /* _LINUX_EFI_H */ | 892 | #endif /* _LINUX_EFI_H */ |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h index d9cf963ac832..12d5f972f23f 100644 --- a/include/linux/hardirq.h +++ b/include/linux/hardirq.h | |||
| @@ -5,6 +5,7 @@ | |||
| 5 | #include <linux/lockdep.h> | 5 | #include <linux/lockdep.h> |
| 6 | #include <linux/ftrace_irq.h> | 6 | #include <linux/ftrace_irq.h> |
| 7 | #include <linux/vtime.h> | 7 | #include <linux/vtime.h> |
| 8 | #include <asm/hardirq.h> | ||
| 8 | 9 | ||
| 9 | 10 | ||
| 10 | extern void synchronize_irq(unsigned int irq); | 11 | extern void synchronize_irq(unsigned int irq); |
diff --git a/include/linux/i2c.h b/include/linux/i2c.h index eff50e062be8..d9c8dbd3373f 100644 --- a/include/linux/i2c.h +++ b/include/linux/i2c.h | |||
| @@ -445,7 +445,7 @@ static inline void i2c_set_adapdata(struct i2c_adapter *dev, void *data) | |||
| 445 | static inline struct i2c_adapter * | 445 | static inline struct i2c_adapter * |
| 446 | i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) | 446 | i2c_parent_is_i2c_adapter(const struct i2c_adapter *adapter) |
| 447 | { | 447 | { |
| 448 | #if IS_ENABLED(I2C_MUX) | 448 | #if IS_ENABLED(CONFIG_I2C_MUX) |
| 449 | struct device *parent = adapter->dev.parent; | 449 | struct device *parent = adapter->dev.parent; |
| 450 | 450 | ||
| 451 | if (parent != NULL && parent->type == &i2c_adapter_type) | 451 | if (parent != NULL && parent->type == &i2c_adapter_type) |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index b0ed422e4e4a..f0e52383a001 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
| @@ -11,6 +11,7 @@ | |||
| 11 | #include <linux/user_namespace.h> | 11 | #include <linux/user_namespace.h> |
| 12 | #include <linux/securebits.h> | 12 | #include <linux/securebits.h> |
| 13 | #include <linux/seqlock.h> | 13 | #include <linux/seqlock.h> |
| 14 | #include <linux/rbtree.h> | ||
| 14 | #include <net/net_namespace.h> | 15 | #include <net/net_namespace.h> |
| 15 | #include <linux/sched/rt.h> | 16 | #include <linux/sched/rt.h> |
| 16 | 17 | ||
| @@ -154,6 +155,14 @@ extern struct task_group root_task_group; | |||
| 154 | 155 | ||
| 155 | #define INIT_TASK_COMM "swapper" | 156 | #define INIT_TASK_COMM "swapper" |
| 156 | 157 | ||
| 158 | #ifdef CONFIG_RT_MUTEXES | ||
| 159 | # define INIT_RT_MUTEXES(tsk) \ | ||
| 160 | .pi_waiters = RB_ROOT, \ | ||
| 161 | .pi_waiters_leftmost = NULL, | ||
| 162 | #else | ||
| 163 | # define INIT_RT_MUTEXES(tsk) | ||
| 164 | #endif | ||
| 165 | |||
| 157 | /* | 166 | /* |
| 158 | * INIT_TASK is used to set up the first task table, touch at | 167 | * INIT_TASK is used to set up the first task table, touch at |
| 159 | * your own risk!. Base=0, limit=0x1fffff (=2MB) | 168 | * your own risk!. Base=0, limit=0x1fffff (=2MB) |
| @@ -221,6 +230,7 @@ extern struct task_group root_task_group; | |||
| 221 | INIT_TRACE_RECURSION \ | 230 | INIT_TRACE_RECURSION \ |
| 222 | INIT_TASK_RCU_PREEMPT(tsk) \ | 231 | INIT_TASK_RCU_PREEMPT(tsk) \ |
| 223 | INIT_CPUSET_SEQ(tsk) \ | 232 | INIT_CPUSET_SEQ(tsk) \ |
| 233 | INIT_RT_MUTEXES(tsk) \ | ||
| 224 | INIT_VTIME(tsk) \ | 234 | INIT_VTIME(tsk) \ |
| 225 | } | 235 | } |
| 226 | 236 | ||
diff --git a/include/linux/kernel.h b/include/linux/kernel.h index ecb87544cc5d..2aa3d4b000e6 100644 --- a/include/linux/kernel.h +++ b/include/linux/kernel.h | |||
| @@ -394,6 +394,15 @@ extern int panic_on_oops; | |||
| 394 | extern int panic_on_unrecovered_nmi; | 394 | extern int panic_on_unrecovered_nmi; |
| 395 | extern int panic_on_io_nmi; | 395 | extern int panic_on_io_nmi; |
| 396 | extern int sysctl_panic_on_stackoverflow; | 396 | extern int sysctl_panic_on_stackoverflow; |
| 397 | /* | ||
| 398 | * Only to be used by arch init code. If the user over-wrote the default | ||
| 399 | * CONFIG_PANIC_TIMEOUT, honor it. | ||
| 400 | */ | ||
| 401 | static inline void set_arch_panic_timeout(int timeout, int arch_default_timeout) | ||
| 402 | { | ||
| 403 | if (panic_timeout == arch_default_timeout) | ||
| 404 | panic_timeout = timeout; | ||
| 405 | } | ||
| 397 | extern const char *print_tainted(void); | 406 | extern const char *print_tainted(void); |
| 398 | enum lockdep_ok { | 407 | enum lockdep_ok { |
| 399 | LOCKDEP_STILL_OK, | 408 | LOCKDEP_STILL_OK, |
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 2e069d1288df..e56b07f5c9b6 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h | |||
| @@ -320,6 +320,7 @@ struct perf_event { | |||
| 320 | struct list_head migrate_entry; | 320 | struct list_head migrate_entry; |
| 321 | 321 | ||
| 322 | struct hlist_node hlist_entry; | 322 | struct hlist_node hlist_entry; |
| 323 | struct list_head active_entry; | ||
| 323 | int nr_siblings; | 324 | int nr_siblings; |
| 324 | int group_flags; | 325 | int group_flags; |
| 325 | struct perf_event *group_leader; | 326 | struct perf_event *group_leader; |
diff --git a/include/linux/platform_data/hwmon-s3c.h b/include/linux/platform_data/hwmon-s3c.h index c167e4429bc7..0e3cce130fe2 100644 --- a/include/linux/platform_data/hwmon-s3c.h +++ b/include/linux/platform_data/hwmon-s3c.h | |||
| @@ -1,5 +1,4 @@ | |||
| 1 | /* linux/arch/arm/plat-s3c/include/plat/hwmon.h | 1 | /* |
| 2 | * | ||
| 3 | * Copyright 2005 Simtec Electronics | 2 | * Copyright 2005 Simtec Electronics |
| 4 | * Ben Dooks <ben@simtec.co.uk> | 3 | * Ben Dooks <ben@simtec.co.uk> |
| 5 | * http://armlinux.simtec.co.uk/ | 4 | * http://armlinux.simtec.co.uk/ |
| @@ -11,8 +10,8 @@ | |||
| 11 | * published by the Free Software Foundation. | 10 | * published by the Free Software Foundation. |
| 12 | */ | 11 | */ |
| 13 | 12 | ||
| 14 | #ifndef __ASM_ARCH_ADC_HWMON_H | 13 | #ifndef __HWMON_S3C_H__ |
| 15 | #define __ASM_ARCH_ADC_HWMON_H __FILE__ | 14 | #define __HWMON_S3C_H__ |
| 16 | 15 | ||
| 17 | /** | 16 | /** |
| 18 | * s3c_hwmon_chcfg - channel configuration | 17 | * s3c_hwmon_chcfg - channel configuration |
| @@ -47,5 +46,4 @@ struct s3c_hwmon_pdata { | |||
| 47 | */ | 46 | */ |
| 48 | extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd); | 47 | extern void __init s3c_hwmon_set_platdata(struct s3c_hwmon_pdata *pd); |
| 49 | 48 | ||
| 50 | #endif /* __ASM_ARCH_ADC_HWMON_H */ | 49 | #endif /* __HWMON_S3C_H__ */ |
| 51 | |||
diff --git a/include/linux/platform_data/max197.h b/include/linux/platform_data/max197.h index e2a41dd7690c..8da8f94ee15c 100644 --- a/include/linux/platform_data/max197.h +++ b/include/linux/platform_data/max197.h | |||
| @@ -11,6 +11,9 @@ | |||
| 11 | * For further information, see the Documentation/hwmon/max197 file. | 11 | * For further information, see the Documentation/hwmon/max197 file. |
| 12 | */ | 12 | */ |
| 13 | 13 | ||
| 14 | #ifndef _PDATA_MAX197_H | ||
| 15 | #define _PDATA_MAX197_H | ||
| 16 | |||
| 14 | /** | 17 | /** |
| 15 | * struct max197_platform_data - MAX197 connectivity info | 18 | * struct max197_platform_data - MAX197 connectivity info |
| 16 | * @convert: Function used to start a conversion with control byte ctrl. | 19 | * @convert: Function used to start a conversion with control byte ctrl. |
| @@ -19,3 +22,5 @@ | |||
| 19 | struct max197_platform_data { | 22 | struct max197_platform_data { |
| 20 | int (*convert)(u8 ctrl); | 23 | int (*convert)(u8 ctrl); |
| 21 | }; | 24 | }; |
| 25 | |||
| 26 | #endif /* _PDATA_MAX197_H */ | ||
diff --git a/include/linux/platform_data/sht15.h b/include/linux/platform_data/sht15.h index 33e0fd27225e..12289c1e9413 100644 --- a/include/linux/platform_data/sht15.h +++ b/include/linux/platform_data/sht15.h | |||
| @@ -12,6 +12,9 @@ | |||
| 12 | * For further information, see the Documentation/hwmon/sht15 file. | 12 | * For further information, see the Documentation/hwmon/sht15 file. |
| 13 | */ | 13 | */ |
| 14 | 14 | ||
| 15 | #ifndef _PDATA_SHT15_H | ||
| 16 | #define _PDATA_SHT15_H | ||
| 17 | |||
| 15 | /** | 18 | /** |
| 16 | * struct sht15_platform_data - sht15 connectivity info | 19 | * struct sht15_platform_data - sht15 connectivity info |
| 17 | * @gpio_data: no. of gpio to which bidirectional data line is | 20 | * @gpio_data: no. of gpio to which bidirectional data line is |
| @@ -31,3 +34,5 @@ struct sht15_platform_data { | |||
| 31 | bool no_otp_reload; | 34 | bool no_otp_reload; |
| 32 | bool low_resolution; | 35 | bool low_resolution; |
| 33 | }; | 36 | }; |
| 37 | |||
| 38 | #endif /* _PDATA_SHT15_H */ | ||
diff --git a/include/linux/preempt.h b/include/linux/preempt.h index a3d9dc8c2c00..59749fc48328 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h | |||
| @@ -64,7 +64,11 @@ do { \ | |||
| 64 | } while (0) | 64 | } while (0) |
| 65 | 65 | ||
| 66 | #else | 66 | #else |
| 67 | #define preempt_enable() preempt_enable_no_resched() | 67 | #define preempt_enable() \ |
| 68 | do { \ | ||
| 69 | barrier(); \ | ||
| 70 | preempt_count_dec(); \ | ||
| 71 | } while (0) | ||
| 68 | #define preempt_check_resched() do { } while (0) | 72 | #define preempt_check_resched() do { } while (0) |
| 69 | #endif | 73 | #endif |
| 70 | 74 | ||
| @@ -93,7 +97,11 @@ do { \ | |||
| 93 | __preempt_schedule_context(); \ | 97 | __preempt_schedule_context(); \ |
| 94 | } while (0) | 98 | } while (0) |
| 95 | #else | 99 | #else |
| 96 | #define preempt_enable_notrace() preempt_enable_no_resched_notrace() | 100 | #define preempt_enable_notrace() \ |
| 101 | do { \ | ||
| 102 | barrier(); \ | ||
| 103 | __preempt_count_dec(); \ | ||
| 104 | } while (0) | ||
| 97 | #endif | 105 | #endif |
| 98 | 106 | ||
| 99 | #else /* !CONFIG_PREEMPT_COUNT */ | 107 | #else /* !CONFIG_PREEMPT_COUNT */ |
| @@ -116,6 +124,31 @@ do { \ | |||
| 116 | 124 | ||
| 117 | #endif /* CONFIG_PREEMPT_COUNT */ | 125 | #endif /* CONFIG_PREEMPT_COUNT */ |
| 118 | 126 | ||
| 127 | #ifdef MODULE | ||
| 128 | /* | ||
| 129 | * Modules have no business playing preemption tricks. | ||
| 130 | */ | ||
| 131 | #undef sched_preempt_enable_no_resched | ||
| 132 | #undef preempt_enable_no_resched | ||
| 133 | #undef preempt_enable_no_resched_notrace | ||
| 134 | #undef preempt_check_resched | ||
| 135 | #endif | ||
| 136 | |||
| 137 | #ifdef CONFIG_PREEMPT | ||
| 138 | #define preempt_set_need_resched() \ | ||
| 139 | do { \ | ||
| 140 | set_preempt_need_resched(); \ | ||
| 141 | } while (0) | ||
| 142 | #define preempt_fold_need_resched() \ | ||
| 143 | do { \ | ||
| 144 | if (tif_need_resched()) \ | ||
| 145 | set_preempt_need_resched(); \ | ||
| 146 | } while (0) | ||
| 147 | #else | ||
| 148 | #define preempt_set_need_resched() do { } while (0) | ||
| 149 | #define preempt_fold_need_resched() do { } while (0) | ||
| 150 | #endif | ||
| 151 | |||
| 119 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 152 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 120 | 153 | ||
| 121 | struct preempt_notifier; | 154 | struct preempt_notifier; |
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h index d169820203dd..dbeec4d4a3be 100644 --- a/include/linux/preempt_mask.h +++ b/include/linux/preempt_mask.h | |||
| @@ -2,7 +2,6 @@ | |||
| 2 | #define LINUX_PREEMPT_MASK_H | 2 | #define LINUX_PREEMPT_MASK_H |
| 3 | 3 | ||
| 4 | #include <linux/preempt.h> | 4 | #include <linux/preempt.h> |
| 5 | #include <asm/hardirq.h> | ||
| 6 | 5 | ||
| 7 | /* | 6 | /* |
| 8 | * We put the hardirq and softirq counter into the preemption | 7 | * We put the hardirq and softirq counter into the preemption |
| @@ -79,6 +78,21 @@ | |||
| 79 | #endif | 78 | #endif |
| 80 | 79 | ||
| 81 | /* | 80 | /* |
| 81 | * The preempt_count offset needed for things like: | ||
| 82 | * | ||
| 83 | * spin_lock_bh() | ||
| 84 | * | ||
| 85 | * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and | ||
| 86 | * softirqs, such that unlock sequences of: | ||
| 87 | * | ||
| 88 | * spin_unlock(); | ||
| 89 | * local_bh_enable(); | ||
| 90 | * | ||
| 91 | * Work as expected. | ||
| 92 | */ | ||
| 93 | #define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET) | ||
| 94 | |||
| 95 | /* | ||
| 82 | * Are we running in atomic context? WARNING: this macro cannot | 96 | * Are we running in atomic context? WARNING: this macro cannot |
| 83 | * always detect atomic context; in particular, it cannot know about | 97 | * always detect atomic context; in particular, it cannot know about |
| 84 | * held spinlocks in non-preemptible kernels. Thus it should not be | 98 | * held spinlocks in non-preemptible kernels. Thus it should not be |
diff --git a/include/linux/rculist.h b/include/linux/rculist.h index 45a0a9e81478..dbaf99084112 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h | |||
| @@ -55,8 +55,8 @@ static inline void __list_add_rcu(struct list_head *new, | |||
| 55 | next->prev = new; | 55 | next->prev = new; |
| 56 | } | 56 | } |
| 57 | #else | 57 | #else |
| 58 | extern void __list_add_rcu(struct list_head *new, | 58 | void __list_add_rcu(struct list_head *new, |
| 59 | struct list_head *prev, struct list_head *next); | 59 | struct list_head *prev, struct list_head *next); |
| 60 | #endif | 60 | #endif |
| 61 | 61 | ||
| 62 | /** | 62 | /** |
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h index 39cbb889e20d..3e355c688618 100644 --- a/include/linux/rcupdate.h +++ b/include/linux/rcupdate.h | |||
| @@ -50,13 +50,13 @@ extern int rcutorture_runnable; /* for sysctl */ | |||
| 50 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ | 50 | #endif /* #ifdef CONFIG_RCU_TORTURE_TEST */ |
| 51 | 51 | ||
| 52 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) | 52 | #if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU) |
| 53 | extern void rcutorture_record_test_transition(void); | 53 | void rcutorture_record_test_transition(void); |
| 54 | extern void rcutorture_record_progress(unsigned long vernum); | 54 | void rcutorture_record_progress(unsigned long vernum); |
| 55 | extern void do_trace_rcu_torture_read(const char *rcutorturename, | 55 | void do_trace_rcu_torture_read(const char *rcutorturename, |
| 56 | struct rcu_head *rhp, | 56 | struct rcu_head *rhp, |
| 57 | unsigned long secs, | 57 | unsigned long secs, |
| 58 | unsigned long c_old, | 58 | unsigned long c_old, |
| 59 | unsigned long c); | 59 | unsigned long c); |
| 60 | #else | 60 | #else |
| 61 | static inline void rcutorture_record_test_transition(void) | 61 | static inline void rcutorture_record_test_transition(void) |
| 62 | { | 62 | { |
| @@ -65,11 +65,11 @@ static inline void rcutorture_record_progress(unsigned long vernum) | |||
| 65 | { | 65 | { |
| 66 | } | 66 | } |
| 67 | #ifdef CONFIG_RCU_TRACE | 67 | #ifdef CONFIG_RCU_TRACE |
| 68 | extern void do_trace_rcu_torture_read(const char *rcutorturename, | 68 | void do_trace_rcu_torture_read(const char *rcutorturename, |
| 69 | struct rcu_head *rhp, | 69 | struct rcu_head *rhp, |
| 70 | unsigned long secs, | 70 | unsigned long secs, |
| 71 | unsigned long c_old, | 71 | unsigned long c_old, |
| 72 | unsigned long c); | 72 | unsigned long c); |
| 73 | #else | 73 | #else |
| 74 | #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ | 74 | #define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \ |
| 75 | do { } while (0) | 75 | do { } while (0) |
| @@ -118,8 +118,8 @@ extern void do_trace_rcu_torture_read(const char *rcutorturename, | |||
| 118 | * if CPU A and CPU B are the same CPU (but again only if the system has | 118 | * if CPU A and CPU B are the same CPU (but again only if the system has |
| 119 | * more than one CPU). | 119 | * more than one CPU). |
| 120 | */ | 120 | */ |
| 121 | extern void call_rcu(struct rcu_head *head, | 121 | void call_rcu(struct rcu_head *head, |
| 122 | void (*func)(struct rcu_head *head)); | 122 | void (*func)(struct rcu_head *head)); |
| 123 | 123 | ||
| 124 | #else /* #ifdef CONFIG_PREEMPT_RCU */ | 124 | #else /* #ifdef CONFIG_PREEMPT_RCU */ |
| 125 | 125 | ||
| @@ -149,8 +149,8 @@ extern void call_rcu(struct rcu_head *head, | |||
| 149 | * See the description of call_rcu() for more detailed information on | 149 | * See the description of call_rcu() for more detailed information on |
| 150 | * memory ordering guarantees. | 150 | * memory ordering guarantees. |
| 151 | */ | 151 | */ |
| 152 | extern void call_rcu_bh(struct rcu_head *head, | 152 | void call_rcu_bh(struct rcu_head *head, |
| 153 | void (*func)(struct rcu_head *head)); | 153 | void (*func)(struct rcu_head *head)); |
| 154 | 154 | ||
| 155 | /** | 155 | /** |
| 156 | * call_rcu_sched() - Queue an RCU for invocation after sched grace period. | 156 | * call_rcu_sched() - Queue an RCU for invocation after sched grace period. |
| @@ -171,16 +171,16 @@ extern void call_rcu_bh(struct rcu_head *head, | |||
| 171 | * See the description of call_rcu() for more detailed information on | 171 | * See the description of call_rcu() for more detailed information on |
| 172 | * memory ordering guarantees. | 172 | * memory ordering guarantees. |
| 173 | */ | 173 | */ |
| 174 | extern void call_rcu_sched(struct rcu_head *head, | 174 | void call_rcu_sched(struct rcu_head *head, |
| 175 | void (*func)(struct rcu_head *rcu)); | 175 | void (*func)(struct rcu_head *rcu)); |
| 176 | 176 | ||
| 177 | extern void synchronize_sched(void); | 177 | void synchronize_sched(void); |
| 178 | 178 | ||
| 179 | #ifdef CONFIG_PREEMPT_RCU | 179 | #ifdef CONFIG_PREEMPT_RCU |
| 180 | 180 | ||
| 181 | extern void __rcu_read_lock(void); | 181 | void __rcu_read_lock(void); |
| 182 | extern void __rcu_read_unlock(void); | 182 | void __rcu_read_unlock(void); |
| 183 | extern void rcu_read_unlock_special(struct task_struct *t); | 183 | void rcu_read_unlock_special(struct task_struct *t); |
| 184 | void synchronize_rcu(void); | 184 | void synchronize_rcu(void); |
| 185 | 185 | ||
| 186 | /* | 186 | /* |
| @@ -216,19 +216,19 @@ static inline int rcu_preempt_depth(void) | |||
| 216 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ | 216 | #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ |
| 217 | 217 | ||
| 218 | /* Internal to kernel */ | 218 | /* Internal to kernel */ |
| 219 | extern void rcu_init(void); | 219 | void rcu_init(void); |
| 220 | extern void rcu_sched_qs(int cpu); | 220 | void rcu_sched_qs(int cpu); |
| 221 | extern void rcu_bh_qs(int cpu); | 221 | void rcu_bh_qs(int cpu); |
| 222 | extern void rcu_check_callbacks(int cpu, int user); | 222 | void rcu_check_callbacks(int cpu, int user); |
| 223 | struct notifier_block; | 223 | struct notifier_block; |
| 224 | extern void rcu_idle_enter(void); | 224 | void rcu_idle_enter(void); |
| 225 | extern void rcu_idle_exit(void); | 225 | void rcu_idle_exit(void); |
| 226 | extern void rcu_irq_enter(void); | 226 | void rcu_irq_enter(void); |
| 227 | extern void rcu_irq_exit(void); | 227 | void rcu_irq_exit(void); |
| 228 | 228 | ||
| 229 | #ifdef CONFIG_RCU_USER_QS | 229 | #ifdef CONFIG_RCU_USER_QS |
| 230 | extern void rcu_user_enter(void); | 230 | void rcu_user_enter(void); |
| 231 | extern void rcu_user_exit(void); | 231 | void rcu_user_exit(void); |
| 232 | #else | 232 | #else |
| 233 | static inline void rcu_user_enter(void) { } | 233 | static inline void rcu_user_enter(void) { } |
| 234 | static inline void rcu_user_exit(void) { } | 234 | static inline void rcu_user_exit(void) { } |
| @@ -262,7 +262,7 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev, | |||
| 262 | } while (0) | 262 | } while (0) |
| 263 | 263 | ||
| 264 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) | 264 | #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) |
| 265 | extern bool __rcu_is_watching(void); | 265 | bool __rcu_is_watching(void); |
| 266 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ | 266 | #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ |
| 267 | 267 | ||
| 268 | /* | 268 | /* |
| @@ -289,8 +289,8 @@ void wait_rcu_gp(call_rcu_func_t crf); | |||
| 289 | * initialization. | 289 | * initialization. |
| 290 | */ | 290 | */ |
| 291 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD | 291 | #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD |
| 292 | extern void init_rcu_head_on_stack(struct rcu_head *head); | 292 | void init_rcu_head_on_stack(struct rcu_head *head); |
| 293 | extern void destroy_rcu_head_on_stack(struct rcu_head *head); | 293 | void destroy_rcu_head_on_stack(struct rcu_head *head); |
| 294 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ | 294 | #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ |
| 295 | static inline void init_rcu_head_on_stack(struct rcu_head *head) | 295 | static inline void init_rcu_head_on_stack(struct rcu_head *head) |
| 296 | { | 296 | { |
| @@ -325,6 +325,7 @@ static inline void rcu_lock_release(struct lockdep_map *map) | |||
| 325 | extern struct lockdep_map rcu_lock_map; | 325 | extern struct lockdep_map rcu_lock_map; |
| 326 | extern struct lockdep_map rcu_bh_lock_map; | 326 | extern struct lockdep_map rcu_bh_lock_map; |
| 327 | extern struct lockdep_map rcu_sched_lock_map; | 327 | extern struct lockdep_map rcu_sched_lock_map; |
| 328 | extern struct lockdep_map rcu_callback_map; | ||
| 328 | extern int debug_lockdep_rcu_enabled(void); | 329 | extern int debug_lockdep_rcu_enabled(void); |
| 329 | 330 | ||
| 330 | /** | 331 | /** |
| @@ -362,7 +363,7 @@ static inline int rcu_read_lock_held(void) | |||
| 362 | * rcu_read_lock_bh_held() is defined out of line to avoid #include-file | 363 | * rcu_read_lock_bh_held() is defined out of line to avoid #include-file |
| 363 | * hell. | 364 | * hell. |
| 364 | */ | 365 | */ |
| 365 | extern int rcu_read_lock_bh_held(void); | 366 | int rcu_read_lock_bh_held(void); |
| 366 | 367 | ||
| 367 | /** | 368 | /** |
| 368 | * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? | 369 | * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? |
| @@ -448,7 +449,7 @@ static inline int rcu_read_lock_sched_held(void) | |||
| 448 | 449 | ||
| 449 | #ifdef CONFIG_PROVE_RCU | 450 | #ifdef CONFIG_PROVE_RCU |
| 450 | 451 | ||
| 451 | extern int rcu_my_thread_group_empty(void); | 452 | int rcu_my_thread_group_empty(void); |
| 452 | 453 | ||
| 453 | /** | 454 | /** |
| 454 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met | 455 | * rcu_lockdep_assert - emit lockdep splat if specified condition not met |
| @@ -548,10 +549,48 @@ static inline void rcu_preempt_sleep_check(void) | |||
| 548 | smp_read_barrier_depends(); \ | 549 | smp_read_barrier_depends(); \ |
| 549 | (_________p1); \ | 550 | (_________p1); \ |
| 550 | }) | 551 | }) |
| 551 | #define __rcu_assign_pointer(p, v, space) \ | 552 | |
| 553 | /** | ||
| 554 | * RCU_INITIALIZER() - statically initialize an RCU-protected global variable | ||
| 555 | * @v: The value to statically initialize with. | ||
| 556 | */ | ||
| 557 | #define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) | ||
| 558 | |||
| 559 | /** | ||
| 560 | * rcu_assign_pointer() - assign to RCU-protected pointer | ||
| 561 | * @p: pointer to assign to | ||
| 562 | * @v: value to assign (publish) | ||
| 563 | * | ||
| 564 | * Assigns the specified value to the specified RCU-protected | ||
| 565 | * pointer, ensuring that any concurrent RCU readers will see | ||
| 566 | * any prior initialization. | ||
| 567 | * | ||
| 568 | * Inserts memory barriers on architectures that require them | ||
| 569 | * (which is most of them), and also prevents the compiler from | ||
| 570 | * reordering the code that initializes the structure after the pointer | ||
| 571 | * assignment. More importantly, this call documents which pointers | ||
| 572 | * will be dereferenced by RCU read-side code. | ||
| 573 | * | ||
| 574 | * In some special cases, you may use RCU_INIT_POINTER() instead | ||
| 575 | * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due | ||
| 576 | * to the fact that it does not constrain either the CPU or the compiler. | ||
| 577 | * That said, using RCU_INIT_POINTER() when you should have used | ||
| 578 | * rcu_assign_pointer() is a very bad thing that results in | ||
| 579 | * impossible-to-diagnose memory corruption. So please be careful. | ||
| 580 | * See the RCU_INIT_POINTER() comment header for details. | ||
| 581 | * | ||
| 582 | * Note that rcu_assign_pointer() evaluates each of its arguments only | ||
| 583 | * once, appearances notwithstanding. One of the "extra" evaluations | ||
| 584 | * is in typeof() and the other visible only to sparse (__CHECKER__), | ||
| 585 | * neither of which actually execute the argument. As with most cpp | ||
| 586 | * macros, this execute-arguments-only-once property is important, so | ||
| 587 | * please be careful when making changes to rcu_assign_pointer() and the | ||
| 588 | * other macros that it invokes. | ||
| 589 | */ | ||
| 590 | #define rcu_assign_pointer(p, v) \ | ||
| 552 | do { \ | 591 | do { \ |
| 553 | smp_wmb(); \ | 592 | smp_wmb(); \ |
| 554 | (p) = (typeof(*v) __force space *)(v); \ | 593 | ACCESS_ONCE(p) = RCU_INITIALIZER(v); \ |
| 555 | } while (0) | 594 | } while (0) |
| 556 | 595 | ||
| 557 | 596 | ||
| @@ -890,32 +929,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 890 | } | 929 | } |
| 891 | 930 | ||
| 892 | /** | 931 | /** |
| 893 | * rcu_assign_pointer() - assign to RCU-protected pointer | ||
| 894 | * @p: pointer to assign to | ||
| 895 | * @v: value to assign (publish) | ||
| 896 | * | ||
| 897 | * Assigns the specified value to the specified RCU-protected | ||
| 898 | * pointer, ensuring that any concurrent RCU readers will see | ||
| 899 | * any prior initialization. | ||
| 900 | * | ||
| 901 | * Inserts memory barriers on architectures that require them | ||
| 902 | * (which is most of them), and also prevents the compiler from | ||
| 903 | * reordering the code that initializes the structure after the pointer | ||
| 904 | * assignment. More importantly, this call documents which pointers | ||
| 905 | * will be dereferenced by RCU read-side code. | ||
| 906 | * | ||
| 907 | * In some special cases, you may use RCU_INIT_POINTER() instead | ||
| 908 | * of rcu_assign_pointer(). RCU_INIT_POINTER() is a bit faster due | ||
| 909 | * to the fact that it does not constrain either the CPU or the compiler. | ||
| 910 | * That said, using RCU_INIT_POINTER() when you should have used | ||
| 911 | * rcu_assign_pointer() is a very bad thing that results in | ||
| 912 | * impossible-to-diagnose memory corruption. So please be careful. | ||
| 913 | * See the RCU_INIT_POINTER() comment header for details. | ||
| 914 | */ | ||
| 915 | #define rcu_assign_pointer(p, v) \ | ||
| 916 | __rcu_assign_pointer((p), (v), __rcu) | ||
| 917 | |||
| 918 | /** | ||
| 919 | * RCU_INIT_POINTER() - initialize an RCU protected pointer | 932 | * RCU_INIT_POINTER() - initialize an RCU protected pointer |
| 920 | * | 933 | * |
| 921 | * Initialize an RCU-protected pointer in special cases where readers | 934 | * Initialize an RCU-protected pointer in special cases where readers |
| @@ -949,7 +962,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 949 | */ | 962 | */ |
| 950 | #define RCU_INIT_POINTER(p, v) \ | 963 | #define RCU_INIT_POINTER(p, v) \ |
| 951 | do { \ | 964 | do { \ |
| 952 | p = (typeof(*v) __force __rcu *)(v); \ | 965 | p = RCU_INITIALIZER(v); \ |
| 953 | } while (0) | 966 | } while (0) |
| 954 | 967 | ||
| 955 | /** | 968 | /** |
| @@ -958,7 +971,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 958 | * GCC-style initialization for an RCU-protected pointer in a structure field. | 971 | * GCC-style initialization for an RCU-protected pointer in a structure field. |
| 959 | */ | 972 | */ |
| 960 | #define RCU_POINTER_INITIALIZER(p, v) \ | 973 | #define RCU_POINTER_INITIALIZER(p, v) \ |
| 961 | .p = (typeof(*v) __force __rcu *)(v) | 974 | .p = RCU_INITIALIZER(v) |
| 962 | 975 | ||
| 963 | /* | 976 | /* |
| 964 | * Does the specified offset indicate that the corresponding rcu_head | 977 | * Does the specified offset indicate that the corresponding rcu_head |
| @@ -1005,7 +1018,7 @@ static inline notrace void rcu_read_unlock_sched_notrace(void) | |||
| 1005 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) | 1018 | __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head)) |
| 1006 | 1019 | ||
| 1007 | #ifdef CONFIG_RCU_NOCB_CPU | 1020 | #ifdef CONFIG_RCU_NOCB_CPU |
| 1008 | extern bool rcu_is_nocb_cpu(int cpu); | 1021 | bool rcu_is_nocb_cpu(int cpu); |
| 1009 | #else | 1022 | #else |
| 1010 | static inline bool rcu_is_nocb_cpu(int cpu) { return false; } | 1023 | static inline bool rcu_is_nocb_cpu(int cpu) { return false; } |
| 1011 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ | 1024 | #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */ |
| @@ -1013,8 +1026,8 @@ static inline bool rcu_is_nocb_cpu(int cpu) { return false; } | |||
| 1013 | 1026 | ||
| 1014 | /* Only for use by adaptive-ticks code. */ | 1027 | /* Only for use by adaptive-ticks code. */ |
| 1015 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE | 1028 | #ifdef CONFIG_NO_HZ_FULL_SYSIDLE |
| 1016 | extern bool rcu_sys_is_idle(void); | 1029 | bool rcu_sys_is_idle(void); |
| 1017 | extern void rcu_sysidle_force_exit(void); | 1030 | void rcu_sysidle_force_exit(void); |
| 1018 | #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ | 1031 | #else /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */ |
| 1019 | 1032 | ||
| 1020 | static inline bool rcu_sys_is_idle(void) | 1033 | static inline bool rcu_sys_is_idle(void) |
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h index 09ebcbe9fd78..6f01771b571c 100644 --- a/include/linux/rcutiny.h +++ b/include/linux/rcutiny.h | |||
| @@ -125,7 +125,7 @@ static inline void exit_rcu(void) | |||
| 125 | 125 | ||
| 126 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | 126 | #ifdef CONFIG_DEBUG_LOCK_ALLOC |
| 127 | extern int rcu_scheduler_active __read_mostly; | 127 | extern int rcu_scheduler_active __read_mostly; |
| 128 | extern void rcu_scheduler_starting(void); | 128 | void rcu_scheduler_starting(void); |
| 129 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ | 129 | #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ |
| 130 | static inline void rcu_scheduler_starting(void) | 130 | static inline void rcu_scheduler_starting(void) |
| 131 | { | 131 | { |
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h index 4b9c81548742..72137ee8c603 100644 --- a/include/linux/rcutree.h +++ b/include/linux/rcutree.h | |||
| @@ -30,9 +30,9 @@ | |||
| 30 | #ifndef __LINUX_RCUTREE_H | 30 | #ifndef __LINUX_RCUTREE_H |
| 31 | #define __LINUX_RCUTREE_H | 31 | #define __LINUX_RCUTREE_H |
| 32 | 32 | ||
| 33 | extern void rcu_note_context_switch(int cpu); | 33 | void rcu_note_context_switch(int cpu); |
| 34 | extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); | 34 | int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies); |
| 35 | extern void rcu_cpu_stall_reset(void); | 35 | void rcu_cpu_stall_reset(void); |
| 36 | 36 | ||
| 37 | /* | 37 | /* |
| 38 | * Note a virtualization-based context switch. This is simply a | 38 | * Note a virtualization-based context switch. This is simply a |
| @@ -44,9 +44,9 @@ static inline void rcu_virt_note_context_switch(int cpu) | |||
| 44 | rcu_note_context_switch(cpu); | 44 | rcu_note_context_switch(cpu); |
| 45 | } | 45 | } |
| 46 | 46 | ||
| 47 | extern void synchronize_rcu_bh(void); | 47 | void synchronize_rcu_bh(void); |
| 48 | extern void synchronize_sched_expedited(void); | 48 | void synchronize_sched_expedited(void); |
| 49 | extern void synchronize_rcu_expedited(void); | 49 | void synchronize_rcu_expedited(void); |
| 50 | 50 | ||
| 51 | void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); | 51 | void kfree_call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); |
| 52 | 52 | ||
| @@ -71,25 +71,25 @@ static inline void synchronize_rcu_bh_expedited(void) | |||
| 71 | synchronize_sched_expedited(); | 71 | synchronize_sched_expedited(); |
| 72 | } | 72 | } |
| 73 | 73 | ||
| 74 | extern void rcu_barrier(void); | 74 | void rcu_barrier(void); |
| 75 | extern void rcu_barrier_bh(void); | 75 | void rcu_barrier_bh(void); |
| 76 | extern void rcu_barrier_sched(void); | 76 | void rcu_barrier_sched(void); |
| 77 | 77 | ||
| 78 | extern unsigned long rcutorture_testseq; | 78 | extern unsigned long rcutorture_testseq; |
| 79 | extern unsigned long rcutorture_vernum; | 79 | extern unsigned long rcutorture_vernum; |
| 80 | extern long rcu_batches_completed(void); | 80 | long rcu_batches_completed(void); |
| 81 | extern long rcu_batches_completed_bh(void); | 81 | long rcu_batches_completed_bh(void); |
| 82 | extern long rcu_batches_completed_sched(void); | 82 | long rcu_batches_completed_sched(void); |
| 83 | 83 | ||
| 84 | extern void rcu_force_quiescent_state(void); | 84 | void rcu_force_quiescent_state(void); |
| 85 | extern void rcu_bh_force_quiescent_state(void); | 85 | void rcu_bh_force_quiescent_state(void); |
| 86 | extern void rcu_sched_force_quiescent_state(void); | 86 | void rcu_sched_force_quiescent_state(void); |
| 87 | 87 | ||
| 88 | extern void exit_rcu(void); | 88 | void exit_rcu(void); |
| 89 | 89 | ||
| 90 | extern void rcu_scheduler_starting(void); | 90 | void rcu_scheduler_starting(void); |
| 91 | extern int rcu_scheduler_active __read_mostly; | 91 | extern int rcu_scheduler_active __read_mostly; |
| 92 | 92 | ||
| 93 | extern bool rcu_is_watching(void); | 93 | bool rcu_is_watching(void); |
| 94 | 94 | ||
| 95 | #endif /* __LINUX_RCUTREE_H */ | 95 | #endif /* __LINUX_RCUTREE_H */ |
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h index de17134244f3..3aed8d737e1a 100644 --- a/include/linux/rtmutex.h +++ b/include/linux/rtmutex.h | |||
| @@ -13,7 +13,7 @@ | |||
| 13 | #define __LINUX_RT_MUTEX_H | 13 | #define __LINUX_RT_MUTEX_H |
| 14 | 14 | ||
| 15 | #include <linux/linkage.h> | 15 | #include <linux/linkage.h> |
| 16 | #include <linux/plist.h> | 16 | #include <linux/rbtree.h> |
| 17 | #include <linux/spinlock_types.h> | 17 | #include <linux/spinlock_types.h> |
| 18 | 18 | ||
| 19 | extern int max_lock_depth; /* for sysctl */ | 19 | extern int max_lock_depth; /* for sysctl */ |
| @@ -22,12 +22,14 @@ extern int max_lock_depth; /* for sysctl */ | |||
| 22 | * The rt_mutex structure | 22 | * The rt_mutex structure |
| 23 | * | 23 | * |
| 24 | * @wait_lock: spinlock to protect the structure | 24 | * @wait_lock: spinlock to protect the structure |
| 25 | * @wait_list: pilist head to enqueue waiters in priority order | 25 | * @waiters: rbtree root to enqueue waiters in priority order |
| 26 | * @waiters_leftmost: top waiter | ||
| 26 | * @owner: the mutex owner | 27 | * @owner: the mutex owner |
| 27 | */ | 28 | */ |
| 28 | struct rt_mutex { | 29 | struct rt_mutex { |
| 29 | raw_spinlock_t wait_lock; | 30 | raw_spinlock_t wait_lock; |
| 30 | struct plist_head wait_list; | 31 | struct rb_root waiters; |
| 32 | struct rb_node *waiters_leftmost; | ||
| 31 | struct task_struct *owner; | 33 | struct task_struct *owner; |
| 32 | #ifdef CONFIG_DEBUG_RT_MUTEXES | 34 | #ifdef CONFIG_DEBUG_RT_MUTEXES |
| 33 | int save_state; | 35 | int save_state; |
| @@ -66,7 +68,7 @@ struct hrtimer_sleeper; | |||
| 66 | 68 | ||
| 67 | #define __RT_MUTEX_INITIALIZER(mutexname) \ | 69 | #define __RT_MUTEX_INITIALIZER(mutexname) \ |
| 68 | { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ | 70 | { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ |
| 69 | , .wait_list = PLIST_HEAD_INIT(mutexname.wait_list) \ | 71 | , .waiters = RB_ROOT \ |
| 70 | , .owner = NULL \ | 72 | , .owner = NULL \ |
| 71 | __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} | 73 | __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} |
| 72 | 74 | ||
| @@ -98,12 +100,4 @@ extern int rt_mutex_trylock(struct rt_mutex *lock); | |||
| 98 | 100 | ||
| 99 | extern void rt_mutex_unlock(struct rt_mutex *lock); | 101 | extern void rt_mutex_unlock(struct rt_mutex *lock); |
| 100 | 102 | ||
| 101 | #ifdef CONFIG_RT_MUTEXES | ||
| 102 | # define INIT_RT_MUTEXES(tsk) \ | ||
| 103 | .pi_waiters = PLIST_HEAD_INIT(tsk.pi_waiters), \ | ||
| 104 | INIT_RT_MUTEX_DEBUG(tsk) | ||
| 105 | #else | ||
| 106 | # define INIT_RT_MUTEXES(tsk) | ||
| 107 | #endif | ||
| 108 | |||
| 109 | #endif | 103 | #endif |
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h index 9c9f0495d37c..5b9b84b20407 100644 --- a/include/linux/rwlock_api_smp.h +++ b/include/linux/rwlock_api_smp.h | |||
| @@ -172,8 +172,7 @@ static inline void __raw_read_lock_irq(rwlock_t *lock) | |||
| 172 | 172 | ||
| 173 | static inline void __raw_read_lock_bh(rwlock_t *lock) | 173 | static inline void __raw_read_lock_bh(rwlock_t *lock) |
| 174 | { | 174 | { |
| 175 | local_bh_disable(); | 175 | __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 176 | preempt_disable(); | ||
| 177 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); | 176 | rwlock_acquire_read(&lock->dep_map, 0, 0, _RET_IP_); |
| 178 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); | 177 | LOCK_CONTENDED(lock, do_raw_read_trylock, do_raw_read_lock); |
| 179 | } | 178 | } |
| @@ -200,8 +199,7 @@ static inline void __raw_write_lock_irq(rwlock_t *lock) | |||
| 200 | 199 | ||
| 201 | static inline void __raw_write_lock_bh(rwlock_t *lock) | 200 | static inline void __raw_write_lock_bh(rwlock_t *lock) |
| 202 | { | 201 | { |
| 203 | local_bh_disable(); | 202 | __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 204 | preempt_disable(); | ||
| 205 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 203 | rwlock_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| 206 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); | 204 | LOCK_CONTENDED(lock, do_raw_write_trylock, do_raw_write_lock); |
| 207 | } | 205 | } |
| @@ -250,8 +248,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock) | |||
| 250 | { | 248 | { |
| 251 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 249 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 252 | do_raw_read_unlock(lock); | 250 | do_raw_read_unlock(lock); |
| 253 | preempt_enable_no_resched(); | 251 | __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 254 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
| 255 | } | 252 | } |
| 256 | 253 | ||
| 257 | static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, | 254 | static inline void __raw_write_unlock_irqrestore(rwlock_t *lock, |
| @@ -275,8 +272,7 @@ static inline void __raw_write_unlock_bh(rwlock_t *lock) | |||
| 275 | { | 272 | { |
| 276 | rwlock_release(&lock->dep_map, 1, _RET_IP_); | 273 | rwlock_release(&lock->dep_map, 1, _RET_IP_); |
| 277 | do_raw_write_unlock(lock); | 274 | do_raw_write_unlock(lock); |
| 278 | preempt_enable_no_resched(); | 275 | __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 279 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
| 280 | } | 276 | } |
| 281 | 277 | ||
| 282 | #endif /* __LINUX_RWLOCK_API_SMP_H */ | 278 | #endif /* __LINUX_RWLOCK_API_SMP_H */ |
diff --git a/include/linux/sched.h b/include/linux/sched.h index 53f97eb8dbc7..ffccdad050b5 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
| @@ -16,6 +16,7 @@ struct sched_param { | |||
| 16 | #include <linux/types.h> | 16 | #include <linux/types.h> |
| 17 | #include <linux/timex.h> | 17 | #include <linux/timex.h> |
| 18 | #include <linux/jiffies.h> | 18 | #include <linux/jiffies.h> |
| 19 | #include <linux/plist.h> | ||
| 19 | #include <linux/rbtree.h> | 20 | #include <linux/rbtree.h> |
| 20 | #include <linux/thread_info.h> | 21 | #include <linux/thread_info.h> |
| 21 | #include <linux/cpumask.h> | 22 | #include <linux/cpumask.h> |
| @@ -56,6 +57,70 @@ struct sched_param { | |||
| 56 | 57 | ||
| 57 | #include <asm/processor.h> | 58 | #include <asm/processor.h> |
| 58 | 59 | ||
| 60 | #define SCHED_ATTR_SIZE_VER0 48 /* sizeof first published struct */ | ||
| 61 | |||
| 62 | /* | ||
| 63 | * Extended scheduling parameters data structure. | ||
| 64 | * | ||
| 65 | * This is needed because the original struct sched_param can not be | ||
| 66 | * altered without introducing ABI issues with legacy applications | ||
| 67 | * (e.g., in sched_getparam()). | ||
| 68 | * | ||
| 69 | * However, the possibility of specifying more than just a priority for | ||
| 70 | * the tasks may be useful for a wide variety of application fields, e.g., | ||
| 71 | * multimedia, streaming, automation and control, and many others. | ||
| 72 | * | ||
| 73 | * This variant (sched_attr) is meant at describing a so-called | ||
| 74 | * sporadic time-constrained task. In such model a task is specified by: | ||
| 75 | * - the activation period or minimum instance inter-arrival time; | ||
| 76 | * - the maximum (or average, depending on the actual scheduling | ||
| 77 | * discipline) computation time of all instances, a.k.a. runtime; | ||
| 78 | * - the deadline (relative to the actual activation time) of each | ||
| 79 | * instance. | ||
| 80 | * Very briefly, a periodic (sporadic) task asks for the execution of | ||
| 81 | * some specific computation --which is typically called an instance-- | ||
| 82 | * (at most) every period. Moreover, each instance typically lasts no more | ||
| 83 | * than the runtime and must be completed by time instant t equal to | ||
| 84 | * the instance activation time + the deadline. | ||
| 85 | * | ||
| 86 | * This is reflected by the actual fields of the sched_attr structure: | ||
| 87 | * | ||
| 88 | * @size size of the structure, for fwd/bwd compat. | ||
| 89 | * | ||
| 90 | * @sched_policy task's scheduling policy | ||
| 91 | * @sched_flags for customizing the scheduler behaviour | ||
| 92 | * @sched_nice task's nice value (SCHED_NORMAL/BATCH) | ||
| 93 | * @sched_priority task's static priority (SCHED_FIFO/RR) | ||
| 94 | * @sched_deadline representative of the task's deadline | ||
| 95 | * @sched_runtime representative of the task's runtime | ||
| 96 | * @sched_period representative of the task's period | ||
| 97 | * | ||
| 98 | * Given this task model, there are a multiplicity of scheduling algorithms | ||
| 99 | * and policies, that can be used to ensure all the tasks will make their | ||
| 100 | * timing constraints. | ||
| 101 | * | ||
| 102 | * As of now, the SCHED_DEADLINE policy (sched_dl scheduling class) is the | ||
| 103 | * only user of this new interface. More information about the algorithm | ||
| 104 | * available in the scheduling class file or in Documentation/. | ||
| 105 | */ | ||
| 106 | struct sched_attr { | ||
| 107 | u32 size; | ||
| 108 | |||
| 109 | u32 sched_policy; | ||
| 110 | u64 sched_flags; | ||
| 111 | |||
| 112 | /* SCHED_NORMAL, SCHED_BATCH */ | ||
| 113 | s32 sched_nice; | ||
| 114 | |||
| 115 | /* SCHED_FIFO, SCHED_RR */ | ||
| 116 | u32 sched_priority; | ||
| 117 | |||
| 118 | /* SCHED_DEADLINE */ | ||
| 119 | u64 sched_runtime; | ||
| 120 | u64 sched_deadline; | ||
| 121 | u64 sched_period; | ||
| 122 | }; | ||
| 123 | |||
| 59 | struct exec_domain; | 124 | struct exec_domain; |
| 60 | struct futex_pi_state; | 125 | struct futex_pi_state; |
| 61 | struct robust_list_head; | 126 | struct robust_list_head; |
| @@ -168,7 +233,6 @@ extern char ___assert_task_state[1 - 2*!!( | |||
| 168 | 233 | ||
| 169 | #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) | 234 | #define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) |
| 170 | #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) | 235 | #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) |
| 171 | #define task_is_dead(task) ((task)->exit_state != 0) | ||
| 172 | #define task_is_stopped_or_traced(task) \ | 236 | #define task_is_stopped_or_traced(task) \ |
| 173 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) | 237 | ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) |
| 174 | #define task_contributes_to_load(task) \ | 238 | #define task_contributes_to_load(task) \ |
| @@ -1029,6 +1093,51 @@ struct sched_rt_entity { | |||
| 1029 | #endif | 1093 | #endif |
| 1030 | }; | 1094 | }; |
| 1031 | 1095 | ||
| 1096 | struct sched_dl_entity { | ||
| 1097 | struct rb_node rb_node; | ||
| 1098 | |||
| 1099 | /* | ||
| 1100 | * Original scheduling parameters. Copied here from sched_attr | ||
| 1101 | * during sched_setscheduler2(), they will remain the same until | ||
| 1102 | * the next sched_setscheduler2(). | ||
| 1103 | */ | ||
| 1104 | u64 dl_runtime; /* maximum runtime for each instance */ | ||
| 1105 | u64 dl_deadline; /* relative deadline of each instance */ | ||
| 1106 | u64 dl_period; /* separation of two instances (period) */ | ||
| 1107 | u64 dl_bw; /* dl_runtime / dl_deadline */ | ||
| 1108 | |||
| 1109 | /* | ||
| 1110 | * Actual scheduling parameters. Initialized with the values above, | ||
| 1111 | * they are continously updated during task execution. Note that | ||
| 1112 | * the remaining runtime could be < 0 in case we are in overrun. | ||
| 1113 | */ | ||
| 1114 | s64 runtime; /* remaining runtime for this instance */ | ||
| 1115 | u64 deadline; /* absolute deadline for this instance */ | ||
| 1116 | unsigned int flags; /* specifying the scheduler behaviour */ | ||
| 1117 | |||
| 1118 | /* | ||
| 1119 | * Some bool flags: | ||
| 1120 | * | ||
| 1121 | * @dl_throttled tells if we exhausted the runtime. If so, the | ||
| 1122 | * task has to wait for a replenishment to be performed at the | ||
| 1123 | * next firing of dl_timer. | ||
| 1124 | * | ||
| 1125 | * @dl_new tells if a new instance arrived. If so we must | ||
| 1126 | * start executing it with full runtime and reset its absolute | ||
| 1127 | * deadline; | ||
| 1128 | * | ||
| 1129 | * @dl_boosted tells if we are boosted due to DI. If so we are | ||
| 1130 | * outside bandwidth enforcement mechanism (but only until we | ||
| 1131 | * exit the critical section). | ||
| 1132 | */ | ||
| 1133 | int dl_throttled, dl_new, dl_boosted; | ||
| 1134 | |||
| 1135 | /* | ||
| 1136 | * Bandwidth enforcement timer. Each -deadline task has its | ||
| 1137 | * own bandwidth to be enforced, thus we need one timer per task. | ||
| 1138 | */ | ||
| 1139 | struct hrtimer dl_timer; | ||
| 1140 | }; | ||
| 1032 | 1141 | ||
| 1033 | struct rcu_node; | 1142 | struct rcu_node; |
| 1034 | 1143 | ||
| @@ -1065,6 +1174,7 @@ struct task_struct { | |||
| 1065 | #ifdef CONFIG_CGROUP_SCHED | 1174 | #ifdef CONFIG_CGROUP_SCHED |
| 1066 | struct task_group *sched_task_group; | 1175 | struct task_group *sched_task_group; |
| 1067 | #endif | 1176 | #endif |
| 1177 | struct sched_dl_entity dl; | ||
| 1068 | 1178 | ||
| 1069 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 1179 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
| 1070 | /* list of struct preempt_notifier: */ | 1180 | /* list of struct preempt_notifier: */ |
| @@ -1098,6 +1208,7 @@ struct task_struct { | |||
| 1098 | struct list_head tasks; | 1208 | struct list_head tasks; |
| 1099 | #ifdef CONFIG_SMP | 1209 | #ifdef CONFIG_SMP |
| 1100 | struct plist_node pushable_tasks; | 1210 | struct plist_node pushable_tasks; |
| 1211 | struct rb_node pushable_dl_tasks; | ||
| 1101 | #endif | 1212 | #endif |
| 1102 | 1213 | ||
| 1103 | struct mm_struct *mm, *active_mm; | 1214 | struct mm_struct *mm, *active_mm; |
| @@ -1249,9 +1360,12 @@ struct task_struct { | |||
| 1249 | 1360 | ||
| 1250 | #ifdef CONFIG_RT_MUTEXES | 1361 | #ifdef CONFIG_RT_MUTEXES |
| 1251 | /* PI waiters blocked on a rt_mutex held by this task */ | 1362 | /* PI waiters blocked on a rt_mutex held by this task */ |
| 1252 | struct plist_head pi_waiters; | 1363 | struct rb_root pi_waiters; |
| 1364 | struct rb_node *pi_waiters_leftmost; | ||
| 1253 | /* Deadlock detection and priority inheritance handling */ | 1365 | /* Deadlock detection and priority inheritance handling */ |
| 1254 | struct rt_mutex_waiter *pi_blocked_on; | 1366 | struct rt_mutex_waiter *pi_blocked_on; |
| 1367 | /* Top pi_waiters task */ | ||
| 1368 | struct task_struct *pi_top_task; | ||
| 1255 | #endif | 1369 | #endif |
| 1256 | 1370 | ||
| 1257 | #ifdef CONFIG_DEBUG_MUTEXES | 1371 | #ifdef CONFIG_DEBUG_MUTEXES |
| @@ -1880,7 +1994,9 @@ static inline void sched_clock_idle_wakeup_event(u64 delta_ns) | |||
| 1880 | * but then during bootup it turns out that sched_clock() | 1994 | * but then during bootup it turns out that sched_clock() |
| 1881 | * is reliable after all: | 1995 | * is reliable after all: |
| 1882 | */ | 1996 | */ |
| 1883 | extern int sched_clock_stable; | 1997 | extern int sched_clock_stable(void); |
| 1998 | extern void set_sched_clock_stable(void); | ||
| 1999 | extern void clear_sched_clock_stable(void); | ||
| 1884 | 2000 | ||
| 1885 | extern void sched_clock_tick(void); | 2001 | extern void sched_clock_tick(void); |
| 1886 | extern void sched_clock_idle_sleep_event(void); | 2002 | extern void sched_clock_idle_sleep_event(void); |
| @@ -1959,6 +2075,8 @@ extern int sched_setscheduler(struct task_struct *, int, | |||
| 1959 | const struct sched_param *); | 2075 | const struct sched_param *); |
| 1960 | extern int sched_setscheduler_nocheck(struct task_struct *, int, | 2076 | extern int sched_setscheduler_nocheck(struct task_struct *, int, |
| 1961 | const struct sched_param *); | 2077 | const struct sched_param *); |
| 2078 | extern int sched_setattr(struct task_struct *, | ||
| 2079 | const struct sched_attr *); | ||
| 1962 | extern struct task_struct *idle_task(int cpu); | 2080 | extern struct task_struct *idle_task(int cpu); |
| 1963 | /** | 2081 | /** |
| 1964 | * is_idle_task - is the specified task an idle task? | 2082 | * is_idle_task - is the specified task an idle task? |
| @@ -2038,7 +2156,7 @@ extern void wake_up_new_task(struct task_struct *tsk); | |||
| 2038 | #else | 2156 | #else |
| 2039 | static inline void kick_process(struct task_struct *tsk) { } | 2157 | static inline void kick_process(struct task_struct *tsk) { } |
| 2040 | #endif | 2158 | #endif |
| 2041 | extern void sched_fork(unsigned long clone_flags, struct task_struct *p); | 2159 | extern int sched_fork(unsigned long clone_flags, struct task_struct *p); |
| 2042 | extern void sched_dead(struct task_struct *p); | 2160 | extern void sched_dead(struct task_struct *p); |
| 2043 | 2161 | ||
| 2044 | extern void proc_caches_init(void); | 2162 | extern void proc_caches_init(void); |
| @@ -2627,6 +2745,21 @@ static inline bool __must_check current_clr_polling_and_test(void) | |||
| 2627 | } | 2745 | } |
| 2628 | #endif | 2746 | #endif |
| 2629 | 2747 | ||
| 2748 | static inline void current_clr_polling(void) | ||
| 2749 | { | ||
| 2750 | __current_clr_polling(); | ||
| 2751 | |||
| 2752 | /* | ||
| 2753 | * Ensure we check TIF_NEED_RESCHED after we clear the polling bit. | ||
| 2754 | * Once the bit is cleared, we'll get IPIs with every new | ||
| 2755 | * TIF_NEED_RESCHED and the IPI handler, scheduler_ipi(), will also | ||
| 2756 | * fold. | ||
| 2757 | */ | ||
| 2758 | smp_mb(); /* paired with resched_task() */ | ||
| 2759 | |||
| 2760 | preempt_fold_need_resched(); | ||
| 2761 | } | ||
| 2762 | |||
| 2630 | static __always_inline bool need_resched(void) | 2763 | static __always_inline bool need_resched(void) |
| 2631 | { | 2764 | { |
| 2632 | return unlikely(tif_need_resched()); | 2765 | return unlikely(tif_need_resched()); |
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h new file mode 100644 index 000000000000..9d303b8847df --- /dev/null +++ b/include/linux/sched/deadline.h | |||
| @@ -0,0 +1,24 @@ | |||
| 1 | #ifndef _SCHED_DEADLINE_H | ||
| 2 | #define _SCHED_DEADLINE_H | ||
| 3 | |||
| 4 | /* | ||
| 5 | * SCHED_DEADLINE tasks has negative priorities, reflecting | ||
| 6 | * the fact that any of them has higher prio than RT and | ||
| 7 | * NORMAL/BATCH tasks. | ||
| 8 | */ | ||
| 9 | |||
| 10 | #define MAX_DL_PRIO 0 | ||
| 11 | |||
| 12 | static inline int dl_prio(int prio) | ||
| 13 | { | ||
| 14 | if (unlikely(prio < MAX_DL_PRIO)) | ||
| 15 | return 1; | ||
| 16 | return 0; | ||
| 17 | } | ||
| 18 | |||
| 19 | static inline int dl_task(struct task_struct *p) | ||
| 20 | { | ||
| 21 | return dl_prio(p->prio); | ||
| 22 | } | ||
| 23 | |||
| 24 | #endif /* _SCHED_DEADLINE_H */ | ||
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h index 440434df3627..34e4ebea8fce 100644 --- a/include/linux/sched/rt.h +++ b/include/linux/sched/rt.h | |||
| @@ -35,6 +35,7 @@ static inline int rt_task(struct task_struct *p) | |||
| 35 | #ifdef CONFIG_RT_MUTEXES | 35 | #ifdef CONFIG_RT_MUTEXES |
| 36 | extern int rt_mutex_getprio(struct task_struct *p); | 36 | extern int rt_mutex_getprio(struct task_struct *p); |
| 37 | extern void rt_mutex_setprio(struct task_struct *p, int prio); | 37 | extern void rt_mutex_setprio(struct task_struct *p, int prio); |
| 38 | extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task); | ||
| 38 | extern void rt_mutex_adjust_pi(struct task_struct *p); | 39 | extern void rt_mutex_adjust_pi(struct task_struct *p); |
| 39 | static inline bool tsk_is_pi_blocked(struct task_struct *tsk) | 40 | static inline bool tsk_is_pi_blocked(struct task_struct *tsk) |
| 40 | { | 41 | { |
| @@ -45,6 +46,10 @@ static inline int rt_mutex_getprio(struct task_struct *p) | |||
| 45 | { | 46 | { |
| 46 | return p->normal_prio; | 47 | return p->normal_prio; |
| 47 | } | 48 | } |
| 49 | static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task) | ||
| 50 | { | ||
| 51 | return NULL; | ||
| 52 | } | ||
| 48 | # define rt_mutex_adjust_pi(p) do { } while (0) | 53 | # define rt_mutex_adjust_pi(p) do { } while (0) |
| 49 | static inline bool tsk_is_pi_blocked(struct task_struct *tsk) | 54 | static inline bool tsk_is_pi_blocked(struct task_struct *tsk) |
| 50 | { | 55 | { |
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h index 41467f8ff8ec..31e0193cb0c5 100644 --- a/include/linux/sched/sysctl.h +++ b/include/linux/sched/sysctl.h | |||
| @@ -48,7 +48,6 @@ extern unsigned int sysctl_numa_balancing_scan_delay; | |||
| 48 | extern unsigned int sysctl_numa_balancing_scan_period_min; | 48 | extern unsigned int sysctl_numa_balancing_scan_period_min; |
| 49 | extern unsigned int sysctl_numa_balancing_scan_period_max; | 49 | extern unsigned int sysctl_numa_balancing_scan_period_max; |
| 50 | extern unsigned int sysctl_numa_balancing_scan_size; | 50 | extern unsigned int sysctl_numa_balancing_scan_size; |
| 51 | extern unsigned int sysctl_numa_balancing_settle_count; | ||
| 52 | 51 | ||
| 53 | #ifdef CONFIG_SCHED_DEBUG | 52 | #ifdef CONFIG_SCHED_DEBUG |
| 54 | extern unsigned int sysctl_sched_migration_cost; | 53 | extern unsigned int sysctl_sched_migration_cost; |
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index cf87a24c0f92..535f158977b9 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
| @@ -117,15 +117,15 @@ repeat: | |||
| 117 | } | 117 | } |
| 118 | 118 | ||
| 119 | /** | 119 | /** |
| 120 | * read_seqcount_begin_no_lockdep - start seq-read critical section w/o lockdep | 120 | * raw_read_seqcount_begin - start seq-read critical section w/o lockdep |
| 121 | * @s: pointer to seqcount_t | 121 | * @s: pointer to seqcount_t |
| 122 | * Returns: count to be passed to read_seqcount_retry | 122 | * Returns: count to be passed to read_seqcount_retry |
| 123 | * | 123 | * |
| 124 | * read_seqcount_begin_no_lockdep opens a read critical section of the given | 124 | * raw_read_seqcount_begin opens a read critical section of the given |
| 125 | * seqcount, but without any lockdep checking. Validity of the critical | 125 | * seqcount, but without any lockdep checking. Validity of the critical |
| 126 | * section is tested by checking read_seqcount_retry function. | 126 | * section is tested by checking read_seqcount_retry function. |
| 127 | */ | 127 | */ |
| 128 | static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s) | 128 | static inline unsigned raw_read_seqcount_begin(const seqcount_t *s) |
| 129 | { | 129 | { |
| 130 | unsigned ret = __read_seqcount_begin(s); | 130 | unsigned ret = __read_seqcount_begin(s); |
| 131 | smp_rmb(); | 131 | smp_rmb(); |
| @@ -144,7 +144,7 @@ static inline unsigned read_seqcount_begin_no_lockdep(const seqcount_t *s) | |||
| 144 | static inline unsigned read_seqcount_begin(const seqcount_t *s) | 144 | static inline unsigned read_seqcount_begin(const seqcount_t *s) |
| 145 | { | 145 | { |
| 146 | seqcount_lockdep_reader_access(s); | 146 | seqcount_lockdep_reader_access(s); |
| 147 | return read_seqcount_begin_no_lockdep(s); | 147 | return raw_read_seqcount_begin(s); |
| 148 | } | 148 | } |
| 149 | 149 | ||
| 150 | /** | 150 | /** |
| @@ -206,14 +206,26 @@ static inline int read_seqcount_retry(const seqcount_t *s, unsigned start) | |||
| 206 | } | 206 | } |
| 207 | 207 | ||
| 208 | 208 | ||
| 209 | |||
| 210 | static inline void raw_write_seqcount_begin(seqcount_t *s) | ||
| 211 | { | ||
| 212 | s->sequence++; | ||
| 213 | smp_wmb(); | ||
| 214 | } | ||
| 215 | |||
| 216 | static inline void raw_write_seqcount_end(seqcount_t *s) | ||
| 217 | { | ||
| 218 | smp_wmb(); | ||
| 219 | s->sequence++; | ||
| 220 | } | ||
| 221 | |||
| 209 | /* | 222 | /* |
| 210 | * Sequence counter only version assumes that callers are using their | 223 | * Sequence counter only version assumes that callers are using their |
| 211 | * own mutexing. | 224 | * own mutexing. |
| 212 | */ | 225 | */ |
| 213 | static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) | 226 | static inline void write_seqcount_begin_nested(seqcount_t *s, int subclass) |
| 214 | { | 227 | { |
| 215 | s->sequence++; | 228 | raw_write_seqcount_begin(s); |
| 216 | smp_wmb(); | ||
| 217 | seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); | 229 | seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_); |
| 218 | } | 230 | } |
| 219 | 231 | ||
| @@ -225,8 +237,7 @@ static inline void write_seqcount_begin(seqcount_t *s) | |||
| 225 | static inline void write_seqcount_end(seqcount_t *s) | 237 | static inline void write_seqcount_end(seqcount_t *s) |
| 226 | { | 238 | { |
| 227 | seqcount_release(&s->dep_map, 1, _RET_IP_); | 239 | seqcount_release(&s->dep_map, 1, _RET_IP_); |
| 228 | smp_wmb(); | 240 | raw_write_seqcount_end(s); |
| 229 | s->sequence++; | ||
| 230 | } | 241 | } |
| 231 | 242 | ||
| 232 | /** | 243 | /** |
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h index 75f34949d9ab..3f2867ff0ced 100644 --- a/include/linux/spinlock.h +++ b/include/linux/spinlock.h | |||
| @@ -130,6 +130,16 @@ do { \ | |||
| 130 | #define smp_mb__before_spinlock() smp_wmb() | 130 | #define smp_mb__before_spinlock() smp_wmb() |
| 131 | #endif | 131 | #endif |
| 132 | 132 | ||
| 133 | /* | ||
| 134 | * Place this after a lock-acquisition primitive to guarantee that | ||
| 135 | * an UNLOCK+LOCK pair act as a full barrier. This guarantee applies | ||
| 136 | * if the UNLOCK and LOCK are executed by the same CPU or if the | ||
| 137 | * UNLOCK and LOCK operate on the same lock variable. | ||
| 138 | */ | ||
| 139 | #ifndef smp_mb__after_unlock_lock | ||
| 140 | #define smp_mb__after_unlock_lock() do { } while (0) | ||
| 141 | #endif | ||
| 142 | |||
| 133 | /** | 143 | /** |
| 134 | * raw_spin_unlock_wait - wait until the spinlock gets unlocked | 144 | * raw_spin_unlock_wait - wait until the spinlock gets unlocked |
| 135 | * @lock: the spinlock in question. | 145 | * @lock: the spinlock in question. |
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h index bdb9993f0fda..42dfab89e740 100644 --- a/include/linux/spinlock_api_smp.h +++ b/include/linux/spinlock_api_smp.h | |||
| @@ -131,8 +131,7 @@ static inline void __raw_spin_lock_irq(raw_spinlock_t *lock) | |||
| 131 | 131 | ||
| 132 | static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) | 132 | static inline void __raw_spin_lock_bh(raw_spinlock_t *lock) |
| 133 | { | 133 | { |
| 134 | local_bh_disable(); | 134 | __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 135 | preempt_disable(); | ||
| 136 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); | 135 | spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); |
| 137 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); | 136 | LOCK_CONTENDED(lock, do_raw_spin_trylock, do_raw_spin_lock); |
| 138 | } | 137 | } |
| @@ -174,20 +173,17 @@ static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock) | |||
| 174 | { | 173 | { |
| 175 | spin_release(&lock->dep_map, 1, _RET_IP_); | 174 | spin_release(&lock->dep_map, 1, _RET_IP_); |
| 176 | do_raw_spin_unlock(lock); | 175 | do_raw_spin_unlock(lock); |
| 177 | preempt_enable_no_resched(); | 176 | __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 178 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
| 179 | } | 177 | } |
| 180 | 178 | ||
| 181 | static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) | 179 | static inline int __raw_spin_trylock_bh(raw_spinlock_t *lock) |
| 182 | { | 180 | { |
| 183 | local_bh_disable(); | 181 | __local_bh_disable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 184 | preempt_disable(); | ||
| 185 | if (do_raw_spin_trylock(lock)) { | 182 | if (do_raw_spin_trylock(lock)) { |
| 186 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); | 183 | spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); |
| 187 | return 1; | 184 | return 1; |
| 188 | } | 185 | } |
| 189 | preempt_enable_no_resched(); | 186 | __local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET); |
| 190 | local_bh_enable_ip((unsigned long)__builtin_return_address(0)); | ||
| 191 | return 0; | 187 | return 0; |
| 192 | } | 188 | } |
| 193 | 189 | ||
diff --git a/include/linux/spinlock_api_up.h b/include/linux/spinlock_api_up.h index af1f47229e70..d0d188861ad6 100644 --- a/include/linux/spinlock_api_up.h +++ b/include/linux/spinlock_api_up.h | |||
| @@ -24,11 +24,14 @@ | |||
| 24 | * flags straight, to suppress compiler warnings of unused lock | 24 | * flags straight, to suppress compiler warnings of unused lock |
| 25 | * variables, and to add the proper checker annotations: | 25 | * variables, and to add the proper checker annotations: |
| 26 | */ | 26 | */ |
| 27 | #define ___LOCK(lock) \ | ||
| 28 | do { __acquire(lock); (void)(lock); } while (0) | ||
| 29 | |||
| 27 | #define __LOCK(lock) \ | 30 | #define __LOCK(lock) \ |
| 28 | do { preempt_disable(); __acquire(lock); (void)(lock); } while (0) | 31 | do { preempt_disable(); ___LOCK(lock); } while (0) |
| 29 | 32 | ||
| 30 | #define __LOCK_BH(lock) \ | 33 | #define __LOCK_BH(lock) \ |
| 31 | do { local_bh_disable(); __LOCK(lock); } while (0) | 34 | do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0) |
| 32 | 35 | ||
| 33 | #define __LOCK_IRQ(lock) \ | 36 | #define __LOCK_IRQ(lock) \ |
| 34 | do { local_irq_disable(); __LOCK(lock); } while (0) | 37 | do { local_irq_disable(); __LOCK(lock); } while (0) |
| @@ -36,12 +39,15 @@ | |||
| 36 | #define __LOCK_IRQSAVE(lock, flags) \ | 39 | #define __LOCK_IRQSAVE(lock, flags) \ |
| 37 | do { local_irq_save(flags); __LOCK(lock); } while (0) | 40 | do { local_irq_save(flags); __LOCK(lock); } while (0) |
| 38 | 41 | ||
| 42 | #define ___UNLOCK(lock) \ | ||
| 43 | do { __release(lock); (void)(lock); } while (0) | ||
| 44 | |||
| 39 | #define __UNLOCK(lock) \ | 45 | #define __UNLOCK(lock) \ |
| 40 | do { preempt_enable(); __release(lock); (void)(lock); } while (0) | 46 | do { preempt_enable(); ___UNLOCK(lock); } while (0) |
| 41 | 47 | ||
| 42 | #define __UNLOCK_BH(lock) \ | 48 | #define __UNLOCK_BH(lock) \ |
| 43 | do { preempt_enable_no_resched(); local_bh_enable(); \ | 49 | do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \ |
| 44 | __release(lock); (void)(lock); } while (0) | 50 | ___UNLOCK(lock); } while (0) |
| 45 | 51 | ||
| 46 | #define __UNLOCK_IRQ(lock) \ | 52 | #define __UNLOCK_IRQ(lock) \ |
| 47 | do { local_irq_enable(); __UNLOCK(lock); } while (0) | 53 | do { local_irq_enable(); __UNLOCK(lock); } while (0) |
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h index 94273bbe6050..40ed9e9a77e5 100644 --- a/include/linux/syscalls.h +++ b/include/linux/syscalls.h | |||
| @@ -38,6 +38,7 @@ struct rlimit; | |||
| 38 | struct rlimit64; | 38 | struct rlimit64; |
| 39 | struct rusage; | 39 | struct rusage; |
| 40 | struct sched_param; | 40 | struct sched_param; |
| 41 | struct sched_attr; | ||
| 41 | struct sel_arg_struct; | 42 | struct sel_arg_struct; |
| 42 | struct semaphore; | 43 | struct semaphore; |
| 43 | struct sembuf; | 44 | struct sembuf; |
| @@ -279,9 +280,14 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy, | |||
| 279 | struct sched_param __user *param); | 280 | struct sched_param __user *param); |
| 280 | asmlinkage long sys_sched_setparam(pid_t pid, | 281 | asmlinkage long sys_sched_setparam(pid_t pid, |
| 281 | struct sched_param __user *param); | 282 | struct sched_param __user *param); |
| 283 | asmlinkage long sys_sched_setattr(pid_t pid, | ||
| 284 | struct sched_attr __user *attr); | ||
| 282 | asmlinkage long sys_sched_getscheduler(pid_t pid); | 285 | asmlinkage long sys_sched_getscheduler(pid_t pid); |
| 283 | asmlinkage long sys_sched_getparam(pid_t pid, | 286 | asmlinkage long sys_sched_getparam(pid_t pid, |
| 284 | struct sched_param __user *param); | 287 | struct sched_param __user *param); |
| 288 | asmlinkage long sys_sched_getattr(pid_t pid, | ||
| 289 | struct sched_attr __user *attr, | ||
| 290 | unsigned int size); | ||
| 285 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, | 291 | asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len, |
| 286 | unsigned long __user *user_mask_ptr); | 292 | unsigned long __user *user_mask_ptr); |
| 287 | asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, | 293 | asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len, |
diff --git a/include/linux/tick.h b/include/linux/tick.h index 5128d33bbb39..0175d8663b6c 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -104,7 +104,7 @@ extern struct cpumask *tick_get_broadcast_oneshot_mask(void); | |||
| 104 | extern void tick_clock_notify(void); | 104 | extern void tick_clock_notify(void); |
| 105 | extern int tick_check_oneshot_change(int allow_nohz); | 105 | extern int tick_check_oneshot_change(int allow_nohz); |
| 106 | extern struct tick_sched *tick_get_tick_sched(int cpu); | 106 | extern struct tick_sched *tick_get_tick_sched(int cpu); |
| 107 | extern void tick_check_idle(int cpu); | 107 | extern void tick_check_idle(void); |
| 108 | extern int tick_oneshot_mode_active(void); | 108 | extern int tick_oneshot_mode_active(void); |
| 109 | # ifndef arch_needs_cpu | 109 | # ifndef arch_needs_cpu |
| 110 | # define arch_needs_cpu(cpu) (0) | 110 | # define arch_needs_cpu(cpu) (0) |
| @@ -112,7 +112,7 @@ extern int tick_oneshot_mode_active(void); | |||
| 112 | # else | 112 | # else |
| 113 | static inline void tick_clock_notify(void) { } | 113 | static inline void tick_clock_notify(void) { } |
| 114 | static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } | 114 | static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } |
| 115 | static inline void tick_check_idle(int cpu) { } | 115 | static inline void tick_check_idle(void) { } |
| 116 | static inline int tick_oneshot_mode_active(void) { return 0; } | 116 | static inline int tick_oneshot_mode_active(void) { return 0; } |
| 117 | # endif | 117 | # endif |
| 118 | 118 | ||
| @@ -121,7 +121,7 @@ static inline void tick_init(void) { } | |||
| 121 | static inline void tick_cancel_sched_timer(int cpu) { } | 121 | static inline void tick_cancel_sched_timer(int cpu) { } |
| 122 | static inline void tick_clock_notify(void) { } | 122 | static inline void tick_clock_notify(void) { } |
| 123 | static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } | 123 | static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } |
| 124 | static inline void tick_check_idle(int cpu) { } | 124 | static inline void tick_check_idle(void) { } |
| 125 | static inline int tick_oneshot_mode_active(void) { return 0; } | 125 | static inline int tick_oneshot_mode_active(void) { return 0; } |
| 126 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ | 126 | #endif /* !CONFIG_GENERIC_CLOCKEVENTS */ |
| 127 | 127 | ||
| @@ -165,7 +165,7 @@ extern cpumask_var_t tick_nohz_full_mask; | |||
| 165 | 165 | ||
| 166 | static inline bool tick_nohz_full_enabled(void) | 166 | static inline bool tick_nohz_full_enabled(void) |
| 167 | { | 167 | { |
| 168 | if (!static_key_false(&context_tracking_enabled)) | 168 | if (!context_tracking_is_enabled()) |
| 169 | return false; | 169 | return false; |
| 170 | 170 | ||
| 171 | return tick_nohz_full_running; | 171 | return tick_nohz_full_running; |
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h index 9d8cf056e661..ecd3319dac33 100644 --- a/include/linux/uaccess.h +++ b/include/linux/uaccess.h | |||
| @@ -25,13 +25,16 @@ static inline void pagefault_disable(void) | |||
| 25 | 25 | ||
| 26 | static inline void pagefault_enable(void) | 26 | static inline void pagefault_enable(void) |
| 27 | { | 27 | { |
| 28 | #ifndef CONFIG_PREEMPT | ||
| 28 | /* | 29 | /* |
| 29 | * make sure to issue those last loads/stores before enabling | 30 | * make sure to issue those last loads/stores before enabling |
| 30 | * the pagefault handler again. | 31 | * the pagefault handler again. |
| 31 | */ | 32 | */ |
| 32 | barrier(); | 33 | barrier(); |
| 33 | preempt_count_dec(); | 34 | preempt_count_dec(); |
| 34 | preempt_check_resched(); | 35 | #else |
| 36 | preempt_enable(); | ||
| 37 | #endif | ||
| 35 | } | 38 | } |
| 36 | 39 | ||
| 37 | #ifndef ARCH_HAS_NOCACHE_UACCESS | 40 | #ifndef ARCH_HAS_NOCACHE_UACCESS |
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h index 319eae70fe84..e32251e00e62 100644 --- a/include/linux/uprobes.h +++ b/include/linux/uprobes.h | |||
| @@ -26,16 +26,13 @@ | |||
| 26 | 26 | ||
| 27 | #include <linux/errno.h> | 27 | #include <linux/errno.h> |
| 28 | #include <linux/rbtree.h> | 28 | #include <linux/rbtree.h> |
| 29 | #include <linux/types.h> | ||
| 29 | 30 | ||
| 30 | struct vm_area_struct; | 31 | struct vm_area_struct; |
| 31 | struct mm_struct; | 32 | struct mm_struct; |
| 32 | struct inode; | 33 | struct inode; |
| 33 | struct notifier_block; | 34 | struct notifier_block; |
| 34 | 35 | ||
| 35 | #ifdef CONFIG_ARCH_SUPPORTS_UPROBES | ||
| 36 | # include <asm/uprobes.h> | ||
| 37 | #endif | ||
| 38 | |||
| 39 | #define UPROBE_HANDLER_REMOVE 1 | 36 | #define UPROBE_HANDLER_REMOVE 1 |
| 40 | #define UPROBE_HANDLER_MASK 1 | 37 | #define UPROBE_HANDLER_MASK 1 |
| 41 | 38 | ||
| @@ -60,6 +57,8 @@ struct uprobe_consumer { | |||
| 60 | }; | 57 | }; |
| 61 | 58 | ||
| 62 | #ifdef CONFIG_UPROBES | 59 | #ifdef CONFIG_UPROBES |
| 60 | #include <asm/uprobes.h> | ||
| 61 | |||
| 63 | enum uprobe_task_state { | 62 | enum uprobe_task_state { |
| 64 | UTASK_RUNNING, | 63 | UTASK_RUNNING, |
| 65 | UTASK_SSTEP, | 64 | UTASK_SSTEP, |
| @@ -72,35 +71,28 @@ enum uprobe_task_state { | |||
| 72 | */ | 71 | */ |
| 73 | struct uprobe_task { | 72 | struct uprobe_task { |
| 74 | enum uprobe_task_state state; | 73 | enum uprobe_task_state state; |
| 75 | struct arch_uprobe_task autask; | ||
| 76 | 74 | ||
| 77 | struct return_instance *return_instances; | 75 | union { |
| 78 | unsigned int depth; | 76 | struct { |
| 79 | struct uprobe *active_uprobe; | 77 | struct arch_uprobe_task autask; |
| 78 | unsigned long vaddr; | ||
| 79 | }; | ||
| 80 | 80 | ||
| 81 | struct { | ||
| 82 | struct callback_head dup_xol_work; | ||
| 83 | unsigned long dup_xol_addr; | ||
| 84 | }; | ||
| 85 | }; | ||
| 86 | |||
| 87 | struct uprobe *active_uprobe; | ||
| 81 | unsigned long xol_vaddr; | 88 | unsigned long xol_vaddr; |
| 82 | unsigned long vaddr; | ||
| 83 | }; | ||
| 84 | 89 | ||
| 85 | /* | 90 | struct return_instance *return_instances; |
| 86 | * On a breakpoint hit, thread contests for a slot. It frees the | 91 | unsigned int depth; |
| 87 | * slot after singlestep. Currently a fixed number of slots are | ||
| 88 | * allocated. | ||
| 89 | */ | ||
| 90 | struct xol_area { | ||
| 91 | wait_queue_head_t wq; /* if all slots are busy */ | ||
| 92 | atomic_t slot_count; /* number of in-use slots */ | ||
| 93 | unsigned long *bitmap; /* 0 = free slot */ | ||
| 94 | struct page *page; | ||
| 95 | |||
| 96 | /* | ||
| 97 | * We keep the vma's vm_start rather than a pointer to the vma | ||
| 98 | * itself. The probed process or a naughty kernel module could make | ||
| 99 | * the vma go away, and we must handle that reasonably gracefully. | ||
| 100 | */ | ||
| 101 | unsigned long vaddr; /* Page(s) of instruction slots */ | ||
| 102 | }; | 92 | }; |
| 103 | 93 | ||
| 94 | struct xol_area; | ||
| 95 | |||
| 104 | struct uprobes_state { | 96 | struct uprobes_state { |
| 105 | struct xol_area *xol_area; | 97 | struct xol_area *xol_area; |
| 106 | }; | 98 | }; |
| @@ -109,6 +101,7 @@ extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsign | |||
| 109 | extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); | 101 | extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); |
| 110 | extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); | 102 | extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); |
| 111 | extern bool __weak is_trap_insn(uprobe_opcode_t *insn); | 103 | extern bool __weak is_trap_insn(uprobe_opcode_t *insn); |
| 104 | extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); | ||
| 112 | extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); | 105 | extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); |
| 113 | extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); | 106 | extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); |
| 114 | extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); | 107 | extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool); |
| @@ -120,7 +113,6 @@ extern void uprobe_end_dup_mmap(void); | |||
| 120 | extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); | 113 | extern void uprobe_dup_mmap(struct mm_struct *oldmm, struct mm_struct *newmm); |
| 121 | extern void uprobe_free_utask(struct task_struct *t); | 114 | extern void uprobe_free_utask(struct task_struct *t); |
| 122 | extern void uprobe_copy_process(struct task_struct *t, unsigned long flags); | 115 | extern void uprobe_copy_process(struct task_struct *t, unsigned long flags); |
| 123 | extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); | ||
| 124 | extern int uprobe_post_sstep_notifier(struct pt_regs *regs); | 116 | extern int uprobe_post_sstep_notifier(struct pt_regs *regs); |
| 125 | extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); | 117 | extern int uprobe_pre_sstep_notifier(struct pt_regs *regs); |
| 126 | extern void uprobe_notify_resume(struct pt_regs *regs); | 118 | extern void uprobe_notify_resume(struct pt_regs *regs); |
| @@ -176,10 +168,6 @@ static inline bool uprobe_deny_signal(void) | |||
| 176 | { | 168 | { |
| 177 | return false; | 169 | return false; |
| 178 | } | 170 | } |
| 179 | static inline unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) | ||
| 180 | { | ||
| 181 | return 0; | ||
| 182 | } | ||
| 183 | static inline void uprobe_free_utask(struct task_struct *t) | 171 | static inline void uprobe_free_utask(struct task_struct *t) |
| 184 | { | 172 | { |
| 185 | } | 173 | } |
diff --git a/include/linux/vtime.h b/include/linux/vtime.h index f5b72b364bda..c5165fd256f9 100644 --- a/include/linux/vtime.h +++ b/include/linux/vtime.h | |||
| @@ -19,8 +19,8 @@ static inline bool vtime_accounting_enabled(void) { return true; } | |||
| 19 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN | 19 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN |
| 20 | static inline bool vtime_accounting_enabled(void) | 20 | static inline bool vtime_accounting_enabled(void) |
| 21 | { | 21 | { |
| 22 | if (static_key_false(&context_tracking_enabled)) { | 22 | if (context_tracking_is_enabled()) { |
| 23 | if (context_tracking_active()) | 23 | if (context_tracking_cpu_is_enabled()) |
| 24 | return true; | 24 | return true; |
| 25 | } | 25 | } |
| 26 | 26 | ||
diff --git a/include/linux/zorro.h b/include/linux/zorro.h index dff42025649b..63fbba0740c2 100644 --- a/include/linux/zorro.h +++ b/include/linux/zorro.h | |||
| @@ -11,107 +11,10 @@ | |||
| 11 | #ifndef _LINUX_ZORRO_H | 11 | #ifndef _LINUX_ZORRO_H |
| 12 | #define _LINUX_ZORRO_H | 12 | #define _LINUX_ZORRO_H |
| 13 | 13 | ||
| 14 | #include <linux/device.h> | ||
| 15 | |||
| 16 | |||
| 17 | /* | ||
| 18 | * Each Zorro board has a 32-bit ID of the form | ||
| 19 | * | ||
| 20 | * mmmmmmmmmmmmmmmmppppppppeeeeeeee | ||
| 21 | * | ||
| 22 | * with | ||
| 23 | * | ||
| 24 | * mmmmmmmmmmmmmmmm 16-bit Manufacturer ID (assigned by CBM (sigh)) | ||
| 25 | * pppppppp 8-bit Product ID (assigned by manufacturer) | ||
| 26 | * eeeeeeee 8-bit Extended Product ID (currently only used | ||
| 27 | * for some GVP boards) | ||
| 28 | */ | ||
| 29 | |||
| 30 | |||
| 31 | #define ZORRO_MANUF(id) ((id) >> 16) | ||
| 32 | #define ZORRO_PROD(id) (((id) >> 8) & 0xff) | ||
| 33 | #define ZORRO_EPC(id) ((id) & 0xff) | ||
| 34 | |||
| 35 | #define ZORRO_ID(manuf, prod, epc) \ | ||
| 36 | ((ZORRO_MANUF_##manuf << 16) | ((prod) << 8) | (epc)) | ||
| 37 | |||
| 38 | typedef __u32 zorro_id; | ||
| 39 | |||
| 40 | |||
| 41 | /* Include the ID list */ | ||
| 42 | #include <linux/zorro_ids.h> | ||
| 43 | |||
| 44 | 14 | ||
| 45 | /* | 15 | #include <uapi/linux/zorro.h> |
| 46 | * GVP identifies most of its products through the 'extended product code' | ||
| 47 | * (epc). The epc has to be ANDed with the GVP_PRODMASK before the | ||
| 48 | * identification. | ||
| 49 | */ | ||
| 50 | |||
| 51 | #define GVP_PRODMASK (0xf8) | ||
| 52 | #define GVP_SCSICLKMASK (0x01) | ||
| 53 | |||
| 54 | enum GVP_flags { | ||
| 55 | GVP_IO = 0x01, | ||
| 56 | GVP_ACCEL = 0x02, | ||
| 57 | GVP_SCSI = 0x04, | ||
| 58 | GVP_24BITDMA = 0x08, | ||
| 59 | GVP_25BITDMA = 0x10, | ||
| 60 | GVP_NOBANK = 0x20, | ||
| 61 | GVP_14MHZ = 0x40, | ||
| 62 | }; | ||
| 63 | |||
| 64 | |||
| 65 | struct Node { | ||
| 66 | struct Node *ln_Succ; /* Pointer to next (successor) */ | ||
| 67 | struct Node *ln_Pred; /* Pointer to previous (predecessor) */ | ||
| 68 | __u8 ln_Type; | ||
| 69 | __s8 ln_Pri; /* Priority, for sorting */ | ||
| 70 | __s8 *ln_Name; /* ID string, null terminated */ | ||
| 71 | } __attribute__ ((packed)); | ||
| 72 | |||
| 73 | struct ExpansionRom { | ||
| 74 | /* -First 16 bytes of the expansion ROM */ | ||
| 75 | __u8 er_Type; /* Board type, size and flags */ | ||
| 76 | __u8 er_Product; /* Product number, assigned by manufacturer */ | ||
| 77 | __u8 er_Flags; /* Flags */ | ||
| 78 | __u8 er_Reserved03; /* Must be zero ($ff inverted) */ | ||
| 79 | __u16 er_Manufacturer; /* Unique ID, ASSIGNED BY COMMODORE-AMIGA! */ | ||
| 80 | __u32 er_SerialNumber; /* Available for use by manufacturer */ | ||
| 81 | __u16 er_InitDiagVec; /* Offset to optional "DiagArea" structure */ | ||
| 82 | __u8 er_Reserved0c; | ||
| 83 | __u8 er_Reserved0d; | ||
| 84 | __u8 er_Reserved0e; | ||
| 85 | __u8 er_Reserved0f; | ||
| 86 | } __attribute__ ((packed)); | ||
| 87 | |||
| 88 | /* er_Type board type bits */ | ||
| 89 | #define ERT_TYPEMASK 0xc0 | ||
| 90 | #define ERT_ZORROII 0xc0 | ||
| 91 | #define ERT_ZORROIII 0x80 | ||
| 92 | |||
| 93 | /* other bits defined in er_Type */ | ||
| 94 | #define ERTB_MEMLIST 5 /* Link RAM into free memory list */ | ||
| 95 | #define ERTF_MEMLIST (1<<5) | ||
| 96 | |||
| 97 | struct ConfigDev { | ||
| 98 | struct Node cd_Node; | ||
| 99 | __u8 cd_Flags; /* (read/write) */ | ||
| 100 | __u8 cd_Pad; /* reserved */ | ||
| 101 | struct ExpansionRom cd_Rom; /* copy of board's expansion ROM */ | ||
| 102 | void *cd_BoardAddr; /* where in memory the board was placed */ | ||
| 103 | __u32 cd_BoardSize; /* size of board in bytes */ | ||
| 104 | __u16 cd_SlotAddr; /* which slot number (PRIVATE) */ | ||
| 105 | __u16 cd_SlotSize; /* number of slots (PRIVATE) */ | ||
| 106 | void *cd_Driver; /* pointer to node of driver */ | ||
| 107 | struct ConfigDev *cd_NextCD; /* linked list of drivers to config */ | ||
| 108 | __u32 cd_Unused[4]; /* for whatever the driver wants */ | ||
| 109 | } __attribute__ ((packed)); | ||
| 110 | |||
| 111 | #define ZORRO_NUM_AUTO 16 | ||
| 112 | |||
| 113 | #ifdef __KERNEL__ | ||
| 114 | 16 | ||
| 17 | #include <linux/device.h> | ||
| 115 | #include <linux/init.h> | 18 | #include <linux/init.h> |
| 116 | #include <linux/ioport.h> | 19 | #include <linux/ioport.h> |
| 117 | #include <linux/mod_devicetable.h> | 20 | #include <linux/mod_devicetable.h> |
| @@ -175,7 +78,23 @@ static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z) | |||
| 175 | 78 | ||
| 176 | 79 | ||
| 177 | extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */ | 80 | extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */ |
| 178 | extern struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; | 81 | extern struct zorro_dev *zorro_autocon; |
| 82 | |||
| 83 | |||
| 84 | /* | ||
| 85 | * Minimal information about a Zorro device, passed from bootinfo | ||
| 86 | * Only available temporarily, i.e. until initmem has been freed! | ||
| 87 | */ | ||
| 88 | |||
| 89 | struct zorro_dev_init { | ||
| 90 | struct ExpansionRom rom; | ||
| 91 | u16 slotaddr; | ||
| 92 | u16 slotsize; | ||
| 93 | u32 boardaddr; | ||
| 94 | u32 boardsize; | ||
| 95 | }; | ||
| 96 | |||
| 97 | extern struct zorro_dev_init zorro_autocon_init[ZORRO_NUM_AUTO] __initdata; | ||
| 179 | 98 | ||
| 180 | 99 | ||
| 181 | /* | 100 | /* |
| @@ -229,6 +148,4 @@ extern DECLARE_BITMAP(zorro_unused_z2ram, 128); | |||
| 229 | #define Z2RAM_CHUNKSHIFT (16) | 148 | #define Z2RAM_CHUNKSHIFT (16) |
| 230 | 149 | ||
| 231 | 150 | ||
| 232 | #endif /* __KERNEL__ */ | ||
| 233 | |||
| 234 | #endif /* _LINUX_ZORRO_H */ | 151 | #endif /* _LINUX_ZORRO_H */ |
diff --git a/include/linux/zorro_ids.h b/include/linux/zorro_ids.h deleted file mode 100644 index 74bc53bcfdcf..000000000000 --- a/include/linux/zorro_ids.h +++ /dev/null | |||
| @@ -1,552 +0,0 @@ | |||
| 1 | /* | ||
| 2 | * Zorro board IDs | ||
| 3 | * | ||
| 4 | * Please keep sorted. | ||
| 5 | */ | ||
| 6 | |||
| 7 | |||
| 8 | #define ZORRO_MANUF_PACIFIC_PERIPHERALS 0x00D3 | ||
| 9 | #define ZORRO_PROD_PACIFIC_PERIPHERALS_SE_2000_A500 ZORRO_ID(PACIFIC_PERIPHERALS, 0x00, 0) | ||
| 10 | #define ZORRO_PROD_PACIFIC_PERIPHERALS_SCSI ZORRO_ID(PACIFIC_PERIPHERALS, 0x0A, 0) | ||
| 11 | |||
| 12 | #define ZORRO_MANUF_MACROSYSTEMS_USA_2 0x0100 | ||
| 13 | #define ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE ZORRO_ID(MACROSYSTEMS_USA_2, 0x13, 0) | ||
| 14 | |||
| 15 | #define ZORRO_MANUF_KUPKE_1 0x00DD | ||
| 16 | #define ZORRO_PROD_KUPKE_GOLEM_RAM_BOX_2MB ZORRO_ID(KUPKE_1, 0x00, 0) | ||
| 17 | |||
| 18 | #define ZORRO_MANUF_MEMPHIS 0x0100 | ||
| 19 | #define ZORRO_PROD_MEMPHIS_STORMBRINGER ZORRO_ID(MEMPHIS, 0x00, 0) | ||
| 20 | |||
| 21 | #define ZORRO_MANUF_3_STATE 0x0200 | ||
| 22 | #define ZORRO_PROD_3_STATE_MEGAMIX_2000 ZORRO_ID(3_STATE, 0x02, 0) | ||
| 23 | |||
| 24 | #define ZORRO_MANUF_COMMODORE_BRAUNSCHWEIG 0x0201 | ||
| 25 | #define ZORRO_PROD_CBM_A2088_A2286 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x01, 0) | ||
| 26 | #define ZORRO_PROD_CBM_A2286 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x02, 0) | ||
| 27 | #define ZORRO_PROD_CBM_A4091_1 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x54, 0) | ||
| 28 | #define ZORRO_PROD_CBM_A2386SX_1 ZORRO_ID(COMMODORE_BRAUNSCHWEIG, 0x67, 0) | ||
| 29 | |||
| 30 | #define ZORRO_MANUF_COMMODORE_WEST_CHESTER_1 0x0202 | ||
| 31 | #define ZORRO_PROD_CBM_A2090A ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x01, 0) | ||
| 32 | #define ZORRO_PROD_CBM_A590_A2091_1 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x02, 0) | ||
| 33 | #define ZORRO_PROD_CBM_A590_A2091_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x03, 0) | ||
| 34 | #define ZORRO_PROD_CBM_A2090B ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x04, 0) | ||
| 35 | #define ZORRO_PROD_CBM_A2060 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x09, 0) | ||
| 36 | #define ZORRO_PROD_CBM_A590_A2052_A2058_A2091 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x0A, 0) | ||
| 37 | #define ZORRO_PROD_CBM_A560_RAM ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x20, 0) | ||
| 38 | #define ZORRO_PROD_CBM_A2232_PROTOTYPE ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x45, 0) | ||
| 39 | #define ZORRO_PROD_CBM_A2232 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x46, 0) | ||
| 40 | #define ZORRO_PROD_CBM_A2620 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x50, 0) | ||
| 41 | #define ZORRO_PROD_CBM_A2630 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x51, 0) | ||
| 42 | #define ZORRO_PROD_CBM_A4091_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x54, 0) | ||
| 43 | #define ZORRO_PROD_CBM_A2065_1 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x5A, 0) | ||
| 44 | #define ZORRO_PROD_CBM_ROMULATOR ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x60, 0) | ||
| 45 | #define ZORRO_PROD_CBM_A3000_TEST_FIXTURE ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x61, 0) | ||
| 46 | #define ZORRO_PROD_CBM_A2386SX_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x67, 0) | ||
| 47 | #define ZORRO_PROD_CBM_A2065_2 ZORRO_ID(COMMODORE_WEST_CHESTER_1, 0x70, 0) | ||
| 48 | |||
| 49 | #define ZORRO_MANUF_COMMODORE_WEST_CHESTER_2 0x0203 | ||
| 50 | #define ZORRO_PROD_CBM_A2090A_CM ZORRO_ID(COMMODORE_WEST_CHESTER_2, 0x03, 0) | ||
| 51 | |||
| 52 | #define ZORRO_MANUF_PROGRESSIVE_PERIPHERALS_AND_SYSTEMS_2 0x02F4 | ||
| 53 | #define ZORRO_PROD_PPS_EXP8000 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS_2, 0x02, 0) | ||
| 54 | |||
| 55 | #define ZORRO_MANUF_KOLFF_COMPUTER_SUPPLIES 0x02FF | ||
| 56 | #define ZORRO_PROD_KCS_POWER_PC_BOARD ZORRO_ID(KOLFF_COMPUTER_SUPPLIES, 0x00, 0) | ||
| 57 | |||
| 58 | #define ZORRO_MANUF_CARDCO_1 0x03EC | ||
| 59 | #define ZORRO_PROD_CARDCO_KRONOS_2000_1 ZORRO_ID(CARDCO_1, 0x04, 0) | ||
| 60 | #define ZORRO_PROD_CARDCO_A1000_1 ZORRO_ID(CARDCO_1, 0x0C, 0) | ||
| 61 | #define ZORRO_PROD_CARDCO_ESCORT ZORRO_ID(CARDCO_1, 0x0E, 0) | ||
| 62 | #define ZORRO_PROD_CARDCO_A2410 ZORRO_ID(CARDCO_1, 0xF5, 0) | ||
| 63 | |||
| 64 | #define ZORRO_MANUF_A_SQUARED 0x03ED | ||
| 65 | #define ZORRO_PROD_A_SQUARED_LIVE_2000 ZORRO_ID(A_SQUARED, 0x01, 0) | ||
| 66 | |||
| 67 | #define ZORRO_MANUF_COMSPEC_COMMUNICATIONS 0x03EE | ||
| 68 | #define ZORRO_PROD_COMSPEC_COMMUNICATIONS_AX2000 ZORRO_ID(COMSPEC_COMMUNICATIONS, 0x01, 0) | ||
| 69 | |||
| 70 | #define ZORRO_MANUF_ANAKIN_RESEARCH 0x03F1 | ||
| 71 | #define ZORRO_PROD_ANAKIN_RESEARCH_EASYL ZORRO_ID(ANAKIN_RESEARCH, 0x01, 0) | ||
| 72 | |||
| 73 | #define ZORRO_MANUF_MICROBOTICS 0x03F2 | ||
| 74 | #define ZORRO_PROD_MICROBOTICS_STARBOARD_II ZORRO_ID(MICROBOTICS, 0x00, 0) | ||
| 75 | #define ZORRO_PROD_MICROBOTICS_STARDRIVE ZORRO_ID(MICROBOTICS, 0x02, 0) | ||
| 76 | #define ZORRO_PROD_MICROBOTICS_8_UP_A ZORRO_ID(MICROBOTICS, 0x03, 0) | ||
| 77 | #define ZORRO_PROD_MICROBOTICS_8_UP_Z ZORRO_ID(MICROBOTICS, 0x04, 0) | ||
| 78 | #define ZORRO_PROD_MICROBOTICS_DELTA_RAM ZORRO_ID(MICROBOTICS, 0x20, 0) | ||
| 79 | #define ZORRO_PROD_MICROBOTICS_8_STAR_RAM ZORRO_ID(MICROBOTICS, 0x40, 0) | ||
| 80 | #define ZORRO_PROD_MICROBOTICS_8_STAR ZORRO_ID(MICROBOTICS, 0x41, 0) | ||
| 81 | #define ZORRO_PROD_MICROBOTICS_VXL_RAM_32 ZORRO_ID(MICROBOTICS, 0x44, 0) | ||
| 82 | #define ZORRO_PROD_MICROBOTICS_VXL_68030 ZORRO_ID(MICROBOTICS, 0x45, 0) | ||
| 83 | #define ZORRO_PROD_MICROBOTICS_DELTA ZORRO_ID(MICROBOTICS, 0x60, 0) | ||
| 84 | #define ZORRO_PROD_MICROBOTICS_MBX_1200_1200Z_RAM ZORRO_ID(MICROBOTICS, 0x81, 0) | ||
| 85 | #define ZORRO_PROD_MICROBOTICS_HARDFRAME_2000_1 ZORRO_ID(MICROBOTICS, 0x96, 0) | ||
| 86 | #define ZORRO_PROD_MICROBOTICS_HARDFRAME_2000_2 ZORRO_ID(MICROBOTICS, 0x9E, 0) | ||
| 87 | #define ZORRO_PROD_MICROBOTICS_MBX_1200_1200Z ZORRO_ID(MICROBOTICS, 0xC1, 0) | ||
| 88 | |||
| 89 | #define ZORRO_MANUF_ACCESS_ASSOCIATES_ALEGRA 0x03F4 | ||
| 90 | |||
| 91 | #define ZORRO_MANUF_EXPANSION_TECHNOLOGIES 0x03F6 | ||
| 92 | |||
| 93 | #define ZORRO_MANUF_ASDG 0x03FF | ||
| 94 | #define ZORRO_PROD_ASDG_MEMORY_1 ZORRO_ID(ASDG, 0x01, 0) | ||
| 95 | #define ZORRO_PROD_ASDG_MEMORY_2 ZORRO_ID(ASDG, 0x02, 0) | ||
| 96 | #define ZORRO_PROD_ASDG_EB920_LAN_ROVER ZORRO_ID(ASDG, 0xFE, 0) | ||
| 97 | #define ZORRO_PROD_ASDG_GPIB_DUALIEEE488_TWIN_X ZORRO_ID(ASDG, 0xFF, 0) | ||
| 98 | |||
| 99 | #define ZORRO_MANUF_IMTRONICS_1 0x0404 | ||
| 100 | #define ZORRO_PROD_IMTRONICS_HURRICANE_2800_1 ZORRO_ID(IMTRONICS_1, 0x39, 0) | ||
| 101 | #define ZORRO_PROD_IMTRONICS_HURRICANE_2800_2 ZORRO_ID(IMTRONICS_1, 0x57, 0) | ||
| 102 | |||
| 103 | #define ZORRO_MANUF_CBM_UNIVERSITY_OF_LOWELL 0x0406 | ||
| 104 | #define ZORRO_PROD_CBM_A2410 ZORRO_ID(CBM_UNIVERSITY_OF_LOWELL, 0x00, 0) | ||
| 105 | |||
| 106 | #define ZORRO_MANUF_AMERISTAR 0x041D | ||
| 107 | #define ZORRO_PROD_AMERISTAR_A2065 ZORRO_ID(AMERISTAR, 0x01, 0) | ||
| 108 | #define ZORRO_PROD_AMERISTAR_A560 ZORRO_ID(AMERISTAR, 0x09, 0) | ||
| 109 | #define ZORRO_PROD_AMERISTAR_A4066 ZORRO_ID(AMERISTAR, 0x0A, 0) | ||
| 110 | |||
| 111 | #define ZORRO_MANUF_SUPRA 0x0420 | ||
| 112 | #define ZORRO_PROD_SUPRA_SUPRADRIVE_4x4 ZORRO_ID(SUPRA, 0x01, 0) | ||
| 113 | #define ZORRO_PROD_SUPRA_1000_RAM ZORRO_ID(SUPRA, 0x02, 0) | ||
| 114 | #define ZORRO_PROD_SUPRA_2000_DMA ZORRO_ID(SUPRA, 0x03, 0) | ||
| 115 | #define ZORRO_PROD_SUPRA_500 ZORRO_ID(SUPRA, 0x05, 0) | ||
| 116 | #define ZORRO_PROD_SUPRA_500_SCSI ZORRO_ID(SUPRA, 0x08, 0) | ||
| 117 | #define ZORRO_PROD_SUPRA_500XP_2000_RAM ZORRO_ID(SUPRA, 0x09, 0) | ||
| 118 | #define ZORRO_PROD_SUPRA_500RX_2000_RAM ZORRO_ID(SUPRA, 0x0A, 0) | ||
| 119 | #define ZORRO_PROD_SUPRA_2400ZI ZORRO_ID(SUPRA, 0x0B, 0) | ||
| 120 | #define ZORRO_PROD_SUPRA_500XP_SUPRADRIVE_WORDSYNC ZORRO_ID(SUPRA, 0x0C, 0) | ||
| 121 | #define ZORRO_PROD_SUPRA_SUPRADRIVE_WORDSYNC_II ZORRO_ID(SUPRA, 0x0D, 0) | ||
| 122 | #define ZORRO_PROD_SUPRA_2400ZIPLUS ZORRO_ID(SUPRA, 0x10, 0) | ||
| 123 | |||
| 124 | #define ZORRO_MANUF_COMPUTER_SYSTEMS_ASSOCIATES 0x0422 | ||
| 125 | #define ZORRO_PROD_CSA_MAGNUM ZORRO_ID(COMPUTER_SYSTEMS_ASSOCIATES, 0x11, 0) | ||
| 126 | #define ZORRO_PROD_CSA_12_GAUGE ZORRO_ID(COMPUTER_SYSTEMS_ASSOCIATES, 0x15, 0) | ||
| 127 | |||
| 128 | #define ZORRO_MANUF_MARC_MICHAEL_GROTH 0x0439 | ||
| 129 | |||
| 130 | #define ZORRO_MANUF_M_TECH 0x0502 | ||
| 131 | #define ZORRO_PROD_MTEC_AT500_1 ZORRO_ID(M_TECH, 0x03, 0) | ||
| 132 | |||
| 133 | #define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_1 0x06E1 | ||
| 134 | #define ZORRO_PROD_GVP_IMPACT_SERIES_I ZORRO_ID(GREAT_VALLEY_PRODUCTS_1, 0x08, 0) | ||
| 135 | |||
| 136 | #define ZORRO_MANUF_BYTEBOX 0x07DA | ||
| 137 | #define ZORRO_PROD_BYTEBOX_A500 ZORRO_ID(BYTEBOX, 0x00, 0) | ||
| 138 | |||
| 139 | #define ZORRO_MANUF_DKB_POWER_COMPUTING 0x07DC | ||
| 140 | #define ZORRO_PROD_DKB_POWER_COMPUTING_SECUREKEY ZORRO_ID(DKB_POWER_COMPUTING, 0x09, 0) | ||
| 141 | #define ZORRO_PROD_DKB_POWER_COMPUTING_DKM_3128 ZORRO_ID(DKB_POWER_COMPUTING, 0x0E, 0) | ||
| 142 | #define ZORRO_PROD_DKB_POWER_COMPUTING_RAPID_FIRE ZORRO_ID(DKB_POWER_COMPUTING, 0x0F, 0) | ||
| 143 | #define ZORRO_PROD_DKB_POWER_COMPUTING_DKM_1202 ZORRO_ID(DKB_POWER_COMPUTING, 0x10, 0) | ||
| 144 | #define ZORRO_PROD_DKB_POWER_COMPUTING_COBRA_VIPER_II_68EC030 ZORRO_ID(DKB_POWER_COMPUTING, 0x12, 0) | ||
| 145 | #define ZORRO_PROD_DKB_POWER_COMPUTING_WILDFIRE_060_1 ZORRO_ID(DKB_POWER_COMPUTING, 0x17, 0) | ||
| 146 | #define ZORRO_PROD_DKB_POWER_COMPUTING_WILDFIRE_060_2 ZORRO_ID(DKB_POWER_COMPUTING, 0xFF, 0) | ||
| 147 | |||
| 148 | #define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_2 0x07E1 | ||
| 149 | #define ZORRO_PROD_GVP_IMPACT_SERIES_I_4K ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x01, 0) | ||
| 150 | #define ZORRO_PROD_GVP_IMPACT_SERIES_I_16K_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x02, 0) | ||
| 151 | #define ZORRO_PROD_GVP_IMPACT_SERIES_I_16K_3 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x03, 0) | ||
| 152 | #define ZORRO_PROD_GVP_IMPACT_3001_IDE_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x08, 0) | ||
| 153 | #define ZORRO_PROD_GVP_IMPACT_3001_RAM ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x09, 0) | ||
| 154 | #define ZORRO_PROD_GVP_IMPACT_SERIES_II_RAM_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0A, 0) | ||
| 155 | #define ZORRO_PROD_GVP_EPC_BASE ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0) | ||
| 156 | #define ZORRO_PROD_GVP_GFORCE_040_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x20) | ||
| 157 | #define ZORRO_PROD_GVP_GFORCE_040_SCSI_1 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x30) | ||
| 158 | #define ZORRO_PROD_GVP_A1291 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x40) | ||
| 159 | #define ZORRO_PROD_GVP_COMBO_030_R4 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x60) | ||
| 160 | #define ZORRO_PROD_GVP_COMBO_030_R4_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x70) | ||
| 161 | #define ZORRO_PROD_GVP_PHONEPAK ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x78) | ||
| 162 | #define ZORRO_PROD_GVP_IO_EXTENDER ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0x98) | ||
| 163 | #define ZORRO_PROD_GVP_GFORCE_030 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xa0) | ||
| 164 | #define ZORRO_PROD_GVP_GFORCE_030_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xb0) | ||
| 165 | #define ZORRO_PROD_GVP_A530 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xc0) | ||
| 166 | #define ZORRO_PROD_GVP_A530_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xd0) | ||
| 167 | #define ZORRO_PROD_GVP_COMBO_030_R3 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xe0) | ||
| 168 | #define ZORRO_PROD_GVP_COMBO_030_R3_SCSI ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xf0) | ||
| 169 | #define ZORRO_PROD_GVP_SERIES_II ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0B, 0xf8) | ||
| 170 | #define ZORRO_PROD_GVP_IMPACT_3001_IDE_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0D, 0) | ||
| 171 | /*#define ZORRO_PROD_GVP_A2000_030 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0D, 0)*/ | ||
| 172 | /*#define ZORRO_PROD_GVP_GFORCE_040_SCSI_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x0D, 0)*/ | ||
| 173 | #define ZORRO_PROD_GVP_GFORCE_040_060 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x16, 0) | ||
| 174 | #define ZORRO_PROD_GVP_IMPACT_VISION_24 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0x20, 0) | ||
| 175 | #define ZORRO_PROD_GVP_GFORCE_040_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_2, 0xFF, 0) | ||
| 176 | |||
| 177 | #define ZORRO_MANUF_CALIFORNIA_ACCESS_SYNERGY 0x07E5 | ||
| 178 | #define ZORRO_PROD_CALIFORNIA_ACCESS_SYNERGY_MALIBU ZORRO_ID(CALIFORNIA_ACCESS_SYNERGY, 0x01, 0) | ||
| 179 | |||
| 180 | #define ZORRO_MANUF_XETEC 0x07E6 | ||
| 181 | #define ZORRO_PROD_XETEC_FASTCARD ZORRO_ID(XETEC, 0x01, 0) | ||
| 182 | #define ZORRO_PROD_XETEC_FASTCARD_RAM ZORRO_ID(XETEC, 0x02, 0) | ||
| 183 | #define ZORRO_PROD_XETEC_FASTCARD_PLUS ZORRO_ID(XETEC, 0x03, 0) | ||
| 184 | |||
| 185 | #define ZORRO_MANUF_PROGRESSIVE_PERIPHERALS_AND_SYSTEMS 0x07EA | ||
| 186 | #define ZORRO_PROD_PPS_MERCURY ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x00, 0) | ||
| 187 | #define ZORRO_PROD_PPS_A3000_68040 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x01, 0) | ||
| 188 | #define ZORRO_PROD_PPS_A2000_68040 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x69, 0) | ||
| 189 | #define ZORRO_PROD_PPS_ZEUS ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0x96, 0) | ||
| 190 | #define ZORRO_PROD_PPS_A500_68040 ZORRO_ID(PROGRESSIVE_PERIPHERALS_AND_SYSTEMS, 0xBB, 0) | ||
| 191 | |||
| 192 | #define ZORRO_MANUF_XEBEC 0x07EC | ||
| 193 | |||
| 194 | #define ZORRO_MANUF_SPIRIT_TECHNOLOGY 0x07F2 | ||
| 195 | #define ZORRO_PROD_SPIRIT_TECHNOLOGY_INSIDER_IN1000 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x01, 0) | ||
| 196 | #define ZORRO_PROD_SPIRIT_TECHNOLOGY_INSIDER_IN500 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x02, 0) | ||
| 197 | #define ZORRO_PROD_SPIRIT_TECHNOLOGY_SIN500 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x03, 0) | ||
| 198 | #define ZORRO_PROD_SPIRIT_TECHNOLOGY_HDA_506 ZORRO_ID(SPIRIT_TECHNOLOGY, 0x04, 0) | ||
| 199 | #define ZORRO_PROD_SPIRIT_TECHNOLOGY_AX_S ZORRO_ID(SPIRIT_TECHNOLOGY, 0x05, 0) | ||
| 200 | #define ZORRO_PROD_SPIRIT_TECHNOLOGY_OCTABYTE ZORRO_ID(SPIRIT_TECHNOLOGY, 0x06, 0) | ||
| 201 | #define ZORRO_PROD_SPIRIT_TECHNOLOGY_INMATE ZORRO_ID(SPIRIT_TECHNOLOGY, 0x08, 0) | ||
| 202 | |||
| 203 | #define ZORRO_MANUF_SPIRIT_TECHNOLOGY_2 0x07F3 | ||
| 204 | |||
| 205 | #define ZORRO_MANUF_BSC_ALFADATA_1 0x07FE | ||
| 206 | #define ZORRO_PROD_BSC_ALF_3_1 ZORRO_ID(BSC_ALFADATA_1, 0x03, 0) | ||
| 207 | |||
| 208 | #define ZORRO_MANUF_BSC_ALFADATA_2 0x0801 | ||
| 209 | #define ZORRO_PROD_BSC_ALF_2_1 ZORRO_ID(BSC_ALFADATA_2, 0x01, 0) | ||
| 210 | #define ZORRO_PROD_BSC_ALF_2_2 ZORRO_ID(BSC_ALFADATA_2, 0x02, 0) | ||
| 211 | #define ZORRO_PROD_BSC_ALF_3_2 ZORRO_ID(BSC_ALFADATA_2, 0x03, 0) | ||
| 212 | |||
| 213 | #define ZORRO_MANUF_CARDCO_2 0x0802 | ||
| 214 | #define ZORRO_PROD_CARDCO_KRONOS_2000_2 ZORRO_ID(CARDCO_2, 0x04, 0) | ||
| 215 | #define ZORRO_PROD_CARDCO_A1000_2 ZORRO_ID(CARDCO_2, 0x0C, 0) | ||
| 216 | |||
| 217 | #define ZORRO_MANUF_JOCHHEIM 0x0804 | ||
| 218 | #define ZORRO_PROD_JOCHHEIM_RAM ZORRO_ID(JOCHHEIM, 0x01, 0) | ||
| 219 | |||
| 220 | #define ZORRO_MANUF_CHECKPOINT_TECHNOLOGIES 0x0807 | ||
| 221 | #define ZORRO_PROD_CHECKPOINT_TECHNOLOGIES_SERIAL_SOLUTION ZORRO_ID(CHECKPOINT_TECHNOLOGIES, 0x00, 0) | ||
| 222 | |||
| 223 | #define ZORRO_MANUF_EDOTRONIK 0x0810 | ||
| 224 | #define ZORRO_PROD_EDOTRONIK_IEEE_488 ZORRO_ID(EDOTRONIK, 0x01, 0) | ||
| 225 | #define ZORRO_PROD_EDOTRONIK_8032 ZORRO_ID(EDOTRONIK, 0x02, 0) | ||
| 226 | #define ZORRO_PROD_EDOTRONIK_MULTISERIAL ZORRO_ID(EDOTRONIK, 0x03, 0) | ||
| 227 | #define ZORRO_PROD_EDOTRONIK_VIDEODIGITIZER ZORRO_ID(EDOTRONIK, 0x04, 0) | ||
| 228 | #define ZORRO_PROD_EDOTRONIK_PARALLEL_IO ZORRO_ID(EDOTRONIK, 0x05, 0) | ||
| 229 | #define ZORRO_PROD_EDOTRONIK_PIC_PROTOYPING ZORRO_ID(EDOTRONIK, 0x06, 0) | ||
| 230 | #define ZORRO_PROD_EDOTRONIK_ADC ZORRO_ID(EDOTRONIK, 0x07, 0) | ||
| 231 | #define ZORRO_PROD_EDOTRONIK_VME ZORRO_ID(EDOTRONIK, 0x08, 0) | ||
| 232 | #define ZORRO_PROD_EDOTRONIK_DSP96000 ZORRO_ID(EDOTRONIK, 0x09, 0) | ||
| 233 | |||
| 234 | #define ZORRO_MANUF_NES_INC 0x0813 | ||
| 235 | #define ZORRO_PROD_NES_INC_RAM ZORRO_ID(NES_INC, 0x00, 0) | ||
| 236 | |||
| 237 | #define ZORRO_MANUF_ICD 0x0817 | ||
| 238 | #define ZORRO_PROD_ICD_ADVANTAGE_2000_SCSI ZORRO_ID(ICD, 0x01, 0) | ||
| 239 | #define ZORRO_PROD_ICD_ADVANTAGE_IDE ZORRO_ID(ICD, 0x03, 0) | ||
| 240 | #define ZORRO_PROD_ICD_ADVANTAGE_2080_RAM ZORRO_ID(ICD, 0x04, 0) | ||
| 241 | |||
| 242 | #define ZORRO_MANUF_KUPKE_2 0x0819 | ||
| 243 | #define ZORRO_PROD_KUPKE_OMTI ZORRO_ID(KUPKE_2, 0x01, 0) | ||
| 244 | #define ZORRO_PROD_KUPKE_SCSI_II ZORRO_ID(KUPKE_2, 0x02, 0) | ||
| 245 | #define ZORRO_PROD_KUPKE_GOLEM_BOX ZORRO_ID(KUPKE_2, 0x03, 0) | ||
| 246 | #define ZORRO_PROD_KUPKE_030_882 ZORRO_ID(KUPKE_2, 0x04, 0) | ||
| 247 | #define ZORRO_PROD_KUPKE_SCSI_AT ZORRO_ID(KUPKE_2, 0x05, 0) | ||
| 248 | |||
| 249 | #define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_3 0x081D | ||
| 250 | #define ZORRO_PROD_GVP_A2000_RAM8 ZORRO_ID(GREAT_VALLEY_PRODUCTS_3, 0x09, 0) | ||
| 251 | #define ZORRO_PROD_GVP_IMPACT_SERIES_II_RAM_2 ZORRO_ID(GREAT_VALLEY_PRODUCTS_3, 0x0A, 0) | ||
| 252 | |||
| 253 | #define ZORRO_MANUF_INTERWORKS_NETWORK 0x081E | ||
| 254 | |||
| 255 | #define ZORRO_MANUF_HARDITAL_SYNTHESIS 0x0820 | ||
| 256 | #define ZORRO_PROD_HARDITAL_SYNTHESIS_TQM_68030_68882 ZORRO_ID(HARDITAL_SYNTHESIS, 0x14, 0) | ||
| 257 | |||
| 258 | #define ZORRO_MANUF_APPLIED_ENGINEERING 0x0828 | ||
| 259 | #define ZORRO_PROD_APPLIED_ENGINEERING_DL2000 ZORRO_ID(APPLIED_ENGINEERING, 0x10, 0) | ||
| 260 | #define ZORRO_PROD_APPLIED_ENGINEERING_RAM_WORKS ZORRO_ID(APPLIED_ENGINEERING, 0xE0, 0) | ||
| 261 | |||
| 262 | #define ZORRO_MANUF_BSC_ALFADATA_3 0x082C | ||
| 263 | #define ZORRO_PROD_BSC_OKTAGON_2008 ZORRO_ID(BSC_ALFADATA_3, 0x05, 0) | ||
| 264 | #define ZORRO_PROD_BSC_TANDEM_AT_2008_508 ZORRO_ID(BSC_ALFADATA_3, 0x06, 0) | ||
| 265 | #define ZORRO_PROD_BSC_ALFA_RAM_1200 ZORRO_ID(BSC_ALFADATA_3, 0x07, 0) | ||
| 266 | #define ZORRO_PROD_BSC_OKTAGON_2008_RAM ZORRO_ID(BSC_ALFADATA_3, 0x08, 0) | ||
| 267 | #define ZORRO_PROD_BSC_MULTIFACE_I ZORRO_ID(BSC_ALFADATA_3, 0x10, 0) | ||
| 268 | #define ZORRO_PROD_BSC_MULTIFACE_II ZORRO_ID(BSC_ALFADATA_3, 0x11, 0) | ||
| 269 | #define ZORRO_PROD_BSC_MULTIFACE_III ZORRO_ID(BSC_ALFADATA_3, 0x12, 0) | ||
| 270 | #define ZORRO_PROD_BSC_FRAMEMASTER_II ZORRO_ID(BSC_ALFADATA_3, 0x20, 0) | ||
| 271 | #define ZORRO_PROD_BSC_GRAFFITI_RAM ZORRO_ID(BSC_ALFADATA_3, 0x21, 0) | ||
| 272 | #define ZORRO_PROD_BSC_GRAFFITI_REG ZORRO_ID(BSC_ALFADATA_3, 0x22, 0) | ||
| 273 | #define ZORRO_PROD_BSC_ISDN_MASTERCARD ZORRO_ID(BSC_ALFADATA_3, 0x40, 0) | ||
| 274 | #define ZORRO_PROD_BSC_ISDN_MASTERCARD_II ZORRO_ID(BSC_ALFADATA_3, 0x41, 0) | ||
| 275 | |||
| 276 | #define ZORRO_MANUF_PHOENIX 0x0835 | ||
| 277 | #define ZORRO_PROD_PHOENIX_ST506 ZORRO_ID(PHOENIX, 0x21, 0) | ||
| 278 | #define ZORRO_PROD_PHOENIX_SCSI ZORRO_ID(PHOENIX, 0x22, 0) | ||
| 279 | #define ZORRO_PROD_PHOENIX_RAM ZORRO_ID(PHOENIX, 0xBE, 0) | ||
| 280 | |||
| 281 | #define ZORRO_MANUF_ADVANCED_STORAGE_SYSTEMS 0x0836 | ||
| 282 | #define ZORRO_PROD_ADVANCED_STORAGE_SYSTEMS_NEXUS ZORRO_ID(ADVANCED_STORAGE_SYSTEMS, 0x01, 0) | ||
| 283 | #define ZORRO_PROD_ADVANCED_STORAGE_SYSTEMS_NEXUS_RAM ZORRO_ID(ADVANCED_STORAGE_SYSTEMS, 0x08, 0) | ||
| 284 | |||
| 285 | #define ZORRO_MANUF_IMPULSE 0x0838 | ||
| 286 | #define ZORRO_PROD_IMPULSE_FIRECRACKER_24 ZORRO_ID(IMPULSE, 0x00, 0) | ||
| 287 | |||
| 288 | #define ZORRO_MANUF_IVS 0x0840 | ||
| 289 | #define ZORRO_PROD_IVS_GRANDSLAM_PIC_2 ZORRO_ID(IVS, 0x02, 0) | ||
| 290 | #define ZORRO_PROD_IVS_GRANDSLAM_PIC_1 ZORRO_ID(IVS, 0x04, 0) | ||
| 291 | #define ZORRO_PROD_IVS_OVERDRIVE ZORRO_ID(IVS, 0x10, 0) | ||
| 292 | #define ZORRO_PROD_IVS_TRUMPCARD_CLASSIC ZORRO_ID(IVS, 0x30, 0) | ||
| 293 | #define ZORRO_PROD_IVS_TRUMPCARD_PRO_GRANDSLAM ZORRO_ID(IVS, 0x34, 0) | ||
| 294 | #define ZORRO_PROD_IVS_META_4 ZORRO_ID(IVS, 0x40, 0) | ||
| 295 | #define ZORRO_PROD_IVS_WAVETOOLS ZORRO_ID(IVS, 0xBF, 0) | ||
| 296 | #define ZORRO_PROD_IVS_VECTOR_1 ZORRO_ID(IVS, 0xF3, 0) | ||
| 297 | #define ZORRO_PROD_IVS_VECTOR_2 ZORRO_ID(IVS, 0xF4, 0) | ||
| 298 | |||
| 299 | #define ZORRO_MANUF_VECTOR_1 0x0841 | ||
| 300 | #define ZORRO_PROD_VECTOR_CONNECTION_1 ZORRO_ID(VECTOR_1, 0xE3, 0) | ||
| 301 | |||
| 302 | #define ZORRO_MANUF_XPERT_PRODEV 0x0845 | ||
| 303 | #define ZORRO_PROD_XPERT_PRODEV_VISIONA_RAM ZORRO_ID(XPERT_PRODEV, 0x01, 0) | ||
| 304 | #define ZORRO_PROD_XPERT_PRODEV_VISIONA_REG ZORRO_ID(XPERT_PRODEV, 0x02, 0) | ||
| 305 | #define ZORRO_PROD_XPERT_PRODEV_MERLIN_RAM ZORRO_ID(XPERT_PRODEV, 0x03, 0) | ||
| 306 | #define ZORRO_PROD_XPERT_PRODEV_MERLIN_REG_1 ZORRO_ID(XPERT_PRODEV, 0x04, 0) | ||
| 307 | #define ZORRO_PROD_XPERT_PRODEV_MERLIN_REG_2 ZORRO_ID(XPERT_PRODEV, 0xC9, 0) | ||
| 308 | |||
| 309 | #define ZORRO_MANUF_HYDRA_SYSTEMS 0x0849 | ||
| 310 | #define ZORRO_PROD_HYDRA_SYSTEMS_AMIGANET ZORRO_ID(HYDRA_SYSTEMS, 0x01, 0) | ||
| 311 | |||
| 312 | #define ZORRO_MANUF_SUNRIZE_INDUSTRIES 0x084F | ||
| 313 | #define ZORRO_PROD_SUNRIZE_INDUSTRIES_AD1012 ZORRO_ID(SUNRIZE_INDUSTRIES, 0x01, 0) | ||
| 314 | #define ZORRO_PROD_SUNRIZE_INDUSTRIES_AD516 ZORRO_ID(SUNRIZE_INDUSTRIES, 0x02, 0) | ||
| 315 | #define ZORRO_PROD_SUNRIZE_INDUSTRIES_DD512 ZORRO_ID(SUNRIZE_INDUSTRIES, 0x03, 0) | ||
| 316 | |||
| 317 | #define ZORRO_MANUF_TRICERATOPS 0x0850 | ||
| 318 | #define ZORRO_PROD_TRICERATOPS_MULTI_IO ZORRO_ID(TRICERATOPS, 0x01, 0) | ||
| 319 | |||
| 320 | #define ZORRO_MANUF_APPLIED_MAGIC 0x0851 | ||
| 321 | #define ZORRO_PROD_APPLIED_MAGIC_DMI_RESOLVER ZORRO_ID(APPLIED_MAGIC, 0x01, 0) | ||
| 322 | #define ZORRO_PROD_APPLIED_MAGIC_DIGITAL_BROADCASTER ZORRO_ID(APPLIED_MAGIC, 0x06, 0) | ||
| 323 | |||
| 324 | #define ZORRO_MANUF_GFX_BASE 0x085E | ||
| 325 | #define ZORRO_PROD_GFX_BASE_GDA_1_VRAM ZORRO_ID(GFX_BASE, 0x00, 0) | ||
| 326 | #define ZORRO_PROD_GFX_BASE_GDA_1 ZORRO_ID(GFX_BASE, 0x01, 0) | ||
| 327 | |||
| 328 | #define ZORRO_MANUF_ROCTEC 0x0860 | ||
| 329 | #define ZORRO_PROD_ROCTEC_RH_800C ZORRO_ID(ROCTEC, 0x01, 0) | ||
| 330 | #define ZORRO_PROD_ROCTEC_RH_800C_RAM ZORRO_ID(ROCTEC, 0x01, 0) | ||
| 331 | |||
| 332 | #define ZORRO_MANUF_KATO 0x0861 | ||
| 333 | #define ZORRO_PROD_KATO_MELODY ZORRO_ID(KATO, 0x80, 0) | ||
| 334 | /* ID clash!! */ | ||
| 335 | #define ZORRO_MANUF_HELFRICH_1 0x0861 | ||
| 336 | #define ZORRO_PROD_HELFRICH_RAINBOW_II ZORRO_ID(HELFRICH_1, 0x20, 0) | ||
| 337 | #define ZORRO_PROD_HELFRICH_RAINBOW_III ZORRO_ID(HELFRICH_1, 0x21, 0) | ||
| 338 | |||
| 339 | #define ZORRO_MANUF_ATLANTIS 0x0862 | ||
| 340 | |||
| 341 | #define ZORRO_MANUF_PROTAR 0x0864 | ||
| 342 | |||
| 343 | #define ZORRO_MANUF_ACS 0x0865 | ||
| 344 | |||
| 345 | #define ZORRO_MANUF_SOFTWARE_RESULTS_ENTERPRISES 0x0866 | ||
| 346 | #define ZORRO_PROD_SOFTWARE_RESULTS_ENTERPRISES_GOLDEN_GATE_2_BUS_PLUS ZORRO_ID(SOFTWARE_RESULTS_ENTERPRISES, 0x01, 0) | ||
| 347 | |||
| 348 | #define ZORRO_MANUF_MASOBOSHI 0x086D | ||
| 349 | #define ZORRO_PROD_MASOBOSHI_MASTER_CARD_SC201 ZORRO_ID(MASOBOSHI, 0x03, 0) | ||
| 350 | #define ZORRO_PROD_MASOBOSHI_MASTER_CARD_MC702 ZORRO_ID(MASOBOSHI, 0x04, 0) | ||
| 351 | #define ZORRO_PROD_MASOBOSHI_MVD_819 ZORRO_ID(MASOBOSHI, 0x07, 0) | ||
| 352 | |||
| 353 | #define ZORRO_MANUF_MAINHATTAN_DATA 0x086F | ||
| 354 | #define ZORRO_PROD_MAINHATTAN_DATA_IDE ZORRO_ID(MAINHATTAN_DATA, 0x01, 0) | ||
| 355 | |||
| 356 | #define ZORRO_MANUF_VILLAGE_TRONIC 0x0877 | ||
| 357 | #define ZORRO_PROD_VILLAGE_TRONIC_DOMINO_RAM ZORRO_ID(VILLAGE_TRONIC, 0x01, 0) | ||
| 358 | #define ZORRO_PROD_VILLAGE_TRONIC_DOMINO_REG ZORRO_ID(VILLAGE_TRONIC, 0x02, 0) | ||
| 359 | #define ZORRO_PROD_VILLAGE_TRONIC_DOMINO_16M_PROTOTYPE ZORRO_ID(VILLAGE_TRONIC, 0x03, 0) | ||
| 360 | #define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM ZORRO_ID(VILLAGE_TRONIC, 0x0B, 0) | ||
| 361 | #define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG ZORRO_ID(VILLAGE_TRONIC, 0x0C, 0) | ||
| 362 | #define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_SEGMENTED_MODE ZORRO_ID(VILLAGE_TRONIC, 0x0D, 0) | ||
| 363 | #define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1 ZORRO_ID(VILLAGE_TRONIC, 0x15, 0) | ||
| 364 | #define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM2 ZORRO_ID(VILLAGE_TRONIC, 0x16, 0) | ||
| 365 | #define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_REG ZORRO_ID(VILLAGE_TRONIC, 0x17, 0) | ||
| 366 | #define ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3 ZORRO_ID(VILLAGE_TRONIC, 0x18, 0) | ||
| 367 | #define ZORRO_PROD_VILLAGE_TRONIC_ARIADNE ZORRO_ID(VILLAGE_TRONIC, 0xC9, 0) | ||
| 368 | #define ZORRO_PROD_VILLAGE_TRONIC_ARIADNE2 ZORRO_ID(VILLAGE_TRONIC, 0xCA, 0) | ||
| 369 | |||
| 370 | #define ZORRO_MANUF_UTILITIES_UNLIMITED 0x087B | ||
| 371 | #define ZORRO_PROD_UTILITIES_UNLIMITED_EMPLANT_DELUXE ZORRO_ID(UTILITIES_UNLIMITED, 0x15, 0) | ||
| 372 | #define ZORRO_PROD_UTILITIES_UNLIMITED_EMPLANT_DELUXE2 ZORRO_ID(UTILITIES_UNLIMITED, 0x20, 0) | ||
| 373 | |||
| 374 | #define ZORRO_MANUF_AMITRIX 0x0880 | ||
| 375 | #define ZORRO_PROD_AMITRIX_MULTI_IO ZORRO_ID(AMITRIX, 0x01, 0) | ||
| 376 | #define ZORRO_PROD_AMITRIX_CD_RAM ZORRO_ID(AMITRIX, 0x02, 0) | ||
| 377 | |||
| 378 | #define ZORRO_MANUF_ARMAX 0x0885 | ||
| 379 | #define ZORRO_PROD_ARMAX_OMNIBUS ZORRO_ID(ARMAX, 0x00, 0) | ||
| 380 | |||
| 381 | #define ZORRO_MANUF_ZEUS 0x088D | ||
| 382 | #define ZORRO_PROD_ZEUS_SPIDER ZORRO_ID(ZEUS, 0x04, 0) | ||
| 383 | |||
| 384 | #define ZORRO_MANUF_NEWTEK 0x088F | ||
| 385 | #define ZORRO_PROD_NEWTEK_VIDEOTOASTER ZORRO_ID(NEWTEK, 0x00, 0) | ||
| 386 | |||
| 387 | #define ZORRO_MANUF_M_TECH_GERMANY 0x0890 | ||
| 388 | #define ZORRO_PROD_MTEC_AT500_2 ZORRO_ID(M_TECH_GERMANY, 0x01, 0) | ||
| 389 | #define ZORRO_PROD_MTEC_68030 ZORRO_ID(M_TECH_GERMANY, 0x03, 0) | ||
| 390 | #define ZORRO_PROD_MTEC_68020I ZORRO_ID(M_TECH_GERMANY, 0x06, 0) | ||
| 391 | #define ZORRO_PROD_MTEC_A1200_T68030_RTC ZORRO_ID(M_TECH_GERMANY, 0x20, 0) | ||
| 392 | #define ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530 ZORRO_ID(M_TECH_GERMANY, 0x21, 0) | ||
| 393 | #define ZORRO_PROD_MTEC_8_MB_RAM ZORRO_ID(M_TECH_GERMANY, 0x22, 0) | ||
| 394 | #define ZORRO_PROD_MTEC_VIPER_MK_V_E_MATRIX_530_SCSI_IDE ZORRO_ID(M_TECH_GERMANY, 0x24, 0) | ||
| 395 | |||
| 396 | #define ZORRO_MANUF_GREAT_VALLEY_PRODUCTS_4 0x0891 | ||
| 397 | #define ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM ZORRO_ID(GREAT_VALLEY_PRODUCTS_4, 0x01, 0) | ||
| 398 | #define ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG ZORRO_ID(GREAT_VALLEY_PRODUCTS_4, 0x02, 0) | ||
| 399 | |||
| 400 | #define ZORRO_MANUF_APOLLO_1 0x0892 | ||
| 401 | #define ZORRO_PROD_APOLLO_A1200 ZORRO_ID(APOLLO_1, 0x01, 0) | ||
| 402 | |||
| 403 | #define ZORRO_MANUF_HELFRICH_2 0x0893 | ||
| 404 | #define ZORRO_PROD_HELFRICH_PICCOLO_RAM ZORRO_ID(HELFRICH_2, 0x05, 0) | ||
| 405 | #define ZORRO_PROD_HELFRICH_PICCOLO_REG ZORRO_ID(HELFRICH_2, 0x06, 0) | ||
| 406 | #define ZORRO_PROD_HELFRICH_PEGGY_PLUS_MPEG ZORRO_ID(HELFRICH_2, 0x07, 0) | ||
| 407 | #define ZORRO_PROD_HELFRICH_VIDEOCRUNCHER ZORRO_ID(HELFRICH_2, 0x08, 0) | ||
| 408 | #define ZORRO_PROD_HELFRICH_SD64_RAM ZORRO_ID(HELFRICH_2, 0x0A, 0) | ||
| 409 | #define ZORRO_PROD_HELFRICH_SD64_REG ZORRO_ID(HELFRICH_2, 0x0B, 0) | ||
| 410 | |||
| 411 | #define ZORRO_MANUF_MACROSYSTEMS_USA 0x089B | ||
| 412 | #define ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx ZORRO_ID(MACROSYSTEMS_USA, 0x13, 0) | ||
| 413 | |||
| 414 | #define ZORRO_MANUF_ELBOX_COMPUTER 0x089E | ||
| 415 | #define ZORRO_PROD_ELBOX_COMPUTER_1200_4 ZORRO_ID(ELBOX_COMPUTER, 0x06, 0) | ||
| 416 | |||
| 417 | #define ZORRO_MANUF_HARMS_PROFESSIONAL 0x0A00 | ||
| 418 | #define ZORRO_PROD_HARMS_PROFESSIONAL_030_PLUS ZORRO_ID(HARMS_PROFESSIONAL, 0x10, 0) | ||
| 419 | #define ZORRO_PROD_HARMS_PROFESSIONAL_3500 ZORRO_ID(HARMS_PROFESSIONAL, 0xD0, 0) | ||
| 420 | |||
| 421 | #define ZORRO_MANUF_MICRONIK 0x0A50 | ||
| 422 | #define ZORRO_PROD_MICRONIK_RCA_120 ZORRO_ID(MICRONIK, 0x0A, 0) | ||
| 423 | |||
| 424 | #define ZORRO_MANUF_MICRONIK2 0x0F0F | ||
| 425 | #define ZORRO_PROD_MICRONIK2_Z3I ZORRO_ID(MICRONIK2, 0x01, 0) | ||
| 426 | |||
| 427 | #define ZORRO_MANUF_MEGAMICRO 0x1000 | ||
| 428 | #define ZORRO_PROD_MEGAMICRO_SCRAM_500 ZORRO_ID(MEGAMICRO, 0x03, 0) | ||
| 429 | #define ZORRO_PROD_MEGAMICRO_SCRAM_500_RAM ZORRO_ID(MEGAMICRO, 0x04, 0) | ||
| 430 | |||
| 431 | #define ZORRO_MANUF_IMTRONICS_2 0x1028 | ||
| 432 | #define ZORRO_PROD_IMTRONICS_HURRICANE_2800_3 ZORRO_ID(IMTRONICS_2, 0x39, 0) | ||
| 433 | #define ZORRO_PROD_IMTRONICS_HURRICANE_2800_4 ZORRO_ID(IMTRONICS_2, 0x57, 0) | ||
| 434 | |||
| 435 | /* unofficial ID */ | ||
| 436 | #define ZORRO_MANUF_INDIVIDUAL_COMPUTERS 0x1212 | ||
| 437 | #define ZORRO_PROD_INDIVIDUAL_COMPUTERS_BUDDHA ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x00, 0) | ||
| 438 | #define ZORRO_PROD_INDIVIDUAL_COMPUTERS_X_SURF ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x17, 0) | ||
| 439 | #define ZORRO_PROD_INDIVIDUAL_COMPUTERS_CATWEASEL ZORRO_ID(INDIVIDUAL_COMPUTERS, 0x2A, 0) | ||
| 440 | |||
| 441 | #define ZORRO_MANUF_KUPKE_3 0x1248 | ||
| 442 | #define ZORRO_PROD_KUPKE_GOLEM_HD_3000 ZORRO_ID(KUPKE_3, 0x01, 0) | ||
| 443 | |||
| 444 | #define ZORRO_MANUF_ITH 0x1388 | ||
| 445 | #define ZORRO_PROD_ITH_ISDN_MASTER_II ZORRO_ID(ITH, 0x01, 0) | ||
| 446 | |||
| 447 | #define ZORRO_MANUF_VMC 0x1389 | ||
| 448 | #define ZORRO_PROD_VMC_ISDN_BLASTER_Z2 ZORRO_ID(VMC, 0x01, 0) | ||
| 449 | #define ZORRO_PROD_VMC_HYPERCOM_4 ZORRO_ID(VMC, 0x02, 0) | ||
| 450 | |||
| 451 | #define ZORRO_MANUF_INFORMATION 0x157C | ||
| 452 | #define ZORRO_PROD_INFORMATION_ISDN_ENGINE_I ZORRO_ID(INFORMATION, 0x64, 0) | ||
| 453 | |||
| 454 | #define ZORRO_MANUF_VORTEX 0x2017 | ||
| 455 | #define ZORRO_PROD_VORTEX_GOLDEN_GATE_80386SX ZORRO_ID(VORTEX, 0x07, 0) | ||
| 456 | #define ZORRO_PROD_VORTEX_GOLDEN_GATE_RAM ZORRO_ID(VORTEX, 0x08, 0) | ||
| 457 | #define ZORRO_PROD_VORTEX_GOLDEN_GATE_80486 ZORRO_ID(VORTEX, 0x09, 0) | ||
| 458 | |||
| 459 | #define ZORRO_MANUF_EXPANSION_SYSTEMS 0x2062 | ||
| 460 | #define ZORRO_PROD_EXPANSION_SYSTEMS_DATAFLYER_4000SX ZORRO_ID(EXPANSION_SYSTEMS, 0x01, 0) | ||
| 461 | #define ZORRO_PROD_EXPANSION_SYSTEMS_DATAFLYER_4000SX_RAM ZORRO_ID(EXPANSION_SYSTEMS, 0x02, 0) | ||
| 462 | |||
| 463 | #define ZORRO_MANUF_READYSOFT 0x2100 | ||
| 464 | #define ZORRO_PROD_READYSOFT_AMAX_II_IV ZORRO_ID(READYSOFT, 0x01, 0) | ||
| 465 | |||
| 466 | #define ZORRO_MANUF_PHASE5 0x2140 | ||
| 467 | #define ZORRO_PROD_PHASE5_BLIZZARD_RAM ZORRO_ID(PHASE5, 0x01, 0) | ||
| 468 | #define ZORRO_PROD_PHASE5_BLIZZARD ZORRO_ID(PHASE5, 0x02, 0) | ||
| 469 | #define ZORRO_PROD_PHASE5_BLIZZARD_1220_IV ZORRO_ID(PHASE5, 0x06, 0) | ||
| 470 | #define ZORRO_PROD_PHASE5_FASTLANE_Z3_RAM ZORRO_ID(PHASE5, 0x0A, 0) | ||
| 471 | #define ZORRO_PROD_PHASE5_BLIZZARD_1230_II_FASTLANE_Z3_CYBERSCSI_CYBERSTORM060 ZORRO_ID(PHASE5, 0x0B, 0) | ||
| 472 | #define ZORRO_PROD_PHASE5_BLIZZARD_1220_CYBERSTORM ZORRO_ID(PHASE5, 0x0C, 0) | ||
| 473 | #define ZORRO_PROD_PHASE5_BLIZZARD_1230 ZORRO_ID(PHASE5, 0x0D, 0) | ||
| 474 | #define ZORRO_PROD_PHASE5_BLIZZARD_1230_IV_1260 ZORRO_ID(PHASE5, 0x11, 0) | ||
| 475 | #define ZORRO_PROD_PHASE5_BLIZZARD_2060 ZORRO_ID(PHASE5, 0x18, 0) | ||
| 476 | #define ZORRO_PROD_PHASE5_CYBERSTORM_MK_II ZORRO_ID(PHASE5, 0x19, 0) | ||
| 477 | #define ZORRO_PROD_PHASE5_CYBERVISION64 ZORRO_ID(PHASE5, 0x22, 0) | ||
| 478 | #define ZORRO_PROD_PHASE5_CYBERVISION64_3D_PROTOTYPE ZORRO_ID(PHASE5, 0x32, 0) | ||
| 479 | #define ZORRO_PROD_PHASE5_CYBERVISION64_3D ZORRO_ID(PHASE5, 0x43, 0) | ||
| 480 | #define ZORRO_PROD_PHASE5_CYBERSTORM_MK_III ZORRO_ID(PHASE5, 0x64, 0) | ||
/*
 * Zorro (Amiga AutoConfig) board ID table (continued).
 *
 * Each ZORRO_MANUF_* constant is a manufacturer number; each ZORRO_PROD_*
 * constant is built with ZORRO_ID(<manufacturer suffix>, <product number>,
 * <third field>).  ZORRO_ID is defined elsewhere (NOTE(review): presumably
 * it packs manufacturer, product and EPC/extended fields into a single ID —
 * confirm against the ZORRO_ID definition in <linux/zorro.h>).  The third
 * argument is 0 for every entry visible here.
 */
| 481 | #define ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS ZORRO_ID(PHASE5, 0x6e, 0) | ||
| 482 | |||
| 483 | #define ZORRO_MANUF_DPS 0x2169 | ||
| 484 | #define ZORRO_PROD_DPS_PERSONAL_ANIMATION_RECORDER ZORRO_ID(DPS, 0x01, 0) | ||
| 485 | |||
| 486 | #define ZORRO_MANUF_APOLLO_2 0x2200 | ||
| 487 | #define ZORRO_PROD_APOLLO_A620_68020_1 ZORRO_ID(APOLLO_2, 0x00, 0) | ||
| 488 | #define ZORRO_PROD_APOLLO_A620_68020_2 ZORRO_ID(APOLLO_2, 0x01, 0) | ||
| 489 | |||
| 490 | #define ZORRO_MANUF_APOLLO_3 0x2222 | ||
| 491 | #define ZORRO_PROD_APOLLO_AT_APOLLO ZORRO_ID(APOLLO_3, 0x22, 0) | ||
| 492 | #define ZORRO_PROD_APOLLO_1230_1240_1260_2030_4040_4060 ZORRO_ID(APOLLO_3, 0x23, 0) | ||
| 493 | |||
| 494 | #define ZORRO_MANUF_PETSOFF_LP 0x38A5 | ||
| 495 | #define ZORRO_PROD_PETSOFF_LP_DELFINA ZORRO_ID(PETSOFF_LP, 0x00, 0) | ||
| 496 | #define ZORRO_PROD_PETSOFF_LP_DELFINA_LITE ZORRO_ID(PETSOFF_LP, 0x01, 0) | ||
| 497 | |||
| 498 | #define ZORRO_MANUF_UWE_GERLACH 0x3FF7 | ||
| 499 | #define ZORRO_PROD_UWE_GERLACH_RAM_ROM ZORRO_ID(UWE_GERLACH, 0xd4, 0) | ||
| 500 | |||
| 501 | #define ZORRO_MANUF_ACT 0x4231 | ||
| 502 | #define ZORRO_PROD_ACT_PRELUDE ZORRO_ID(ACT, 0x01, 0) | ||
| 503 | |||
| 504 | #define ZORRO_MANUF_MACROSYSTEMS_GERMANY 0x4754 | ||
| 505 | #define ZORRO_PROD_MACROSYSTEMS_MAESTRO ZORRO_ID(MACROSYSTEMS_GERMANY, 0x03, 0) | ||
| 506 | #define ZORRO_PROD_MACROSYSTEMS_VLAB ZORRO_ID(MACROSYSTEMS_GERMANY, 0x04, 0) | ||
| 507 | #define ZORRO_PROD_MACROSYSTEMS_MAESTRO_PRO ZORRO_ID(MACROSYSTEMS_GERMANY, 0x05, 0) | ||
| 508 | #define ZORRO_PROD_MACROSYSTEMS_RETINA ZORRO_ID(MACROSYSTEMS_GERMANY, 0x06, 0) | ||
| 509 | #define ZORRO_PROD_MACROSYSTEMS_MULTI_EVOLUTION ZORRO_ID(MACROSYSTEMS_GERMANY, 0x08, 0) | ||
| 510 | #define ZORRO_PROD_MACROSYSTEMS_TOCCATA ZORRO_ID(MACROSYSTEMS_GERMANY, 0x0C, 0) | ||
| 511 | #define ZORRO_PROD_MACROSYSTEMS_RETINA_Z3 ZORRO_ID(MACROSYSTEMS_GERMANY, 0x10, 0) | ||
| 512 | #define ZORRO_PROD_MACROSYSTEMS_VLAB_MOTION ZORRO_ID(MACROSYSTEMS_GERMANY, 0x12, 0) | ||
| 513 | #define ZORRO_PROD_MACROSYSTEMS_ALTAIS ZORRO_ID(MACROSYSTEMS_GERMANY, 0x13, 0) | ||
| 514 | #define ZORRO_PROD_MACROSYSTEMS_FALCON_040 ZORRO_ID(MACROSYSTEMS_GERMANY, 0xFD, 0) | ||
| 515 | |||
/* Manufacturer ID only — no product entries are defined here for Combitec
 * (but see ZORRO_MANUF_COMBITEC_2 below). */
| 516 | #define ZORRO_MANUF_COMBITEC 0x6766 | ||
| 517 | |||
| 518 | #define ZORRO_MANUF_SKI_PERIPHERALS 0x8000 | ||
| 519 | #define ZORRO_PROD_SKI_PERIPHERALS_MAST_FIREBALL ZORRO_ID(SKI_PERIPHERALS, 0x08, 0) | ||
| 520 | #define ZORRO_PROD_SKI_PERIPHERALS_SCSI_DUAL_SERIAL ZORRO_ID(SKI_PERIPHERALS, 0x80, 0) | ||
| 521 | |||
| 522 | #define ZORRO_MANUF_REIS_WARE_2 0xA9AD | ||
| 523 | #define ZORRO_PROD_REIS_WARE_SCAN_KING ZORRO_ID(REIS_WARE_2, 0x11, 0) | ||
| 524 | |||
| 525 | #define ZORRO_MANUF_CAMERON 0xAA01 | ||
| 526 | #define ZORRO_PROD_CAMERON_PERSONAL_A4 ZORRO_ID(CAMERON, 0x10, 0) | ||
| 527 | |||
| 528 | #define ZORRO_MANUF_REIS_WARE 0xAA11 | ||
| 529 | #define ZORRO_PROD_REIS_WARE_HANDYSCANNER ZORRO_ID(REIS_WARE, 0x11, 0) | ||
| 530 | |||
| 531 | #define ZORRO_MANUF_PHOENIX_2 0xB5A8 | ||
| 532 | #define ZORRO_PROD_PHOENIX_ST506_2 ZORRO_ID(PHOENIX_2, 0x21, 0) | ||
| 533 | #define ZORRO_PROD_PHOENIX_SCSI_2 ZORRO_ID(PHOENIX_2, 0x22, 0) | ||
| 534 | #define ZORRO_PROD_PHOENIX_RAM_2 ZORRO_ID(PHOENIX_2, 0xBE, 0) | ||
| 535 | |||
| 536 | #define ZORRO_MANUF_COMBITEC_2 0xC008 | ||
| 537 | #define ZORRO_PROD_COMBITEC_HD ZORRO_ID(COMBITEC_2, 0x2A, 0) | ||
| 538 | #define ZORRO_PROD_COMBITEC_SRAM ZORRO_ID(COMBITEC_2, 0x2B, 0) | ||
| 539 | |||
| 540 | |||
| 541 | /* | ||
| 542 | * Test and illegal Manufacturer IDs. | ||
| 543 | */ | ||
| 544 | |||
/* 0x07DB is the "hacker" manufacturer code used by homebrew/prototype
 * boards; the product entries below share this single manufacturer ID. */
| 545 | #define ZORRO_MANUF_HACKER 0x07DB | ||
| 546 | #define ZORRO_PROD_GENERAL_PROTOTYPE ZORRO_ID(HACKER, 0x00, 0) | ||
| 547 | #define ZORRO_PROD_HACKER_SCSI ZORRO_ID(HACKER, 0x01, 0) | ||
| 548 | #define ZORRO_PROD_RESOURCE_MANAGEMENT_FORCE_QUICKNET_QN2000 ZORRO_ID(HACKER, 0x02, 0) | ||
| 549 | #define ZORRO_PROD_VECTOR_CONNECTION_2 ZORRO_ID(HACKER, 0xE0, 0) | ||
| 550 | #define ZORRO_PROD_VECTOR_CONNECTION_3 ZORRO_ID(HACKER, 0xE1, 0) | ||
| 551 | #define ZORRO_PROD_VECTOR_CONNECTION_4 ZORRO_ID(HACKER, 0xE2, 0) | ||
| 552 | #define ZORRO_PROD_VECTOR_CONNECTION_5 ZORRO_ID(HACKER, 0xE3, 0) | ||
