Diffstat (limited to 'include')
29 files changed, 729 insertions, 430 deletions
diff --git a/include/asm-cris/arch-v10/ide.h b/include/asm-cris/arch-v10/ide.h
index 78b301ed7b12..ea34e0d0a388 100644
--- a/include/asm-cris/arch-v10/ide.h
+++ b/include/asm-cris/arch-v10/ide.h
@@ -89,11 +89,6 @@ static inline void ide_init_default_hwifs(void) | |||
89 | } | 89 | } |
90 | } | 90 | } |
91 | 91 | ||
92 | /* some configuration options we don't need */ | ||
93 | |||
94 | #undef SUPPORT_VLB_SYNC | ||
95 | #define SUPPORT_VLB_SYNC 0 | ||
96 | |||
97 | #endif /* __KERNEL__ */ | 92 | #endif /* __KERNEL__ */ |
98 | 93 | ||
99 | #endif /* __ASMCRIS_IDE_H */ | 94 | #endif /* __ASMCRIS_IDE_H */ |
diff --git a/include/asm-cris/arch-v32/ide.h b/include/asm-cris/arch-v32/ide.h
index 11296170d057..fb9c3627a5b4 100644
--- a/include/asm-cris/arch-v32/ide.h
+++ b/include/asm-cris/arch-v32/ide.h
@@ -48,11 +48,6 @@ static inline unsigned long ide_default_io_base(int index) | |||
48 | return REG_TYPE_CONV(unsigned long, reg_ata_rw_ctrl2, ctrl2); | 48 | return REG_TYPE_CONV(unsigned long, reg_ata_rw_ctrl2, ctrl2); |
49 | } | 49 | } |
50 | 50 | ||
51 | /* some configuration options we don't need */ | ||
52 | |||
53 | #undef SUPPORT_VLB_SYNC | ||
54 | #define SUPPORT_VLB_SYNC 0 | ||
55 | |||
56 | #define IDE_ARCH_ACK_INTR | 51 | #define IDE_ARCH_ACK_INTR |
57 | #define ide_ack_intr(hwif) ((hwif)->ack_intr(hwif)) | 52 | #define ide_ack_intr(hwif) ((hwif)->ack_intr(hwif)) |
58 | 53 | ||
diff --git a/include/asm-frv/ide.h b/include/asm-frv/ide.h
index f0bd2cb250c1..8c9a540d4344 100644
--- a/include/asm-frv/ide.h
+++ b/include/asm-frv/ide.h
@@ -18,12 +18,6 @@ | |||
18 | #include <asm/io.h> | 18 | #include <asm/io.h> |
19 | #include <asm/irq.h> | 19 | #include <asm/irq.h> |
20 | 20 | ||
21 | #undef SUPPORT_SLOW_DATA_PORTS | ||
22 | #define SUPPORT_SLOW_DATA_PORTS 0 | ||
23 | |||
24 | #undef SUPPORT_VLB_SYNC | ||
25 | #define SUPPORT_VLB_SYNC 0 | ||
26 | |||
27 | #ifndef MAX_HWIFS | 21 | #ifndef MAX_HWIFS |
28 | #define MAX_HWIFS 8 | 22 | #define MAX_HWIFS 8 |
29 | #endif | 23 | #endif |
diff --git a/include/asm-generic/resource.h b/include/asm-generic/resource.h
index a4a22cc35898..587566f95f6c 100644
--- a/include/asm-generic/resource.h
+++ b/include/asm-generic/resource.h
@@ -44,8 +44,8 @@ | |||
44 | #define RLIMIT_NICE 13 /* max nice prio allowed to raise to | 44 | #define RLIMIT_NICE 13 /* max nice prio allowed to raise to |
45 | 0-39 for nice level 19 .. -20 */ | 45 | 0-39 for nice level 19 .. -20 */ |
46 | #define RLIMIT_RTPRIO 14 /* maximum realtime priority */ | 46 | #define RLIMIT_RTPRIO 14 /* maximum realtime priority */ |
47 | 47 | #define RLIMIT_RTTIME 15 /* timeout for RT tasks in us */ | |
48 | #define RLIM_NLIMITS 15 | 48 | #define RLIM_NLIMITS 16 |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * SuS says limits have to be unsigned. | 51 | * SuS says limits have to be unsigned. |
@@ -86,6 +86,7 @@ | |||
86 | [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ | 86 | [RLIMIT_MSGQUEUE] = { MQ_BYTES_MAX, MQ_BYTES_MAX }, \ |
87 | [RLIMIT_NICE] = { 0, 0 }, \ | 87 | [RLIMIT_NICE] = { 0, 0 }, \ |
88 | [RLIMIT_RTPRIO] = { 0, 0 }, \ | 88 | [RLIMIT_RTPRIO] = { 0, 0 }, \ |
89 | [RLIMIT_RTTIME] = { RLIM_INFINITY, RLIM_INFINITY }, \ | ||
89 | } | 90 | } |
90 | 91 | ||
91 | #endif /* __KERNEL__ */ | 92 | #endif /* __KERNEL__ */ |
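The hunks above add RLIMIT_RTTIME as limit slot 15 (a per-task cap on continuous realtime CPU time, in microseconds) and bump RLIM_NLIMITS to 16. A minimal userspace sketch of setting the new limit follows; the 500 ms value and the fallback #define are illustrative assumptions, not part of this patch:

#include <stdio.h>
#include <sys/resource.h>

#ifndef RLIMIT_RTTIME
#define RLIMIT_RTTIME 15	/* slot number introduced by this patch */
#endif

int main(void)
{
	/* Allow at most 500 ms of uninterrupted RT CPU time (values in us). */
	struct rlimit rl = { .rlim_cur = 500000, .rlim_max = 500000 };

	if (setrlimit(RLIMIT_RTTIME, &rl) != 0)
		perror("setrlimit(RLIMIT_RTTIME)");
	return 0;
}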
diff --git a/include/asm-powerpc/ide.h b/include/asm-powerpc/ide.h
index fd7f5a430f0a..6d50310ecaea 100644
--- a/include/asm-powerpc/ide.h
+++ b/include/asm-powerpc/ide.h
@@ -42,9 +42,6 @@ struct ide_machdep_calls { | |||
42 | 42 | ||
43 | extern struct ide_machdep_calls ppc_ide_md; | 43 | extern struct ide_machdep_calls ppc_ide_md; |
44 | 44 | ||
45 | #undef SUPPORT_SLOW_DATA_PORTS | ||
46 | #define SUPPORT_SLOW_DATA_PORTS 0 | ||
47 | |||
48 | #define IDE_ARCH_OBSOLETE_DEFAULTS | 45 | #define IDE_ARCH_OBSOLETE_DEFAULTS |
49 | 46 | ||
50 | static __inline__ int ide_default_irq(unsigned long base) | 47 | static __inline__ int ide_default_irq(unsigned long base) |
diff --git a/include/asm-x86/thread_info_32.h b/include/asm-x86/thread_info_32.h
index 22a8cbcd35e2..ef58fd2a6eb0 100644
--- a/include/asm-x86/thread_info_32.h
+++ b/include/asm-x86/thread_info_32.h
@@ -132,6 +132,7 @@ static inline struct thread_info *current_thread_info(void) | |||
132 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ | 132 | #define TIF_SYSCALL_AUDIT 6 /* syscall auditing active */ |
133 | #define TIF_SECCOMP 7 /* secure computing */ | 133 | #define TIF_SECCOMP 7 /* secure computing */ |
134 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ | 134 | #define TIF_RESTORE_SIGMASK 8 /* restore signal mask in do_signal() */ |
135 | #define TIF_HRTICK_RESCHED 9 /* reprogram hrtick timer */ | ||
135 | #define TIF_MEMDIE 16 | 136 | #define TIF_MEMDIE 16 |
136 | #define TIF_DEBUG 17 /* uses debug registers */ | 137 | #define TIF_DEBUG 17 /* uses debug registers */ |
137 | #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ | 138 | #define TIF_IO_BITMAP 18 /* uses I/O bitmap */ |
@@ -147,6 +148,7 @@ static inline struct thread_info *current_thread_info(void) | |||
147 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) | 148 | #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) |
148 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 149 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
149 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 150 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
151 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | ||
150 | #define _TIF_DEBUG (1<<TIF_DEBUG) | 152 | #define _TIF_DEBUG (1<<TIF_DEBUG) |
151 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) | 153 | #define _TIF_IO_BITMAP (1<<TIF_IO_BITMAP) |
152 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 154 | #define _TIF_FREEZE (1<<TIF_FREEZE) |
diff --git a/include/asm-x86/thread_info_64.h b/include/asm-x86/thread_info_64.h
index beae2bfb62ca..7f6ee68f0002 100644
--- a/include/asm-x86/thread_info_64.h
+++ b/include/asm-x86/thread_info_64.h
@@ -115,6 +115,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
115 | #define TIF_SECCOMP 8 /* secure computing */ | 115 | #define TIF_SECCOMP 8 /* secure computing */ |
116 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ | 116 | #define TIF_RESTORE_SIGMASK 9 /* restore signal mask in do_signal */ |
117 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ | 117 | #define TIF_MCE_NOTIFY 10 /* notify userspace of an MCE */ |
118 | #define TIF_HRTICK_RESCHED 11 /* reprogram hrtick timer */ | ||
118 | /* 16 free */ | 119 | /* 16 free */ |
119 | #define TIF_IA32 17 /* 32bit process */ | 120 | #define TIF_IA32 17 /* 32bit process */ |
120 | #define TIF_FORK 18 /* ret_from_fork */ | 121 | #define TIF_FORK 18 /* ret_from_fork */ |
@@ -133,6 +134,7 @@ static inline struct thread_info *stack_thread_info(void) | |||
133 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) | 134 | #define _TIF_SECCOMP (1<<TIF_SECCOMP) |
134 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) | 135 | #define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) |
135 | #define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) | 136 | #define _TIF_MCE_NOTIFY (1<<TIF_MCE_NOTIFY) |
137 | #define _TIF_HRTICK_RESCHED (1<<TIF_HRTICK_RESCHED) | ||
136 | #define _TIF_IA32 (1<<TIF_IA32) | 138 | #define _TIF_IA32 (1<<TIF_IA32) |
137 | #define _TIF_FORK (1<<TIF_FORK) | 139 | #define _TIF_FORK (1<<TIF_FORK) |
138 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) | 140 | #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) |
@@ -146,6 +148,9 @@ static inline struct thread_info *stack_thread_info(void) | |||
146 | /* work to do on any return to user space */ | 148 | /* work to do on any return to user space */ |
147 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) | 149 | #define _TIF_ALLWORK_MASK (0x0000FFFF & ~_TIF_SECCOMP) |
148 | 150 | ||
151 | #define _TIF_DO_NOTIFY_MASK \ | ||
152 | (_TIF_SIGPENDING|_TIF_SINGLESTEP|_TIF_MCE_NOTIFY|_TIF_HRTICK_RESCHED) | ||
153 | |||
149 | /* flags to check in __switch_to() */ | 154 | /* flags to check in __switch_to() */ |
150 | #define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP) | 155 | #define _TIF_WORK_CTXSW (_TIF_DEBUG|_TIF_IO_BITMAP) |
151 | 156 | ||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index d18ee67b40f8..40ee1706caa3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -144,7 +144,6 @@ enum rq_cmd_type_bits { | |||
144 | * private REQ_LB opcodes to differentiate what type of request this is | 144 | * private REQ_LB opcodes to differentiate what type of request this is |
145 | */ | 145 | */ |
146 | REQ_TYPE_ATA_CMD, | 146 | REQ_TYPE_ATA_CMD, |
147 | REQ_TYPE_ATA_TASK, | ||
148 | REQ_TYPE_ATA_TASKFILE, | 147 | REQ_TYPE_ATA_TASKFILE, |
149 | REQ_TYPE_ATA_PC, | 148 | REQ_TYPE_ATA_PC, |
150 | }; | 149 | }; |
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 92f2029a34f3..0be8d65bc3c8 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -71,18 +71,27 @@ static inline void unregister_cpu_notifier(struct notifier_block *nb) | |||
71 | 71 | ||
72 | int cpu_up(unsigned int cpu); | 72 | int cpu_up(unsigned int cpu); |
73 | 73 | ||
74 | extern void cpu_hotplug_init(void); | ||
75 | |||
74 | #else | 76 | #else |
75 | 77 | ||
76 | static inline int register_cpu_notifier(struct notifier_block *nb) | 78 | static inline int register_cpu_notifier(struct notifier_block *nb) |
77 | { | 79 | { |
78 | return 0; | 80 | return 0; |
79 | } | 81 | } |
82 | |||
80 | static inline void unregister_cpu_notifier(struct notifier_block *nb) | 83 | static inline void unregister_cpu_notifier(struct notifier_block *nb) |
81 | { | 84 | { |
82 | } | 85 | } |
83 | 86 | ||
87 | static inline void cpu_hotplug_init(void) | ||
88 | { | ||
89 | } | ||
90 | |||
84 | #endif /* CONFIG_SMP */ | 91 | #endif /* CONFIG_SMP */ |
85 | extern struct sysdev_class cpu_sysdev_class; | 92 | extern struct sysdev_class cpu_sysdev_class; |
93 | extern void cpu_maps_update_begin(void); | ||
94 | extern void cpu_maps_update_done(void); | ||
86 | 95 | ||
87 | #ifdef CONFIG_HOTPLUG_CPU | 96 | #ifdef CONFIG_HOTPLUG_CPU |
88 | /* Stop CPUs going up and down. */ | 97 | /* Stop CPUs going up and down. */ |
@@ -97,8 +106,8 @@ static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) | |||
97 | mutex_unlock(cpu_hp_mutex); | 106 | mutex_unlock(cpu_hp_mutex); |
98 | } | 107 | } |
99 | 108 | ||
100 | extern void lock_cpu_hotplug(void); | 109 | extern void get_online_cpus(void); |
101 | extern void unlock_cpu_hotplug(void); | 110 | extern void put_online_cpus(void); |
102 | #define hotcpu_notifier(fn, pri) { \ | 111 | #define hotcpu_notifier(fn, pri) { \ |
103 | static struct notifier_block fn##_nb = \ | 112 | static struct notifier_block fn##_nb = \ |
104 | { .notifier_call = fn, .priority = pri }; \ | 113 | { .notifier_call = fn, .priority = pri }; \ |
@@ -115,8 +124,8 @@ static inline void cpuhotplug_mutex_lock(struct mutex *cpu_hp_mutex) | |||
115 | static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) | 124 | static inline void cpuhotplug_mutex_unlock(struct mutex *cpu_hp_mutex) |
116 | { } | 125 | { } |
117 | 126 | ||
118 | #define lock_cpu_hotplug() do { } while (0) | 127 | #define get_online_cpus() do { } while (0) |
119 | #define unlock_cpu_hotplug() do { } while (0) | 128 | #define put_online_cpus() do { } while (0) |
120 | #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) | 129 | #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) |
121 | /* These aren't inline functions due to a GCC bug. */ | 130 | /* These aren't inline functions due to a GCC bug. */ |
122 | #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) | 131 | #define register_hotcpu_notifier(nb) ({ (void)(nb); 0; }) |
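The cpu.h hunks above rename lock_cpu_hotplug()/unlock_cpu_hotplug() to get_online_cpus()/put_online_cpus() (keeping no-op stubs when hotplug is disabled) and export cpu_maps_update_begin()/cpu_maps_update_done(). A hedged sketch of a caller converted to the new names; walk_online_cpus() is a made-up helper, not kernel code:

#include <linux/cpu.h>
#include <linux/cpumask.h>

/* Run fn on every online CPU without racing against CPU hotplug. */
static void walk_online_cpus(void (*fn)(int cpu))
{
	int cpu;

	get_online_cpus();		/* was: lock_cpu_hotplug() */
	for_each_online_cpu(cpu)
		fn(cpu);
	put_online_cpus();		/* was: unlock_cpu_hotplug() */
}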
diff --git a/include/linux/debug_locks.h b/include/linux/debug_locks.h
index 1678a5de7013..f4a5871767f5 100644
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -47,6 +47,7 @@ struct task_struct; | |||
47 | 47 | ||
48 | #ifdef CONFIG_LOCKDEP | 48 | #ifdef CONFIG_LOCKDEP |
49 | extern void debug_show_all_locks(void); | 49 | extern void debug_show_all_locks(void); |
50 | extern void __debug_show_held_locks(struct task_struct *task); | ||
50 | extern void debug_show_held_locks(struct task_struct *task); | 51 | extern void debug_show_held_locks(struct task_struct *task); |
51 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); | 52 | extern void debug_check_no_locks_freed(const void *from, unsigned long len); |
52 | extern void debug_check_no_locks_held(struct task_struct *task); | 53 | extern void debug_check_no_locks_held(struct task_struct *task); |
@@ -55,6 +56,10 @@ static inline void debug_show_all_locks(void) | |||
55 | { | 56 | { |
56 | } | 57 | } |
57 | 58 | ||
59 | static inline void __debug_show_held_locks(struct task_struct *task) | ||
60 | { | ||
61 | } | ||
62 | |||
58 | static inline void debug_show_held_locks(struct task_struct *task) | 63 | static inline void debug_show_held_locks(struct task_struct *task) |
59 | { | 64 | { |
60 | } | 65 | } |
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 92d420fe03f8..1a15f8e237a7 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -1,8 +1,12 @@ | |||
1 | #ifndef _LINUX_FUTEX_H | 1 | #ifndef _LINUX_FUTEX_H |
2 | #define _LINUX_FUTEX_H | 2 | #define _LINUX_FUTEX_H |
3 | 3 | ||
4 | #include <linux/sched.h> | 4 | #include <linux/compiler.h> |
5 | #include <linux/types.h> | ||
5 | 6 | ||
7 | struct inode; | ||
8 | struct mm_struct; | ||
9 | struct task_struct; | ||
6 | union ktime; | 10 | union ktime; |
7 | 11 | ||
8 | /* Second argument to futex syscall */ | 12 | /* Second argument to futex syscall */ |
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8d302298a161..2961ec788046 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -72,11 +72,7 @@ | |||
72 | #define in_softirq() (softirq_count()) | 72 | #define in_softirq() (softirq_count()) |
73 | #define in_interrupt() (irq_count()) | 73 | #define in_interrupt() (irq_count()) |
74 | 74 | ||
75 | #if defined(CONFIG_PREEMPT) && !defined(CONFIG_PREEMPT_BKL) | 75 | #define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) |
76 | # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked()) | ||
77 | #else | ||
78 | # define in_atomic() ((preempt_count() & ~PREEMPT_ACTIVE) != 0) | ||
79 | #endif | ||
80 | 76 | ||
81 | #ifdef CONFIG_PREEMPT | 77 | #ifdef CONFIG_PREEMPT |
82 | # define PREEMPT_CHECK_OFFSET 1 | 78 | # define PREEMPT_CHECK_OFFSET 1 |
diff --git a/include/linux/hdreg.h b/include/linux/hdreg.h
index 818c6afc1091..ff43f8d6b5b3 100644
--- a/include/linux/hdreg.h
+++ b/include/linux/hdreg.h
@@ -44,7 +44,9 @@ | |||
44 | 44 | ||
45 | /* Bits for HD_ERROR */ | 45 | /* Bits for HD_ERROR */ |
46 | #define MARK_ERR 0x01 /* Bad address mark */ | 46 | #define MARK_ERR 0x01 /* Bad address mark */ |
47 | #define ILI_ERR 0x01 /* Illegal Length Indication (ATAPI) */ | ||
47 | #define TRK0_ERR 0x02 /* couldn't find track 0 */ | 48 | #define TRK0_ERR 0x02 /* couldn't find track 0 */ |
49 | #define EOM_ERR 0x02 /* End Of Media (ATAPI) */ | ||
48 | #define ABRT_ERR 0x04 /* Command aborted */ | 50 | #define ABRT_ERR 0x04 /* Command aborted */ |
49 | #define MCR_ERR 0x08 /* media change request */ | 51 | #define MCR_ERR 0x08 /* media change request */ |
50 | #define ID_ERR 0x10 /* ID field not found */ | 52 | #define ID_ERR 0x10 /* ID field not found */ |
@@ -52,6 +54,7 @@ | |||
52 | #define ECC_ERR 0x40 /* Uncorrectable ECC error */ | 54 | #define ECC_ERR 0x40 /* Uncorrectable ECC error */ |
53 | #define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ | 55 | #define BBD_ERR 0x80 /* pre-EIDE meaning: block marked bad */ |
54 | #define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ | 56 | #define ICRC_ERR 0x80 /* new meaning: CRC error during transfer */ |
57 | #define LFS_ERR 0xf0 /* Last Failed Sense (ATAPI) */ | ||
55 | 58 | ||
56 | /* Bits of HD_NSECTOR */ | 59 | /* Bits of HD_NSECTOR */ |
57 | #define CD 0x01 | 60 | #define CD 0x01 |
@@ -70,13 +73,13 @@ | |||
70 | #define HDIO_DRIVE_HOB_HDR_SIZE (8 * sizeof(__u8)) | 73 | #define HDIO_DRIVE_HOB_HDR_SIZE (8 * sizeof(__u8)) |
71 | #define HDIO_DRIVE_TASK_HDR_SIZE (8 * sizeof(__u8)) | 74 | #define HDIO_DRIVE_TASK_HDR_SIZE (8 * sizeof(__u8)) |
72 | 75 | ||
73 | #define IDE_DRIVE_TASK_INVALID -1 | ||
74 | #define IDE_DRIVE_TASK_NO_DATA 0 | 76 | #define IDE_DRIVE_TASK_NO_DATA 0 |
77 | #ifndef __KERNEL__ | ||
78 | #define IDE_DRIVE_TASK_INVALID -1 | ||
75 | #define IDE_DRIVE_TASK_SET_XFER 1 | 79 | #define IDE_DRIVE_TASK_SET_XFER 1 |
76 | |||
77 | #define IDE_DRIVE_TASK_IN 2 | 80 | #define IDE_DRIVE_TASK_IN 2 |
78 | |||
79 | #define IDE_DRIVE_TASK_OUT 3 | 81 | #define IDE_DRIVE_TASK_OUT 3 |
82 | #endif | ||
80 | #define IDE_DRIVE_TASK_RAW_WRITE 4 | 83 | #define IDE_DRIVE_TASK_RAW_WRITE 4 |
81 | 84 | ||
82 | /* | 85 | /* |
@@ -87,10 +90,10 @@ | |||
87 | #ifndef __KERNEL__ | 90 | #ifndef __KERNEL__ |
88 | #define IDE_TASKFILE_STD_OUT_FLAGS 0xFE | 91 | #define IDE_TASKFILE_STD_OUT_FLAGS 0xFE |
89 | #define IDE_HOB_STD_OUT_FLAGS 0x3C | 92 | #define IDE_HOB_STD_OUT_FLAGS 0x3C |
90 | #endif | ||
91 | 93 | ||
92 | typedef unsigned char task_ioreg_t; | 94 | typedef unsigned char task_ioreg_t; |
93 | typedef unsigned long sata_ioreg_t; | 95 | typedef unsigned long sata_ioreg_t; |
96 | #endif | ||
94 | 97 | ||
95 | typedef union ide_reg_valid_s { | 98 | typedef union ide_reg_valid_s { |
96 | unsigned all : 16; | 99 | unsigned all : 16; |
@@ -116,8 +119,8 @@ typedef union ide_reg_valid_s { | |||
116 | } ide_reg_valid_t; | 119 | } ide_reg_valid_t; |
117 | 120 | ||
118 | typedef struct ide_task_request_s { | 121 | typedef struct ide_task_request_s { |
119 | task_ioreg_t io_ports[8]; | 122 | __u8 io_ports[8]; |
120 | task_ioreg_t hob_ports[8]; | 123 | __u8 hob_ports[8]; /* bytes 6 and 7 are unused */ |
121 | ide_reg_valid_t out_flags; | 124 | ide_reg_valid_t out_flags; |
122 | ide_reg_valid_t in_flags; | 125 | ide_reg_valid_t in_flags; |
123 | int data_phase; | 126 | int data_phase; |
@@ -133,36 +136,35 @@ typedef struct ide_ioctl_request_s { | |||
133 | } ide_ioctl_request_t; | 136 | } ide_ioctl_request_t; |
134 | 137 | ||
135 | struct hd_drive_cmd_hdr { | 138 | struct hd_drive_cmd_hdr { |
136 | task_ioreg_t command; | 139 | __u8 command; |
137 | task_ioreg_t sector_number; | 140 | __u8 sector_number; |
138 | task_ioreg_t feature; | 141 | __u8 feature; |
139 | task_ioreg_t sector_count; | 142 | __u8 sector_count; |
140 | }; | 143 | }; |
141 | 144 | ||
145 | #ifndef __KERNEL__ | ||
142 | typedef struct hd_drive_task_hdr { | 146 | typedef struct hd_drive_task_hdr { |
143 | task_ioreg_t data; | 147 | __u8 data; |
144 | task_ioreg_t feature; | 148 | __u8 feature; |
145 | task_ioreg_t sector_count; | 149 | __u8 sector_count; |
146 | task_ioreg_t sector_number; | 150 | __u8 sector_number; |
147 | task_ioreg_t low_cylinder; | 151 | __u8 low_cylinder; |
148 | task_ioreg_t high_cylinder; | 152 | __u8 high_cylinder; |
149 | task_ioreg_t device_head; | 153 | __u8 device_head; |
150 | task_ioreg_t command; | 154 | __u8 command; |
151 | } task_struct_t; | 155 | } task_struct_t; |
152 | 156 | ||
153 | typedef struct hd_drive_hob_hdr { | 157 | typedef struct hd_drive_hob_hdr { |
154 | task_ioreg_t data; | 158 | __u8 data; |
155 | task_ioreg_t feature; | 159 | __u8 feature; |
156 | task_ioreg_t sector_count; | 160 | __u8 sector_count; |
157 | task_ioreg_t sector_number; | 161 | __u8 sector_number; |
158 | task_ioreg_t low_cylinder; | 162 | __u8 low_cylinder; |
159 | task_ioreg_t high_cylinder; | 163 | __u8 high_cylinder; |
160 | task_ioreg_t device_head; | 164 | __u8 device_head; |
161 | task_ioreg_t control; | 165 | __u8 control; |
162 | } hob_struct_t; | 166 | } hob_struct_t; |
163 | 167 | #endif | |
164 | #define TASKFILE_INVALID 0x7fff | ||
165 | #define TASKFILE_48 0x8000 | ||
166 | 168 | ||
167 | #define TASKFILE_NO_DATA 0x0000 | 169 | #define TASKFILE_NO_DATA 0x0000 |
168 | 170 | ||
@@ -178,12 +180,16 @@ typedef struct hd_drive_hob_hdr { | |||
178 | #define TASKFILE_IN_DMAQ 0x0080 | 180 | #define TASKFILE_IN_DMAQ 0x0080 |
179 | #define TASKFILE_OUT_DMAQ 0x0100 | 181 | #define TASKFILE_OUT_DMAQ 0x0100 |
180 | 182 | ||
183 | #ifndef __KERNEL__ | ||
181 | #define TASKFILE_P_IN 0x0200 | 184 | #define TASKFILE_P_IN 0x0200 |
182 | #define TASKFILE_P_OUT 0x0400 | 185 | #define TASKFILE_P_OUT 0x0400 |
183 | #define TASKFILE_P_IN_DMA 0x0800 | 186 | #define TASKFILE_P_IN_DMA 0x0800 |
184 | #define TASKFILE_P_OUT_DMA 0x1000 | 187 | #define TASKFILE_P_OUT_DMA 0x1000 |
185 | #define TASKFILE_P_IN_DMAQ 0x2000 | 188 | #define TASKFILE_P_IN_DMAQ 0x2000 |
186 | #define TASKFILE_P_OUT_DMAQ 0x4000 | 189 | #define TASKFILE_P_OUT_DMAQ 0x4000 |
190 | #define TASKFILE_48 0x8000 | ||
191 | #define TASKFILE_INVALID 0x7fff | ||
192 | #endif | ||
187 | 193 | ||
188 | /* ATA/ATAPI Commands pre T13 Spec */ | 194 | /* ATA/ATAPI Commands pre T13 Spec */ |
189 | #define WIN_NOP 0x00 | 195 | #define WIN_NOP 0x00 |
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 7a9398e19704..49067f14fac1 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -115,10 +115,8 @@ struct hrtimer { | |||
115 | enum hrtimer_restart (*function)(struct hrtimer *); | 115 | enum hrtimer_restart (*function)(struct hrtimer *); |
116 | struct hrtimer_clock_base *base; | 116 | struct hrtimer_clock_base *base; |
117 | unsigned long state; | 117 | unsigned long state; |
118 | #ifdef CONFIG_HIGH_RES_TIMERS | ||
119 | enum hrtimer_cb_mode cb_mode; | 118 | enum hrtimer_cb_mode cb_mode; |
120 | struct list_head cb_entry; | 119 | struct list_head cb_entry; |
121 | #endif | ||
122 | #ifdef CONFIG_TIMER_STATS | 120 | #ifdef CONFIG_TIMER_STATS |
123 | void *start_site; | 121 | void *start_site; |
124 | char start_comm[16]; | 122 | char start_comm[16]; |
@@ -194,10 +192,10 @@ struct hrtimer_cpu_base { | |||
194 | spinlock_t lock; | 192 | spinlock_t lock; |
195 | struct lock_class_key lock_key; | 193 | struct lock_class_key lock_key; |
196 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; | 194 | struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; |
195 | struct list_head cb_pending; | ||
197 | #ifdef CONFIG_HIGH_RES_TIMERS | 196 | #ifdef CONFIG_HIGH_RES_TIMERS |
198 | ktime_t expires_next; | 197 | ktime_t expires_next; |
199 | int hres_active; | 198 | int hres_active; |
200 | struct list_head cb_pending; | ||
201 | unsigned long nr_events; | 199 | unsigned long nr_events; |
202 | #endif | 200 | #endif |
203 | }; | 201 | }; |
@@ -217,6 +215,11 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | |||
217 | return timer->base->get_time(); | 215 | return timer->base->get_time(); |
218 | } | 216 | } |
219 | 217 | ||
218 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | ||
219 | { | ||
220 | return timer->base->cpu_base->hres_active; | ||
221 | } | ||
222 | |||
220 | /* | 223 | /* |
221 | * The resolution of the clocks. The resolution value is returned in | 224 | * The resolution of the clocks. The resolution value is returned in |
222 | * the clock_getres() system call to give application programmers an | 225 | * the clock_getres() system call to give application programmers an |
@@ -248,6 +251,10 @@ static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer) | |||
248 | return timer->base->softirq_time; | 251 | return timer->base->softirq_time; |
249 | } | 252 | } |
250 | 253 | ||
254 | static inline int hrtimer_is_hres_active(struct hrtimer *timer) | ||
255 | { | ||
256 | return 0; | ||
257 | } | ||
251 | #endif | 258 | #endif |
252 | 259 | ||
253 | extern ktime_t ktime_get(void); | 260 | extern ktime_t ktime_get(void); |
@@ -310,6 +317,7 @@ extern void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, | |||
310 | 317 | ||
311 | /* Soft interrupt function to run the hrtimer queues: */ | 318 | /* Soft interrupt function to run the hrtimer queues: */ |
312 | extern void hrtimer_run_queues(void); | 319 | extern void hrtimer_run_queues(void); |
320 | extern void hrtimer_run_pending(void); | ||
313 | 321 | ||
314 | /* Bootup initialization: */ | 322 | /* Bootup initialization: */ |
315 | extern void __init hrtimers_init(void); | 323 | extern void __init hrtimers_init(void); |
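The hrtimer.h changes above move cb_pending into the per-CPU base, add hrtimer_run_pending(), and provide hrtimer_is_hres_active() in both configurations (returning 0 when high-resolution timers are not active). A sketch of a callback branching on the new helper; my_timer_fn() is hypothetical:

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	if (hrtimer_is_hres_active(t)) {
		/* base is driven by a high-resolution clock event device */
	} else {
		/* hres inactive (or !CONFIG_HIGH_RES_TIMERS): jiffies-driven path */
	}
	return HRTIMER_NORESTART;
}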
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 9a6a41e7079f..1e4409937ec3 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -27,25 +27,10 @@ | |||
27 | #include <asm/semaphore.h> | 27 | #include <asm/semaphore.h> |
28 | #include <asm/mutex.h> | 28 | #include <asm/mutex.h> |
29 | 29 | ||
30 | /****************************************************************************** | 30 | #if defined(CRIS) || defined(FRV) |
31 | * IDE driver configuration options (play with these as desired): | 31 | # define SUPPORT_VLB_SYNC 0 |
32 | * | 32 | #else |
33 | * REALLY_SLOW_IO can be defined in ide.c and ide-cd.c, if necessary | 33 | # define SUPPORT_VLB_SYNC 1 |
34 | */ | ||
35 | #define INITIAL_MULT_COUNT 0 /* off=0; on=2,4,8,16,32, etc.. */ | ||
36 | |||
37 | #ifndef SUPPORT_SLOW_DATA_PORTS /* 1 to support slow data ports */ | ||
38 | #define SUPPORT_SLOW_DATA_PORTS 1 /* 0 to reduce kernel size */ | ||
39 | #endif | ||
40 | #ifndef SUPPORT_VLB_SYNC /* 1 to support weird 32-bit chips */ | ||
41 | #define SUPPORT_VLB_SYNC 1 /* 0 to reduce kernel size */ | ||
42 | #endif | ||
43 | #ifndef OK_TO_RESET_CONTROLLER /* 1 needed for good error recovery */ | ||
44 | #define OK_TO_RESET_CONTROLLER 1 /* 0 for use with AH2372A/B interface */ | ||
45 | #endif | ||
46 | |||
47 | #ifndef DISABLE_IRQ_NOSYNC | ||
48 | #define DISABLE_IRQ_NOSYNC 0 | ||
49 | #endif | 34 | #endif |
50 | 35 | ||
51 | /* | 36 | /* |
@@ -55,10 +40,6 @@ | |||
55 | 40 | ||
56 | #define IDE_NO_IRQ (-1) | 41 | #define IDE_NO_IRQ (-1) |
57 | 42 | ||
58 | /* | ||
59 | * "No user-serviceable parts" beyond this point :) | ||
60 | *****************************************************************************/ | ||
61 | |||
62 | typedef unsigned char byte; /* used everywhere */ | 43 | typedef unsigned char byte; /* used everywhere */ |
63 | 44 | ||
64 | /* | 45 | /* |
@@ -103,8 +84,6 @@ typedef unsigned char byte; /* used everywhere */ | |||
103 | #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET | 84 | #define IDE_FEATURE_OFFSET IDE_ERROR_OFFSET |
104 | #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET | 85 | #define IDE_COMMAND_OFFSET IDE_STATUS_OFFSET |
105 | 86 | ||
106 | #define IDE_CONTROL_OFFSET_HOB (7) | ||
107 | |||
108 | #define IDE_DATA_REG (HWIF(drive)->io_ports[IDE_DATA_OFFSET]) | 87 | #define IDE_DATA_REG (HWIF(drive)->io_ports[IDE_DATA_OFFSET]) |
109 | #define IDE_ERROR_REG (HWIF(drive)->io_ports[IDE_ERROR_OFFSET]) | 88 | #define IDE_ERROR_REG (HWIF(drive)->io_ports[IDE_ERROR_OFFSET]) |
110 | #define IDE_NSECTOR_REG (HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET]) | 89 | #define IDE_NSECTOR_REG (HWIF(drive)->io_ports[IDE_NSECTOR_OFFSET]) |
@@ -327,47 +306,16 @@ static inline void ide_init_hwif_ports(hw_regs_t *hw, | |||
327 | typedef union { | 306 | typedef union { |
328 | unsigned all : 8; | 307 | unsigned all : 8; |
329 | struct { | 308 | struct { |
330 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
331 | unsigned set_geometry : 1; | 309 | unsigned set_geometry : 1; |
332 | unsigned recalibrate : 1; | 310 | unsigned recalibrate : 1; |
333 | unsigned set_multmode : 1; | 311 | unsigned set_multmode : 1; |
334 | unsigned set_tune : 1; | 312 | unsigned set_tune : 1; |
335 | unsigned serviced : 1; | 313 | unsigned serviced : 1; |
336 | unsigned reserved : 3; | 314 | unsigned reserved : 3; |
337 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
338 | unsigned reserved : 3; | ||
339 | unsigned serviced : 1; | ||
340 | unsigned set_tune : 1; | ||
341 | unsigned set_multmode : 1; | ||
342 | unsigned recalibrate : 1; | ||
343 | unsigned set_geometry : 1; | ||
344 | #else | ||
345 | #error "Please fix <asm/byteorder.h>" | ||
346 | #endif | ||
347 | } b; | 315 | } b; |
348 | } special_t; | 316 | } special_t; |
349 | 317 | ||
350 | /* | 318 | /* |
351 | * ATA DATA Register Special. | ||
352 | * ATA NSECTOR Count Register(). | ||
353 | * ATAPI Byte Count Register. | ||
354 | */ | ||
355 | typedef union { | ||
356 | unsigned all :16; | ||
357 | struct { | ||
358 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
359 | unsigned low :8; /* LSB */ | ||
360 | unsigned high :8; /* MSB */ | ||
361 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
362 | unsigned high :8; /* MSB */ | ||
363 | unsigned low :8; /* LSB */ | ||
364 | #else | ||
365 | #error "Please fix <asm/byteorder.h>" | ||
366 | #endif | ||
367 | } b; | ||
368 | } ata_nsector_t, ata_data_t, atapi_bcount_t; | ||
369 | |||
370 | /* | ||
371 | * ATA-IDE Select Register, aka Device-Head | 319 | * ATA-IDE Select Register, aka Device-Head |
372 | * | 320 | * |
373 | * head : always zeros here | 321 | * head : always zeros here |
@@ -398,131 +346,6 @@ typedef union { | |||
398 | } select_t, ata_select_t; | 346 | } select_t, ata_select_t; |
399 | 347 | ||
400 | /* | 348 | /* |
401 | * The ATA-IDE Status Register. | ||
402 | * The ATAPI Status Register. | ||
403 | * | ||
404 | * check : Error occurred | ||
405 | * idx : Index Error | ||
406 | * corr : Correctable error occurred | ||
407 | * drq : Data is request by the device | ||
408 | * dsc : Disk Seek Complete : ata | ||
409 | * : Media access command finished : atapi | ||
410 | * df : Device Fault : ata | ||
411 | * : Reserved : atapi | ||
412 | * drdy : Ready, Command Mode Capable : ata | ||
413 | * : Ignored for ATAPI commands : atapi | ||
414 | * bsy : Disk is Busy | ||
415 | * : The device has access to the command block | ||
416 | */ | ||
417 | typedef union { | ||
418 | unsigned all :8; | ||
419 | struct { | ||
420 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
421 | unsigned check :1; | ||
422 | unsigned idx :1; | ||
423 | unsigned corr :1; | ||
424 | unsigned drq :1; | ||
425 | unsigned dsc :1; | ||
426 | unsigned df :1; | ||
427 | unsigned drdy :1; | ||
428 | unsigned bsy :1; | ||
429 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
430 | unsigned bsy :1; | ||
431 | unsigned drdy :1; | ||
432 | unsigned df :1; | ||
433 | unsigned dsc :1; | ||
434 | unsigned drq :1; | ||
435 | unsigned corr :1; | ||
436 | unsigned idx :1; | ||
437 | unsigned check :1; | ||
438 | #else | ||
439 | #error "Please fix <asm/byteorder.h>" | ||
440 | #endif | ||
441 | } b; | ||
442 | } ata_status_t, atapi_status_t; | ||
443 | |||
444 | /* | ||
445 | * ATAPI Feature Register | ||
446 | * | ||
447 | * dma : Using DMA or PIO | ||
448 | * reserved321 : Reserved | ||
449 | * reserved654 : Reserved (Tag Type) | ||
450 | * reserved7 : Reserved | ||
451 | */ | ||
452 | typedef union { | ||
453 | unsigned all :8; | ||
454 | struct { | ||
455 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
456 | unsigned dma :1; | ||
457 | unsigned reserved321 :3; | ||
458 | unsigned reserved654 :3; | ||
459 | unsigned reserved7 :1; | ||
460 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
461 | unsigned reserved7 :1; | ||
462 | unsigned reserved654 :3; | ||
463 | unsigned reserved321 :3; | ||
464 | unsigned dma :1; | ||
465 | #else | ||
466 | #error "Please fix <asm/byteorder.h>" | ||
467 | #endif | ||
468 | } b; | ||
469 | } atapi_feature_t; | ||
470 | |||
471 | /* | ||
472 | * ATAPI Interrupt Reason Register. | ||
473 | * | ||
474 | * cod : Information transferred is command (1) or data (0) | ||
475 | * io : The device requests us to read (1) or write (0) | ||
476 | * reserved : Reserved | ||
477 | */ | ||
478 | typedef union { | ||
479 | unsigned all :8; | ||
480 | struct { | ||
481 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
482 | unsigned cod :1; | ||
483 | unsigned io :1; | ||
484 | unsigned reserved :6; | ||
485 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
486 | unsigned reserved :6; | ||
487 | unsigned io :1; | ||
488 | unsigned cod :1; | ||
489 | #else | ||
490 | #error "Please fix <asm/byteorder.h>" | ||
491 | #endif | ||
492 | } b; | ||
493 | } atapi_ireason_t; | ||
494 | |||
495 | /* | ||
496 | * The ATAPI error register. | ||
497 | * | ||
498 | * ili : Illegal Length Indication | ||
499 | * eom : End Of Media Detected | ||
500 | * abrt : Aborted command - As defined by ATA | ||
501 | * mcr : Media Change Requested - As defined by ATA | ||
502 | * sense_key : Sense key of the last failed packet command | ||
503 | */ | ||
504 | typedef union { | ||
505 | unsigned all :8; | ||
506 | struct { | ||
507 | #if defined(__LITTLE_ENDIAN_BITFIELD) | ||
508 | unsigned ili :1; | ||
509 | unsigned eom :1; | ||
510 | unsigned abrt :1; | ||
511 | unsigned mcr :1; | ||
512 | unsigned sense_key :4; | ||
513 | #elif defined(__BIG_ENDIAN_BITFIELD) | ||
514 | unsigned sense_key :4; | ||
515 | unsigned mcr :1; | ||
516 | unsigned abrt :1; | ||
517 | unsigned eom :1; | ||
518 | unsigned ili :1; | ||
519 | #else | ||
520 | #error "Please fix <asm/byteorder.h>" | ||
521 | #endif | ||
522 | } b; | ||
523 | } atapi_error_t; | ||
524 | |||
525 | /* | ||
526 | * Status returned from various ide_ functions | 349 | * Status returned from various ide_ functions |
527 | */ | 350 | */ |
528 | typedef enum { | 351 | typedef enum { |
@@ -701,8 +524,6 @@ typedef struct hwif_s { | |||
701 | void (*pre_reset)(ide_drive_t *); | 524 | void (*pre_reset)(ide_drive_t *); |
702 | /* routine to reset controller after a disk reset */ | 525 | /* routine to reset controller after a disk reset */ |
703 | void (*resetproc)(ide_drive_t *); | 526 | void (*resetproc)(ide_drive_t *); |
704 | /* special interrupt handling for shared pci interrupts */ | ||
705 | void (*intrproc)(ide_drive_t *); | ||
706 | /* special host masking for drive selection */ | 527 | /* special host masking for drive selection */ |
707 | void (*maskproc)(ide_drive_t *, int); | 528 | void (*maskproc)(ide_drive_t *, int); |
708 | /* check host's drive quirk list */ | 529 | /* check host's drive quirk list */ |
@@ -766,7 +587,6 @@ typedef struct hwif_s { | |||
766 | int rqsize; /* max sectors per request */ | 587 | int rqsize; /* max sectors per request */ |
767 | int irq; /* our irq number */ | 588 | int irq; /* our irq number */ |
768 | 589 | ||
769 | unsigned long dma_master; /* reference base addr dmabase */ | ||
770 | unsigned long dma_base; /* base addr for dma ports */ | 590 | unsigned long dma_base; /* base addr for dma ports */ |
771 | unsigned long dma_command; /* dma command register */ | 591 | unsigned long dma_command; /* dma command register */ |
772 | unsigned long dma_vendor1; /* dma vendor 1 register */ | 592 | unsigned long dma_vendor1; /* dma vendor 1 register */ |
@@ -806,7 +626,6 @@ typedef struct hwif_s { | |||
806 | /* | 626 | /* |
807 | * internal ide interrupt handler type | 627 | * internal ide interrupt handler type |
808 | */ | 628 | */ |
809 | typedef ide_startstop_t (ide_pre_handler_t)(ide_drive_t *, struct request *); | ||
810 | typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); | 629 | typedef ide_startstop_t (ide_handler_t)(ide_drive_t *); |
811 | typedef int (ide_expiry_t)(ide_drive_t *); | 630 | typedef int (ide_expiry_t)(ide_drive_t *); |
812 | 631 | ||
@@ -1020,7 +839,8 @@ int ide_end_dequeued_request(ide_drive_t *drive, struct request *rq, | |||
1020 | 839 | ||
1021 | extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry); | 840 | extern void ide_set_handler (ide_drive_t *drive, ide_handler_t *handler, unsigned int timeout, ide_expiry_t *expiry); |
1022 | 841 | ||
1023 | extern void ide_execute_command(ide_drive_t *, task_ioreg_t cmd, ide_handler_t *, unsigned int, ide_expiry_t *); | 842 | void ide_execute_command(ide_drive_t *, u8, ide_handler_t *, unsigned int, |
843 | ide_expiry_t *); | ||
1024 | 844 | ||
1025 | ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8); | 845 | ide_startstop_t __ide_error(ide_drive_t *, struct request *, u8, u8); |
1026 | 846 | ||
@@ -1062,52 +882,114 @@ extern void ide_end_drive_cmd(ide_drive_t *, u8, u8); | |||
1062 | */ | 882 | */ |
1063 | extern int ide_wait_cmd(ide_drive_t *, u8, u8, u8, u8, u8 *); | 883 | extern int ide_wait_cmd(ide_drive_t *, u8, u8, u8, u8, u8 *); |
1064 | 884 | ||
885 | enum { | ||
886 | IDE_TFLAG_LBA48 = (1 << 0), | ||
887 | IDE_TFLAG_NO_SELECT_MASK = (1 << 1), | ||
888 | IDE_TFLAG_FLAGGED = (1 << 2), | ||
889 | IDE_TFLAG_OUT_DATA = (1 << 3), | ||
890 | IDE_TFLAG_OUT_HOB_FEATURE = (1 << 4), | ||
891 | IDE_TFLAG_OUT_HOB_NSECT = (1 << 5), | ||
892 | IDE_TFLAG_OUT_HOB_LBAL = (1 << 6), | ||
893 | IDE_TFLAG_OUT_HOB_LBAM = (1 << 7), | ||
894 | IDE_TFLAG_OUT_HOB_LBAH = (1 << 8), | ||
895 | IDE_TFLAG_OUT_HOB = IDE_TFLAG_OUT_HOB_FEATURE | | ||
896 | IDE_TFLAG_OUT_HOB_NSECT | | ||
897 | IDE_TFLAG_OUT_HOB_LBAL | | ||
898 | IDE_TFLAG_OUT_HOB_LBAM | | ||
899 | IDE_TFLAG_OUT_HOB_LBAH, | ||
900 | IDE_TFLAG_OUT_FEATURE = (1 << 9), | ||
901 | IDE_TFLAG_OUT_NSECT = (1 << 10), | ||
902 | IDE_TFLAG_OUT_LBAL = (1 << 11), | ||
903 | IDE_TFLAG_OUT_LBAM = (1 << 12), | ||
904 | IDE_TFLAG_OUT_LBAH = (1 << 13), | ||
905 | IDE_TFLAG_OUT_TF = IDE_TFLAG_OUT_FEATURE | | ||
906 | IDE_TFLAG_OUT_NSECT | | ||
907 | IDE_TFLAG_OUT_LBAL | | ||
908 | IDE_TFLAG_OUT_LBAM | | ||
909 | IDE_TFLAG_OUT_LBAH, | ||
910 | IDE_TFLAG_OUT_DEVICE = (1 << 14), | ||
911 | IDE_TFLAG_WRITE = (1 << 15), | ||
912 | IDE_TFLAG_FLAGGED_SET_IN_FLAGS = (1 << 16), | ||
913 | IDE_TFLAG_IN_DATA = (1 << 17), | ||
914 | IDE_TFLAG_CUSTOM_HANDLER = (1 << 18), | ||
915 | IDE_TFLAG_DMA_PIO_FALLBACK = (1 << 19), | ||
916 | IDE_TFLAG_IN_HOB_FEATURE = (1 << 20), | ||
917 | IDE_TFLAG_IN_HOB_NSECT = (1 << 21), | ||
918 | IDE_TFLAG_IN_HOB_LBAL = (1 << 22), | ||
919 | IDE_TFLAG_IN_HOB_LBAM = (1 << 23), | ||
920 | IDE_TFLAG_IN_HOB_LBAH = (1 << 24), | ||
921 | IDE_TFLAG_IN_HOB_LBA = IDE_TFLAG_IN_HOB_LBAL | | ||
922 | IDE_TFLAG_IN_HOB_LBAM | | ||
923 | IDE_TFLAG_IN_HOB_LBAH, | ||
924 | IDE_TFLAG_IN_HOB = IDE_TFLAG_IN_HOB_FEATURE | | ||
925 | IDE_TFLAG_IN_HOB_NSECT | | ||
926 | IDE_TFLAG_IN_HOB_LBA, | ||
927 | IDE_TFLAG_IN_NSECT = (1 << 25), | ||
928 | IDE_TFLAG_IN_LBAL = (1 << 26), | ||
929 | IDE_TFLAG_IN_LBAM = (1 << 27), | ||
930 | IDE_TFLAG_IN_LBAH = (1 << 28), | ||
931 | IDE_TFLAG_IN_LBA = IDE_TFLAG_IN_LBAL | | ||
932 | IDE_TFLAG_IN_LBAM | | ||
933 | IDE_TFLAG_IN_LBAH, | ||
934 | IDE_TFLAG_IN_TF = IDE_TFLAG_IN_NSECT | | ||
935 | IDE_TFLAG_IN_LBA, | ||
936 | IDE_TFLAG_IN_DEVICE = (1 << 29), | ||
937 | }; | ||
938 | |||
939 | struct ide_taskfile { | ||
940 | u8 hob_data; /* 0: high data byte (for TASKFILE IOCTL) */ | ||
941 | |||
942 | u8 hob_feature; /* 1-5: additional data to support LBA48 */ | ||
943 | u8 hob_nsect; | ||
944 | u8 hob_lbal; | ||
945 | u8 hob_lbam; | ||
946 | u8 hob_lbah; | ||
947 | |||
948 | u8 data; /* 6: low data byte (for TASKFILE IOCTL) */ | ||
949 | |||
950 | union { /*  7: */ | ||
951 | u8 error; /* read: error */ | ||
952 | u8 feature; /* write: feature */ | ||
953 | }; | ||
954 | |||
955 | u8 nsect; /* 8: number of sectors */ | ||
956 | u8 lbal; /* 9: LBA low */ | ||
957 | u8 lbam; /* 10: LBA mid */ | ||
958 | u8 lbah; /* 11: LBA high */ | ||
959 | |||
960 | u8 device; /* 12: device select */ | ||
961 | |||
962 | union { /* 13: */ | ||
963 | u8 status; /*  read: status  */ | ||
964 | u8 command; /* write: command */ | ||
965 | }; | ||
966 | }; | ||
967 | |||
1065 | typedef struct ide_task_s { | 968 | typedef struct ide_task_s { |
1066 | /* | 969 | union { |
1067 | * struct hd_drive_task_hdr tf; | 970 | struct ide_taskfile tf; |
1068 | * task_struct_t tf; | 971 | u8 tf_array[14]; |
1069 | * struct hd_drive_hob_hdr hobf; | 972 | }; |
1070 | * hob_struct_t hobf; | 973 | u32 tf_flags; |
1071 | */ | ||
1072 | task_ioreg_t tfRegister[8]; | ||
1073 | task_ioreg_t hobRegister[8]; | ||
1074 | ide_reg_valid_t tf_out_flags; | ||
1075 | ide_reg_valid_t tf_in_flags; | ||
1076 | int data_phase; | 974 | int data_phase; |
1077 | int command_type; | ||
1078 | ide_pre_handler_t *prehandler; | ||
1079 | ide_handler_t *handler; | ||
1080 | struct request *rq; /* copy of request */ | 975 | struct request *rq; /* copy of request */ |
1081 | void *special; /* valid_t generally */ | 976 | void *special; /* valid_t generally */ |
1082 | } ide_task_t; | 977 | } ide_task_t; |
1083 | 978 | ||
1084 | extern u32 ide_read_24(ide_drive_t *); | 979 | void ide_tf_load(ide_drive_t *, ide_task_t *); |
980 | void ide_tf_read(ide_drive_t *, ide_task_t *); | ||
1085 | 981 | ||
1086 | extern void SELECT_DRIVE(ide_drive_t *); | 982 | extern void SELECT_DRIVE(ide_drive_t *); |
1087 | extern void SELECT_INTERRUPT(ide_drive_t *); | ||
1088 | extern void SELECT_MASK(ide_drive_t *, int); | 983 | extern void SELECT_MASK(ide_drive_t *, int); |
1089 | extern void QUIRK_LIST(ide_drive_t *); | ||
1090 | 984 | ||
1091 | extern int drive_is_ready(ide_drive_t *); | 985 | extern int drive_is_ready(ide_drive_t *); |
1092 | 986 | ||
1093 | /* | 987 | void ide_pktcmd_tf_load(ide_drive_t *, u32, u16, u8); |
1094 | * taskfile io for disks for now...and builds request from ide_ioctl | ||
1095 | */ | ||
1096 | extern ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); | ||
1097 | |||
1098 | /* | ||
1099 | * Special Flagged Register Validation Caller | ||
1100 | */ | ||
1101 | extern ide_startstop_t flagged_taskfile(ide_drive_t *, ide_task_t *); | ||
1102 | 988 | ||
1103 | extern ide_startstop_t set_multmode_intr(ide_drive_t *); | 989 | ide_startstop_t do_rw_taskfile(ide_drive_t *, ide_task_t *); |
1104 | extern ide_startstop_t set_geometry_intr(ide_drive_t *); | ||
1105 | extern ide_startstop_t recal_intr(ide_drive_t *); | ||
1106 | extern ide_startstop_t task_no_data_intr(ide_drive_t *); | ||
1107 | extern ide_startstop_t task_in_intr(ide_drive_t *); | ||
1108 | extern ide_startstop_t pre_task_out_intr(ide_drive_t *, struct request *); | ||
1109 | 990 | ||
1110 | extern int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *); | 991 | int ide_raw_taskfile(ide_drive_t *, ide_task_t *, u8 *, u16); |
992 | int ide_no_data_taskfile(ide_drive_t *, ide_task_t *); | ||
1111 | 993 | ||
1112 | int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); | 994 | int ide_taskfile_ioctl(ide_drive_t *, unsigned int, unsigned long); |
1113 | int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long); | 995 | int ide_cmd_ioctl(ide_drive_t *, unsigned int, unsigned long); |
@@ -1212,6 +1094,7 @@ enum { | |||
1212 | IDE_HFLAG_IO_32BIT = (1 << 24), | 1094 | IDE_HFLAG_IO_32BIT = (1 << 24), |
1213 | /* unmask IRQs */ | 1095 | /* unmask IRQs */ |
1214 | IDE_HFLAG_UNMASK_IRQS = (1 << 25), | 1096 | IDE_HFLAG_UNMASK_IRQS = (1 << 25), |
1097 | IDE_HFLAG_ABUSE_SET_DMA_MODE = (1 << 26), | ||
1215 | }; | 1098 | }; |
1216 | 1099 | ||
1217 | #ifdef CONFIG_BLK_DEV_OFFBOARD | 1100 | #ifdef CONFIG_BLK_DEV_OFFBOARD |
@@ -1229,7 +1112,7 @@ struct ide_port_info { | |||
1229 | void (*fixup)(ide_hwif_t *); | 1112 | void (*fixup)(ide_hwif_t *); |
1230 | ide_pci_enablebit_t enablebits[2]; | 1113 | ide_pci_enablebit_t enablebits[2]; |
1231 | hwif_chipset_t chipset; | 1114 | hwif_chipset_t chipset; |
1232 | unsigned int extra; | 1115 | u8 extra; |
1233 | u32 host_flags; | 1116 | u32 host_flags; |
1234 | u8 pio_mask; | 1117 | u8 pio_mask; |
1235 | u8 swdma_mask; | 1118 | u8 swdma_mask; |
@@ -1356,6 +1239,7 @@ static inline int ide_dev_is_sata(struct hd_driveid *id) | |||
1356 | return 0; | 1239 | return 0; |
1357 | } | 1240 | } |
1358 | 1241 | ||
1242 | u64 ide_get_lba_addr(struct ide_taskfile *, int); | ||
1359 | u8 ide_dump_status(ide_drive_t *, const char *, u8); | 1243 | u8 ide_dump_status(ide_drive_t *, const char *, u8); |
1360 | 1244 | ||
1361 | typedef struct ide_pio_timings_s { | 1245 | typedef struct ide_pio_timings_s { |
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index cae35b6b9aec..796019b22b6f 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -132,9 +132,12 @@ extern struct group_info init_groups; | |||
132 | .cpus_allowed = CPU_MASK_ALL, \ | 132 | .cpus_allowed = CPU_MASK_ALL, \ |
133 | .mm = NULL, \ | 133 | .mm = NULL, \ |
134 | .active_mm = &init_mm, \ | 134 | .active_mm = &init_mm, \ |
135 | .run_list = LIST_HEAD_INIT(tsk.run_list), \ | 135 | .rt = { \ |
136 | .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ | ||
137 | .time_slice = HZ, \ | ||
138 | .nr_cpus_allowed = NR_CPUS, \ | ||
139 | }, \ | ||
136 | .ioprio = 0, \ | 140 | .ioprio = 0, \ |
137 | .time_slice = HZ, \ | ||
138 | .tasks = LIST_HEAD_INIT(tsk.tasks), \ | 141 | .tasks = LIST_HEAD_INIT(tsk.tasks), \ |
139 | .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ | 142 | .ptrace_children= LIST_HEAD_INIT(tsk.ptrace_children), \ |
140 | .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \ | 143 | .ptrace_list = LIST_HEAD_INIT(tsk.ptrace_list), \ |
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 2306920fa388..c3db4a00f1fa 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -256,6 +256,7 @@ enum | |||
256 | #ifdef CONFIG_HIGH_RES_TIMERS | 256 | #ifdef CONFIG_HIGH_RES_TIMERS |
257 | HRTIMER_SOFTIRQ, | 257 | HRTIMER_SOFTIRQ, |
258 | #endif | 258 | #endif |
259 | RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */ | ||
259 | }; | 260 | }; |
260 | 261 | ||
261 | /* softirq mask and active fields moved to irq_cpustat_t in | 262 | /* softirq mask and active fields moved to irq_cpustat_t in |
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 8b080024bbc1..7ba9e47bf061 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -29,6 +29,12 @@ | |||
29 | # define SHIFT_HZ 9 | 29 | # define SHIFT_HZ 9 |
30 | #elif HZ >= 768 && HZ < 1536 | 30 | #elif HZ >= 768 && HZ < 1536 |
31 | # define SHIFT_HZ 10 | 31 | # define SHIFT_HZ 10 |
32 | #elif HZ >= 1536 && HZ < 3072 | ||
33 | # define SHIFT_HZ 11 | ||
34 | #elif HZ >= 3072 && HZ < 6144 | ||
35 | # define SHIFT_HZ 12 | ||
36 | #elif HZ >= 6144 && HZ < 12288 | ||
37 | # define SHIFT_HZ 13 | ||
32 | #else | 38 | #else |
33 | # error You lose. | 39 | # error You lose. |
34 | #endif | 40 | #endif |
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 94bc99656963..a7283c9beadf 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -105,8 +105,8 @@ struct user; | |||
105 | * supposed to. | 105 | * supposed to. |
106 | */ | 106 | */ |
107 | #ifdef CONFIG_PREEMPT_VOLUNTARY | 107 | #ifdef CONFIG_PREEMPT_VOLUNTARY |
108 | extern int cond_resched(void); | 108 | extern int _cond_resched(void); |
109 | # define might_resched() cond_resched() | 109 | # define might_resched() _cond_resched() |
110 | #else | 110 | #else |
111 | # define might_resched() do { } while (0) | 111 | # define might_resched() do { } while (0) |
112 | #endif | 112 | #endif |
diff --git a/include/linux/latencytop.h b/include/linux/latencytop.h
new file mode 100644
index 000000000000..901c2d6377a8
--- /dev/null
+++ b/include/linux/latencytop.h
@@ -0,0 +1,44 @@ | |||
1 | /* | ||
2 | * latencytop.h: Infrastructure for displaying latency | ||
3 | * | ||
4 | * (C) Copyright 2008 Intel Corporation | ||
5 | * Author: Arjan van de Ven <arjan@linux.intel.com> | ||
6 | * | ||
7 | */ | ||
8 | |||
9 | #ifndef _INCLUDE_GUARD_LATENCYTOP_H_ | ||
10 | #define _INCLUDE_GUARD_LATENCYTOP_H_ | ||
11 | |||
12 | #ifdef CONFIG_LATENCYTOP | ||
13 | |||
14 | #define LT_SAVECOUNT 32 | ||
15 | #define LT_BACKTRACEDEPTH 12 | ||
16 | |||
17 | struct latency_record { | ||
18 | unsigned long backtrace[LT_BACKTRACEDEPTH]; | ||
19 | unsigned int count; | ||
20 | unsigned long time; | ||
21 | unsigned long max; | ||
22 | }; | ||
23 | |||
24 | |||
25 | struct task_struct; | ||
26 | |||
27 | void account_scheduler_latency(struct task_struct *task, int usecs, int inter); | ||
28 | |||
29 | void clear_all_latency_tracing(struct task_struct *p); | ||
30 | |||
31 | #else | ||
32 | |||
33 | static inline void | ||
34 | account_scheduler_latency(struct task_struct *task, int usecs, int inter) | ||
35 | { | ||
36 | } | ||
37 | |||
38 | static inline void clear_all_latency_tracing(struct task_struct *p) | ||
39 | { | ||
40 | } | ||
41 | |||
42 | #endif | ||
43 | |||
44 | #endif | ||
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 0c40cc0b4a36..5dfbc684ce7d 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -207,9 +207,7 @@ static inline int notifier_to_errno(int ret) | |||
207 | #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ | 207 | #define CPU_DOWN_PREPARE 0x0005 /* CPU (unsigned)v going down */ |
208 | #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ | 208 | #define CPU_DOWN_FAILED 0x0006 /* CPU (unsigned)v NOT going down */ |
209 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ | 209 | #define CPU_DEAD 0x0007 /* CPU (unsigned)v dead */ |
210 | #define CPU_LOCK_ACQUIRE 0x0008 /* Acquire all hotcpu locks */ | 210 | #define CPU_DYING 0x0008 /* CPU (unsigned)v not running any task, |
211 | #define CPU_LOCK_RELEASE 0x0009 /* Release all hotcpu locks */ | ||
212 | #define CPU_DYING 0x000A /* CPU (unsigned)v not running any task, | ||
213 | * not handling interrupts, soon dead */ | 211 | * not handling interrupts, soon dead */ |
214 | 212 | ||
215 | /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend | 213 | /* Used for CPU hotplug events occuring while tasks are frozen due to a suspend |
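With CPU_LOCK_ACQUIRE/CPU_LOCK_RELEASE removed above, hotplug callbacks only see lifecycle events such as CPU_DYING (now 0x0008). A hedged sketch of a notifier reacting to that event; my_cpu_callback() is illustrative, not from this patch:

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	if (action == CPU_DYING)
		printk(KERN_INFO "cpu%u is dying\n", cpu);

	return NOTIFY_OK;
}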
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
new file mode 100644
index 000000000000..4d6624260b4c
--- /dev/null
+++ b/include/linux/rcuclassic.h
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (classic version) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright IBM Corporation, 2001 | ||
19 | * | ||
20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | ||
21 | * | ||
22 | * Based on the original work by Paul McKenney <paulmck@us.ibm.com> | ||
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
24 | * Papers: | ||
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | ||
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | ||
27 | * | ||
28 | * For detailed explanation of Read-Copy Update mechanism see - | ||
29 | * Documentation/RCU | ||
30 | * | ||
31 | */ | ||
32 | |||
33 | #ifndef __LINUX_RCUCLASSIC_H | ||
34 | #define __LINUX_RCUCLASSIC_H | ||
35 | |||
36 | #ifdef __KERNEL__ | ||
37 | |||
38 | #include <linux/cache.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/threads.h> | ||
41 | #include <linux/percpu.h> | ||
42 | #include <linux/cpumask.h> | ||
43 | #include <linux/seqlock.h> | ||
44 | |||
45 | |||
46 | /* Global control variables for rcupdate callback mechanism. */ | ||
47 | struct rcu_ctrlblk { | ||
48 | long cur; /* Current batch number. */ | ||
49 | long completed; /* Number of the last completed batch */ | ||
50 | int next_pending; /* Is the next batch already waiting? */ | ||
51 | |||
52 | int signaled; | ||
53 | |||
54 | spinlock_t lock ____cacheline_internodealigned_in_smp; | ||
55 | cpumask_t cpumask; /* CPUs that need to switch in order */ | ||
56 | /* for current batch to proceed. */ | ||
57 | } ____cacheline_internodealigned_in_smp; | ||
58 | |||
59 | /* Is batch a before batch b ? */ | ||
60 | static inline int rcu_batch_before(long a, long b) | ||
61 | { | ||
62 | return (a - b) < 0; | ||
63 | } | ||
64 | |||
65 | /* Is batch a after batch b ? */ | ||
66 | static inline int rcu_batch_after(long a, long b) | ||
67 | { | ||
68 | return (a - b) > 0; | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * Per-CPU data for Read-Copy UPdate. | ||
73 | * nxtlist - new callbacks are added here | ||
74 | * curlist - current batch for which quiescent cycle started if any | ||
75 | */ | ||
76 | struct rcu_data { | ||
77 | /* 1) quiescent state handling : */ | ||
78 | long quiescbatch; /* Batch # for grace period */ | ||
79 | int passed_quiesc; /* User-mode/idle loop etc. */ | ||
80 | int qs_pending; /* core waits for quiesc state */ | ||
81 | |||
82 | /* 2) batch handling */ | ||
83 | long batch; /* Batch # for current RCU batch */ | ||
84 | struct rcu_head *nxtlist; | ||
85 | struct rcu_head **nxttail; | ||
86 | long qlen; /* # of queued callbacks */ | ||
87 | struct rcu_head *curlist; | ||
88 | struct rcu_head **curtail; | ||
89 | struct rcu_head *donelist; | ||
90 | struct rcu_head **donetail; | ||
91 | long blimit; /* Upper limit on a processed batch */ | ||
92 | int cpu; | ||
93 | struct rcu_head barrier; | ||
94 | }; | ||
95 | |||
96 | DECLARE_PER_CPU(struct rcu_data, rcu_data); | ||
97 | DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); | ||
98 | |||
99 | /* | ||
100 | * Increment the quiescent state counter. | ||
101 | * The counter is a bit degenerated: We do not need to know | ||
102 | * how many quiescent states passed, just if there was at least | ||
103 | * one since the start of the grace period. Thus just a flag. | ||
104 | */ | ||
105 | static inline void rcu_qsctr_inc(int cpu) | ||
106 | { | ||
107 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
108 | rdp->passed_quiesc = 1; | ||
109 | } | ||
110 | static inline void rcu_bh_qsctr_inc(int cpu) | ||
111 | { | ||
112 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | ||
113 | rdp->passed_quiesc = 1; | ||
114 | } | ||
115 | |||
116 | extern int rcu_pending(int cpu); | ||
117 | extern int rcu_needs_cpu(int cpu); | ||
118 | |||
119 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
120 | extern struct lockdep_map rcu_lock_map; | ||
121 | # define rcu_read_acquire() \ | ||
122 | lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) | ||
123 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | ||
124 | #else | ||
125 | # define rcu_read_acquire() do { } while (0) | ||
126 | # define rcu_read_release() do { } while (0) | ||
127 | #endif | ||
128 | |||
129 | #define __rcu_read_lock() \ | ||
130 | do { \ | ||
131 | preempt_disable(); \ | ||
132 | __acquire(RCU); \ | ||
133 | rcu_read_acquire(); \ | ||
134 | } while (0) | ||
135 | #define __rcu_read_unlock() \ | ||
136 | do { \ | ||
137 | rcu_read_release(); \ | ||
138 | __release(RCU); \ | ||
139 | preempt_enable(); \ | ||
140 | } while (0) | ||
141 | #define __rcu_read_lock_bh() \ | ||
142 | do { \ | ||
143 | local_bh_disable(); \ | ||
144 | __acquire(RCU_BH); \ | ||
145 | rcu_read_acquire(); \ | ||
146 | } while (0) | ||
147 | #define __rcu_read_unlock_bh() \ | ||
148 | do { \ | ||
149 | rcu_read_release(); \ | ||
150 | __release(RCU_BH); \ | ||
151 | local_bh_enable(); \ | ||
152 | } while (0) | ||
153 | |||
154 | #define __synchronize_sched() synchronize_rcu() | ||
155 | |||
156 | extern void __rcu_init(void); | ||
157 | extern void rcu_check_callbacks(int cpu, int user); | ||
158 | extern void rcu_restart_cpu(int cpu); | ||
159 | |||
160 | extern long rcu_batches_completed(void); | ||
161 | extern long rcu_batches_completed_bh(void); | ||
162 | |||
163 | #endif /* __KERNEL__ */ | ||
164 | #endif /* __LINUX_RCUCLASSIC_H */ | ||
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index cc24a01df940..d32c14de270e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -15,7 +15,7 @@ | |||
15 | * along with this program; if not, write to the Free Software | 15 | * along with this program; if not, write to the Free Software |
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | 16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. |
17 | * | 17 | * |
18 | * Copyright (C) IBM Corporation, 2001 | 18 | * Copyright IBM Corporation, 2001 |
19 | * | 19 | * |
20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> | 20 | * Author: Dipankar Sarma <dipankar@in.ibm.com> |
21 | * | 21 | * |
@@ -53,96 +53,18 @@ struct rcu_head { | |||
53 | void (*func)(struct rcu_head *head); | 53 | void (*func)(struct rcu_head *head); |
54 | }; | 54 | }; |
55 | 55 | ||
56 | #ifdef CONFIG_CLASSIC_RCU | ||
57 | #include <linux/rcuclassic.h> | ||
58 | #else /* #ifdef CONFIG_CLASSIC_RCU */ | ||
59 | #include <linux/rcupreempt.h> | ||
60 | #endif /* #else #ifdef CONFIG_CLASSIC_RCU */ | ||
61 | |||
56 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } | 62 | #define RCU_HEAD_INIT { .next = NULL, .func = NULL } |
57 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT | 63 | #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT |
58 | #define INIT_RCU_HEAD(ptr) do { \ | 64 | #define INIT_RCU_HEAD(ptr) do { \ |
59 | (ptr)->next = NULL; (ptr)->func = NULL; \ | 65 | (ptr)->next = NULL; (ptr)->func = NULL; \ |
60 | } while (0) | 66 | } while (0) |
61 | 67 | ||
62 | |||
63 | |||
64 | /* Global control variables for rcupdate callback mechanism. */ | ||
65 | struct rcu_ctrlblk { | ||
66 | long cur; /* Current batch number. */ | ||
67 | long completed; /* Number of the last completed batch */ | ||
68 | int next_pending; /* Is the next batch already waiting? */ | ||
69 | |||
70 | int signaled; | ||
71 | |||
72 | spinlock_t lock ____cacheline_internodealigned_in_smp; | ||
73 | cpumask_t cpumask; /* CPUs that need to switch in order */ | ||
74 | /* for current batch to proceed. */ | ||
75 | } ____cacheline_internodealigned_in_smp; | ||
76 | |||
77 | /* Is batch a before batch b ? */ | ||
78 | static inline int rcu_batch_before(long a, long b) | ||
79 | { | ||
80 | return (a - b) < 0; | ||
81 | } | ||
82 | |||
83 | /* Is batch a after batch b ? */ | ||
84 | static inline int rcu_batch_after(long a, long b) | ||
85 | { | ||
86 | return (a - b) > 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Per-CPU data for Read-Copy UPdate. | ||
91 | * nxtlist - new callbacks are added here | ||
92 | * curlist - current batch for which quiescent cycle started if any | ||
93 | */ | ||
94 | struct rcu_data { | ||
95 | /* 1) quiescent state handling : */ | ||
96 | long quiescbatch; /* Batch # for grace period */ | ||
97 | int passed_quiesc; /* User-mode/idle loop etc. */ | ||
98 | int qs_pending; /* core waits for quiesc state */ | ||
99 | |||
100 | /* 2) batch handling */ | ||
101 | long batch; /* Batch # for current RCU batch */ | ||
102 | struct rcu_head *nxtlist; | ||
103 | struct rcu_head **nxttail; | ||
104 | long qlen; /* # of queued callbacks */ | ||
105 | struct rcu_head *curlist; | ||
106 | struct rcu_head **curtail; | ||
107 | struct rcu_head *donelist; | ||
108 | struct rcu_head **donetail; | ||
109 | long blimit; /* Upper limit on a processed batch */ | ||
110 | int cpu; | ||
111 | struct rcu_head barrier; | ||
112 | }; | ||
113 | |||
114 | DECLARE_PER_CPU(struct rcu_data, rcu_data); | ||
115 | DECLARE_PER_CPU(struct rcu_data, rcu_bh_data); | ||
116 | |||
117 | /* | ||
118 | * Increment the quiescent state counter. | ||
119 | * The counter is a bit degenerated: We do not need to know | ||
120 | * how many quiescent states passed, just if there was at least | ||
121 | * one since the start of the grace period. Thus just a flag. | ||
122 | */ | ||
123 | static inline void rcu_qsctr_inc(int cpu) | ||
124 | { | ||
125 | struct rcu_data *rdp = &per_cpu(rcu_data, cpu); | ||
126 | rdp->passed_quiesc = 1; | ||
127 | } | ||
128 | static inline void rcu_bh_qsctr_inc(int cpu) | ||
129 | { | ||
130 | struct rcu_data *rdp = &per_cpu(rcu_bh_data, cpu); | ||
131 | rdp->passed_quiesc = 1; | ||
132 | } | ||
133 | |||
134 | extern int rcu_pending(int cpu); | ||
135 | extern int rcu_needs_cpu(int cpu); | ||
136 | |||
137 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
138 | extern struct lockdep_map rcu_lock_map; | ||
139 | # define rcu_read_acquire() lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_) | ||
140 | # define rcu_read_release() lock_release(&rcu_lock_map, 1, _THIS_IP_) | ||
141 | #else | ||
142 | # define rcu_read_acquire() do { } while (0) | ||
143 | # define rcu_read_release() do { } while (0) | ||
144 | #endif | ||
145 | |||
146 | /** | 68 | /** |
147 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. | 69 | * rcu_read_lock - mark the beginning of an RCU read-side critical section. |
148 | * | 70 | * |
@@ -172,24 +94,13 @@ extern struct lockdep_map rcu_lock_map; | |||
172 | * | 94 | * |
173 | * It is illegal to block while in an RCU read-side critical section. | 95 | * It is illegal to block while in an RCU read-side critical section. |
174 | */ | 96 | */ |
175 | #define rcu_read_lock() \ | 97 | #define rcu_read_lock() __rcu_read_lock() |
176 | do { \ | ||
177 | preempt_disable(); \ | ||
178 | __acquire(RCU); \ | ||
179 | rcu_read_acquire(); \ | ||
180 | } while(0) | ||
181 | 98 | ||
182 | /** | 99 | /** |
183 | * rcu_read_unlock - marks the end of an RCU read-side critical section. | 100 | * rcu_read_unlock - marks the end of an RCU read-side critical section. |
184 | * | 101 | * |
185 | * See rcu_read_lock() for more information. | 102 | * See rcu_read_lock() for more information. |
186 | */ | 103 | */ |
187 | #define rcu_read_unlock() \ | ||
188 | do { \ | ||
189 | rcu_read_release(); \ | ||
190 | __release(RCU); \ | ||
191 | preempt_enable(); \ | ||
192 | } while(0) | ||
193 | 104 | ||
194 | /* | 105 | /* |
195 | * So where is rcu_write_lock()? It does not exist, as there is no | 106 | * So where is rcu_write_lock()? It does not exist, as there is no |
@@ -200,6 +111,7 @@ extern struct lockdep_map rcu_lock_map; | |||
200 | * used as well. RCU does not care how the writers keep out of each | 111 | * used as well. RCU does not care how the writers keep out of each |
201 | * others' way, as long as they do so. | 112 | * others' way, as long as they do so. |
202 | */ | 113 | */ |
114 | #define rcu_read_unlock() __rcu_read_unlock() | ||
203 | 115 | ||
204 | /** | 116 | /** |
205 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section | 117 | * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section |
@@ -212,24 +124,14 @@ extern struct lockdep_map rcu_lock_map; | |||
212 | * can use just rcu_read_lock(). | 124 | * can use just rcu_read_lock(). |
213 | * | 125 | * |
214 | */ | 126 | */ |
215 | #define rcu_read_lock_bh() \ | 127 | #define rcu_read_lock_bh() __rcu_read_lock_bh() |
216 | do { \ | ||
217 | local_bh_disable(); \ | ||
218 | __acquire(RCU_BH); \ | ||
219 | rcu_read_acquire(); \ | ||
220 | } while(0) | ||
221 | 128 | ||
222 | /* | 129 | /* |
223 | * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section | 130 | * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section |
224 | * | 131 | * |
225 | * See rcu_read_lock_bh() for more information. | 132 | * See rcu_read_lock_bh() for more information. |
226 | */ | 133 | */ |
227 | #define rcu_read_unlock_bh() \ | 134 | #define rcu_read_unlock_bh() __rcu_read_unlock_bh() |
228 | do { \ | ||
229 | rcu_read_release(); \ | ||
230 | __release(RCU_BH); \ | ||
231 | local_bh_enable(); \ | ||
232 | } while(0) | ||
233 | 135 | ||
234 | /* | 136 | /* |
235 | * Prevent the compiler from merging or refetching accesses. The compiler | 137 | * Prevent the compiler from merging or refetching accesses. The compiler |
@@ -293,21 +195,52 @@ extern struct lockdep_map rcu_lock_map; | |||
293 | * In "classic RCU", these two guarantees happen to be one and | 195 | * In "classic RCU", these two guarantees happen to be one and |
294 | * the same, but can differ in realtime RCU implementations. | 196 | * the same, but can differ in realtime RCU implementations. |
295 | */ | 197 | */ |
296 | #define synchronize_sched() synchronize_rcu() | 198 | #define synchronize_sched() __synchronize_sched() |
297 | 199 | ||
298 | extern void rcu_init(void); | 200 | /** |
299 | extern void rcu_check_callbacks(int cpu, int user); | 201 | * call_rcu - Queue an RCU callback for invocation after a grace period. |
300 | extern void rcu_restart_cpu(int cpu); | 202 | * @head: structure to be used for queueing the RCU updates. |
301 | extern long rcu_batches_completed(void); | 203 | * @func: actual update function to be invoked after the grace period |
302 | extern long rcu_batches_completed_bh(void); | 204 | * |
205 | * The update function will be invoked some time after a full grace | ||
206 | * period elapses, in other words after all currently executing RCU | ||
207 | * read-side critical sections have completed. RCU read-side critical | ||
208 | * sections are delimited by rcu_read_lock() and rcu_read_unlock(), | ||
209 | * and may be nested. | ||
210 | */ | ||
211 | extern void call_rcu(struct rcu_head *head, | ||
212 | void (*func)(struct rcu_head *head)); | ||
303 | 213 | ||
304 | /* Exported interfaces */ | 214 | /** |
305 | extern void FASTCALL(call_rcu(struct rcu_head *head, | 215 | * call_rcu_bh - Queue an RCU callback for invocation after a quicker grace period. |
306 | void (*func)(struct rcu_head *head))); | 216 | * @head: structure to be used for queueing the RCU updates. |
307 | extern void FASTCALL(call_rcu_bh(struct rcu_head *head, | 217 | * @func: actual update function to be invoked after the grace period |
308 | void (*func)(struct rcu_head *head))); | 218 | * |
219 | * The update function will be invoked some time after a full grace | ||
220 | * period elapses, in other words after all currently executing RCU | ||
221 | * read-side critical sections have completed. call_rcu_bh() assumes | ||
222 | * that the read-side critical sections end on completion of a softirq | ||
223 | * handler. This means that read-side critical sections in process | ||
224 | * context must not be interrupted by softirqs. This interface is to be | ||
225 | * used when most of the read-side critical sections are in softirq context. | ||
226 | * RCU read-side critical sections are delimited by: | ||
227 | * - rcu_read_lock() and rcu_read_unlock(), if in interrupt context. | ||
228 | * OR | ||
229 | * - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context. | ||
230 | * These may be nested. | ||
231 | */ | ||
232 | extern void call_rcu_bh(struct rcu_head *head, | ||
233 | void (*func)(struct rcu_head *head)); | ||
234 | |||
235 | /* Exported common interfaces */ | ||
309 | extern void synchronize_rcu(void); | 236 | extern void synchronize_rcu(void); |
310 | extern void rcu_barrier(void); | 237 | extern void rcu_barrier(void); |
238 | extern long rcu_batches_completed(void); | ||
239 | extern long rcu_batches_completed_bh(void); | ||
240 | |||
241 | /* Internal to kernel */ | ||
242 | extern void rcu_init(void); | ||
243 | extern int rcu_needs_cpu(int cpu); | ||
311 | 244 | ||
312 | #endif /* __KERNEL__ */ | 245 | #endif /* __KERNEL__ */ |
313 | #endif /* __LINUX_RCUPDATE_H */ | 246 | #endif /* __LINUX_RCUPDATE_H */ |
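A common pattern for the interfaces declared above is to unlink an element and let call_rcu() free it once all pre-existing readers are done. The structure and function names below are illustrative only, not taken from the patch:

#include <linux/rcupdate.h>
#include <linux/list.h>
#include <linux/slab.h>

struct foo {
	struct list_head list;
	struct rcu_head rcu;		/* storage for the deferred callback */
	int data;
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *p = container_of(head, struct foo, rcu);

	kfree(p);			/* runs after a full grace period */
}

/* Writer: remove an element, then defer its freeing past a grace period. */
static void foo_remove(struct foo *p)
{
	list_del_rcu(&p->list);		/* readers may still hold a reference... */
	call_rcu(&p->rcu, foo_reclaim);	/* ...so only free it after they drain */
}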
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h new file mode 100644 index 000000000000..ece8eb3e4151 --- /dev/null +++ b/include/linux/rcupreempt.h | |||
@@ -0,0 +1,86 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (RT implementation) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2006 | ||
19 | * | ||
20 | * Author: Paul McKenney <paulmck@us.ibm.com> | ||
21 | * | ||
22 | * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> | ||
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
24 | * Papers: | ||
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | ||
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | ||
27 | * | ||
28 | * For detailed explanation of Read-Copy Update mechanism see - | ||
29 | * Documentation/RCU | ||
30 | * | ||
31 | */ | ||
32 | |||
33 | #ifndef __LINUX_RCUPREEMPT_H | ||
34 | #define __LINUX_RCUPREEMPT_H | ||
35 | |||
36 | #ifdef __KERNEL__ | ||
37 | |||
38 | #include <linux/cache.h> | ||
39 | #include <linux/spinlock.h> | ||
40 | #include <linux/threads.h> | ||
41 | #include <linux/percpu.h> | ||
42 | #include <linux/cpumask.h> | ||
43 | #include <linux/seqlock.h> | ||
44 | |||
45 | #define rcu_qsctr_inc(cpu) | ||
46 | #define rcu_bh_qsctr_inc(cpu) | ||
47 | #define call_rcu_bh(head, rcu) call_rcu(head, rcu) | ||
48 | |||
49 | extern void __rcu_read_lock(void); | ||
50 | extern void __rcu_read_unlock(void); | ||
51 | extern int rcu_pending(int cpu); | ||
52 | extern int rcu_needs_cpu(int cpu); | ||
53 | |||
54 | #define __rcu_read_lock_bh() { rcu_read_lock(); local_bh_disable(); } | ||
55 | #define __rcu_read_unlock_bh() { local_bh_enable(); rcu_read_unlock(); } | ||
56 | |||
57 | extern void __synchronize_sched(void); | ||
58 | |||
59 | extern void __rcu_init(void); | ||
60 | extern void rcu_check_callbacks(int cpu, int user); | ||
61 | extern void rcu_restart_cpu(int cpu); | ||
62 | extern long rcu_batches_completed(void); | ||
63 | |||
64 | /* | ||
65 | * Return the number of RCU batches processed thus far. Useful for debug | ||
66 | * and statistics. The _bh variant is identical to straight RCU. | ||
67 | */ | ||
68 | static inline long rcu_batches_completed_bh(void) | ||
69 | { | ||
70 | return rcu_batches_completed(); | ||
71 | } | ||
72 | |||
73 | #ifdef CONFIG_RCU_TRACE | ||
74 | struct rcupreempt_trace; | ||
75 | extern long *rcupreempt_flipctr(int cpu); | ||
76 | extern long rcupreempt_data_completed(void); | ||
77 | extern int rcupreempt_flip_flag(int cpu); | ||
78 | extern int rcupreempt_mb_flag(int cpu); | ||
79 | extern char *rcupreempt_try_flip_state_name(void); | ||
80 | extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu); | ||
81 | #endif | ||
82 | |||
83 | struct softirq_action; | ||
84 | |||
85 | #endif /* __KERNEL__ */ | ||
86 | #endif /* __LINUX_RCUPREEMPT_H */ | ||
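Unlike the classic implementation, __rcu_read_lock() here is an out-of-line function: a preemptible reader has to record its state in the task structure (the rcu_read_lock_nesting and rcu_flipctr_idx fields added to task_struct later in this patch). A heavily simplified conceptual sketch, not the actual kernel/rcupreempt.c code, of what the outermost lock acquisition has to do:

/* Conceptual sketch only: the real implementation also latches
 * current->rcu_flipctr_idx, bumps a per-CPU counter pair under
 * interrupt protection, and orders the updates with memory barriers. */
static void example_rcu_read_lock(void)
{
	int nesting = current->rcu_read_lock_nesting;

	if (nesting != 0) {
		/* Nested call: nothing new to publish, just count it. */
		current->rcu_read_lock_nesting = nesting + 1;
	} else {
		/* Outermost call: make this reader visible to the
		 * grace-period state machine before it enters the
		 * critical section. */
		barrier();
		current->rcu_read_lock_nesting = nesting + 1;
		barrier();
	}
}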
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h new file mode 100644 index 000000000000..21cd6b2a5c42 --- /dev/null +++ b/include/linux/rcupreempt_trace.h | |||
@@ -0,0 +1,99 @@ | |||
1 | /* | ||
2 | * Read-Copy Update mechanism for mutual exclusion (RT implementation) | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License as published by | ||
6 | * the Free Software Foundation; either version 2 of the License, or | ||
7 | * (at your option) any later version. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | * GNU General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public License | ||
15 | * along with this program; if not, write to the Free Software | ||
16 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
17 | * | ||
18 | * Copyright (C) IBM Corporation, 2006 | ||
19 | * | ||
20 | * Author: Paul McKenney <paulmck@us.ibm.com> | ||
21 | * | ||
22 | * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com> | ||
23 | * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen. | ||
24 | * Papers: | ||
25 | * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf | ||
26 | * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001) | ||
27 | * | ||
28 | * For detailed explanation of the Preemptible Read-Copy Update mechanism see - | ||
29 | * http://lwn.net/Articles/253651/ | ||
30 | */ | ||
31 | |||
32 | #ifndef __LINUX_RCUPREEMPT_TRACE_H | ||
33 | #define __LINUX_RCUPREEMPT_TRACE_H | ||
34 | |||
35 | #ifdef __KERNEL__ | ||
36 | #include <linux/types.h> | ||
37 | #include <linux/kernel.h> | ||
38 | |||
39 | #include <asm/atomic.h> | ||
40 | |||
41 | /* | ||
42 | * PREEMPT_RCU data structures. | ||
43 | */ | ||
44 | |||
45 | struct rcupreempt_trace { | ||
46 | long next_length; | ||
47 | long next_add; | ||
48 | long wait_length; | ||
49 | long wait_add; | ||
50 | long done_length; | ||
51 | long done_add; | ||
52 | long done_remove; | ||
53 | atomic_t done_invoked; | ||
54 | long rcu_check_callbacks; | ||
55 | atomic_t rcu_try_flip_1; | ||
56 | atomic_t rcu_try_flip_e1; | ||
57 | long rcu_try_flip_i1; | ||
58 | long rcu_try_flip_ie1; | ||
59 | long rcu_try_flip_g1; | ||
60 | long rcu_try_flip_a1; | ||
61 | long rcu_try_flip_ae1; | ||
62 | long rcu_try_flip_a2; | ||
63 | long rcu_try_flip_z1; | ||
64 | long rcu_try_flip_ze1; | ||
65 | long rcu_try_flip_z2; | ||
66 | long rcu_try_flip_m1; | ||
67 | long rcu_try_flip_me1; | ||
68 | long rcu_try_flip_m2; | ||
69 | }; | ||
70 | |||
71 | #ifdef CONFIG_RCU_TRACE | ||
72 | #define RCU_TRACE(fn, arg) fn(arg); | ||
73 | #else | ||
74 | #define RCU_TRACE(fn, arg) | ||
75 | #endif | ||
76 | |||
77 | extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace); | ||
78 | extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace); | ||
79 | extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace); | ||
80 | extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace); | ||
81 | extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace); | ||
82 | extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace); | ||
83 | extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace); | ||
84 | extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace); | ||
85 | extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace); | ||
86 | extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace); | ||
87 | extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace); | ||
88 | extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace); | ||
89 | extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace); | ||
90 | extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace); | ||
91 | extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace); | ||
92 | extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace); | ||
93 | extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace); | ||
94 | extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace); | ||
95 | extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace); | ||
96 | extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace); | ||
97 | |||
98 | #endif /* __KERNEL__ */ | ||
99 | #endif /* __LINUX_RCUPREEMPT_TRACE_H */ | ||
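The RCU_TRACE() wrapper lets the implementation drop trace hooks into hot paths that compile away entirely when CONFIG_RCU_TRACE is off. A short usage sketch; the surrounding function and the static trace instance are invented for illustration:

static struct rcupreempt_trace example_trace;	/* illustrative instance */

static void example_queue_callback(void)
{
	/* ... queue the new callback on the "next" list ... */

	/* Expands to rcupreempt_trace_next_add(&example_trace); when
	 * CONFIG_RCU_TRACE=y, and to nothing at all otherwise. */
	RCU_TRACE(rcupreempt_trace_next_add, &example_trace);
}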
diff --git a/include/linux/sched.h b/include/linux/sched.h index d6eacda765ca..df5b24ee80b3 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -78,7 +78,6 @@ struct sched_param { | |||
78 | #include <linux/proportions.h> | 78 | #include <linux/proportions.h> |
79 | #include <linux/seccomp.h> | 79 | #include <linux/seccomp.h> |
80 | #include <linux/rcupdate.h> | 80 | #include <linux/rcupdate.h> |
81 | #include <linux/futex.h> | ||
82 | #include <linux/rtmutex.h> | 81 | #include <linux/rtmutex.h> |
83 | 82 | ||
84 | #include <linux/time.h> | 83 | #include <linux/time.h> |
@@ -88,11 +87,13 @@ struct sched_param { | |||
88 | #include <linux/hrtimer.h> | 87 | #include <linux/hrtimer.h> |
89 | #include <linux/task_io_accounting.h> | 88 | #include <linux/task_io_accounting.h> |
90 | #include <linux/kobject.h> | 89 | #include <linux/kobject.h> |
90 | #include <linux/latencytop.h> | ||
91 | 91 | ||
92 | #include <asm/processor.h> | 92 | #include <asm/processor.h> |
93 | 93 | ||
94 | struct exec_domain; | 94 | struct exec_domain; |
95 | struct futex_pi_state; | 95 | struct futex_pi_state; |
96 | struct robust_list_head; | ||
96 | struct bio; | 97 | struct bio; |
97 | 98 | ||
98 | /* | 99 | /* |
@@ -230,6 +231,8 @@ static inline int select_nohz_load_balancer(int cpu) | |||
230 | } | 231 | } |
231 | #endif | 232 | #endif |
232 | 233 | ||
234 | extern unsigned long rt_needs_cpu(int cpu); | ||
235 | |||
233 | /* | 236 | /* |
234 | * Only dump TASK_* tasks. (0 for all tasks) | 237 | * Only dump TASK_* tasks. (0 for all tasks) |
235 | */ | 238 | */ |
@@ -257,13 +260,19 @@ extern void trap_init(void); | |||
257 | extern void account_process_tick(struct task_struct *task, int user); | 260 | extern void account_process_tick(struct task_struct *task, int user); |
258 | extern void update_process_times(int user); | 261 | extern void update_process_times(int user); |
259 | extern void scheduler_tick(void); | 262 | extern void scheduler_tick(void); |
263 | extern void hrtick_resched(void); | ||
264 | |||
265 | extern void sched_show_task(struct task_struct *p); | ||
260 | 266 | ||
261 | #ifdef CONFIG_DETECT_SOFTLOCKUP | 267 | #ifdef CONFIG_DETECT_SOFTLOCKUP |
262 | extern void softlockup_tick(void); | 268 | extern void softlockup_tick(void); |
263 | extern void spawn_softlockup_task(void); | 269 | extern void spawn_softlockup_task(void); |
264 | extern void touch_softlockup_watchdog(void); | 270 | extern void touch_softlockup_watchdog(void); |
265 | extern void touch_all_softlockup_watchdogs(void); | 271 | extern void touch_all_softlockup_watchdogs(void); |
266 | extern int softlockup_thresh; | 272 | extern unsigned long softlockup_thresh; |
273 | extern unsigned long sysctl_hung_task_check_count; | ||
274 | extern unsigned long sysctl_hung_task_timeout_secs; | ||
275 | extern unsigned long sysctl_hung_task_warnings; | ||
267 | #else | 276 | #else |
268 | static inline void softlockup_tick(void) | 277 | static inline void softlockup_tick(void) |
269 | { | 278 | { |
@@ -822,6 +831,7 @@ struct sched_class { | |||
822 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); | 831 | void (*enqueue_task) (struct rq *rq, struct task_struct *p, int wakeup); |
823 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); | 832 | void (*dequeue_task) (struct rq *rq, struct task_struct *p, int sleep); |
824 | void (*yield_task) (struct rq *rq); | 833 | void (*yield_task) (struct rq *rq); |
834 | int (*select_task_rq)(struct task_struct *p, int sync); | ||
825 | 835 | ||
826 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); | 836 | void (*check_preempt_curr) (struct rq *rq, struct task_struct *p); |
827 | 837 | ||
@@ -837,11 +847,25 @@ struct sched_class { | |||
837 | int (*move_one_task) (struct rq *this_rq, int this_cpu, | 847 | int (*move_one_task) (struct rq *this_rq, int this_cpu, |
838 | struct rq *busiest, struct sched_domain *sd, | 848 | struct rq *busiest, struct sched_domain *sd, |
839 | enum cpu_idle_type idle); | 849 | enum cpu_idle_type idle); |
850 | void (*pre_schedule) (struct rq *this_rq, struct task_struct *task); | ||
851 | void (*post_schedule) (struct rq *this_rq); | ||
852 | void (*task_wake_up) (struct rq *this_rq, struct task_struct *task); | ||
840 | #endif | 853 | #endif |
841 | 854 | ||
842 | void (*set_curr_task) (struct rq *rq); | 855 | void (*set_curr_task) (struct rq *rq); |
843 | void (*task_tick) (struct rq *rq, struct task_struct *p); | 856 | void (*task_tick) (struct rq *rq, struct task_struct *p, int queued); |
844 | void (*task_new) (struct rq *rq, struct task_struct *p); | 857 | void (*task_new) (struct rq *rq, struct task_struct *p); |
858 | void (*set_cpus_allowed)(struct task_struct *p, cpumask_t *newmask); | ||
859 | |||
860 | void (*join_domain)(struct rq *rq); | ||
861 | void (*leave_domain)(struct rq *rq); | ||
862 | |||
863 | void (*switched_from) (struct rq *this_rq, struct task_struct *task, | ||
864 | int running); | ||
865 | void (*switched_to) (struct rq *this_rq, struct task_struct *task, | ||
866 | int running); | ||
867 | void (*prio_changed) (struct rq *this_rq, struct task_struct *task, | ||
868 | int oldprio, int running); | ||
845 | }; | 869 | }; |
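The hooks added to sched_class above let a scheduling class take part in wakeup CPU selection, pre/post-schedule balancing, root-domain attach/detach, and class or priority transitions. A minimal, hypothetical skeleton wiring up a few of them; everything prefixed example_ is invented, and this is not the real kernel/sched_rt.c code:

static int example_select_task_rq(struct task_struct *p, int sync)
{
	/* Trivial policy: keep the task on the CPU it last ran on. */
	return task_cpu(p);
}

static void example_task_tick(struct rq *rq, struct task_struct *p, int queued)
{
	/* 'queued' is the new argument: it tells the class whether this
	 * tick came in via the hrtick path rather than the regular
	 * periodic tick. */
}

static void example_switched_to(struct rq *rq, struct task_struct *p,
				int running)
{
	/* Called when a task enters this class; 'running' is set if the
	 * task currently occupies the CPU, in which case a preemption
	 * check may be in order. */
}

static const struct sched_class example_sched_class = {
	/* ... enqueue_task, dequeue_task, pick_next_task, etc. ... */
	.select_task_rq	= example_select_task_rq,
	.task_tick	= example_task_tick,
	.switched_to	= example_switched_to,
};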
846 | 870 | ||
847 | struct load_weight { | 871 | struct load_weight { |
@@ -871,6 +895,8 @@ struct sched_entity { | |||
871 | #ifdef CONFIG_SCHEDSTATS | 895 | #ifdef CONFIG_SCHEDSTATS |
872 | u64 wait_start; | 896 | u64 wait_start; |
873 | u64 wait_max; | 897 | u64 wait_max; |
898 | u64 wait_count; | ||
899 | u64 wait_sum; | ||
874 | 900 | ||
875 | u64 sleep_start; | 901 | u64 sleep_start; |
876 | u64 sleep_max; | 902 | u64 sleep_max; |
@@ -909,6 +935,21 @@ struct sched_entity { | |||
909 | #endif | 935 | #endif |
910 | }; | 936 | }; |
911 | 937 | ||
938 | struct sched_rt_entity { | ||
939 | struct list_head run_list; | ||
940 | unsigned int time_slice; | ||
941 | unsigned long timeout; | ||
942 | int nr_cpus_allowed; | ||
943 | |||
944 | #ifdef CONFIG_FAIR_GROUP_SCHED | ||
945 | struct sched_rt_entity *parent; | ||
946 | /* rq on which this entity is (to be) queued: */ | ||
947 | struct rt_rq *rt_rq; | ||
948 | /* rq "owned" by this entity/group: */ | ||
949 | struct rt_rq *my_q; | ||
950 | #endif | ||
951 | }; | ||
952 | |||
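struct sched_rt_entity gathers the per-task realtime run-queue state that used to live directly in task_struct; the old run_list and time_slice members are removed from task_struct below and replaced by the embedded 'rt' entity. Code that touched the old fields would be converted roughly as in this illustrative helper (the queue and new_slice arguments stand in for real RT runqueue state):

/* Illustrative only: how a helper using the old fields would be adapted. */
static void example_requeue_rt_task(struct task_struct *p,
				    struct list_head *queue,
				    unsigned int new_slice)
{
	p->rt.time_slice = new_slice;			/* was: p->time_slice */
	list_move_tail(&p->rt.run_list, queue);		/* was: p->run_list   */
}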
912 | struct task_struct { | 953 | struct task_struct { |
913 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ | 954 | volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ |
914 | void *stack; | 955 | void *stack; |
@@ -925,9 +966,9 @@ struct task_struct { | |||
925 | #endif | 966 | #endif |
926 | 967 | ||
927 | int prio, static_prio, normal_prio; | 968 | int prio, static_prio, normal_prio; |
928 | struct list_head run_list; | ||
929 | const struct sched_class *sched_class; | 969 | const struct sched_class *sched_class; |
930 | struct sched_entity se; | 970 | struct sched_entity se; |
971 | struct sched_rt_entity rt; | ||
931 | 972 | ||
932 | #ifdef CONFIG_PREEMPT_NOTIFIERS | 973 | #ifdef CONFIG_PREEMPT_NOTIFIERS |
933 | /* list of struct preempt_notifier: */ | 974 | /* list of struct preempt_notifier: */ |
@@ -951,7 +992,11 @@ struct task_struct { | |||
951 | 992 | ||
952 | unsigned int policy; | 993 | unsigned int policy; |
953 | cpumask_t cpus_allowed; | 994 | cpumask_t cpus_allowed; |
954 | unsigned int time_slice; | 995 | |
996 | #ifdef CONFIG_PREEMPT_RCU | ||
997 | int rcu_read_lock_nesting; | ||
998 | int rcu_flipctr_idx; | ||
999 | #endif /* #ifdef CONFIG_PREEMPT_RCU */ | ||
955 | 1000 | ||
956 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) | 1001 | #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT) |
957 | struct sched_info sched_info; | 1002 | struct sched_info sched_info; |
@@ -1041,6 +1086,11 @@ struct task_struct { | |||
1041 | /* ipc stuff */ | 1086 | /* ipc stuff */ |
1042 | struct sysv_sem sysvsem; | 1087 | struct sysv_sem sysvsem; |
1043 | #endif | 1088 | #endif |
1089 | #ifdef CONFIG_DETECT_SOFTLOCKUP | ||
1090 | /* hung task detection */ | ||
1091 | unsigned long last_switch_timestamp; | ||
1092 | unsigned long last_switch_count; | ||
1093 | #endif | ||
1044 | /* CPU-specific state of this task */ | 1094 | /* CPU-specific state of this task */ |
1045 | struct thread_struct thread; | 1095 | struct thread_struct thread; |
1046 | /* filesystem information */ | 1096 | /* filesystem information */ |
@@ -1173,6 +1223,10 @@ struct task_struct { | |||
1173 | int make_it_fail; | 1223 | int make_it_fail; |
1174 | #endif | 1224 | #endif |
1175 | struct prop_local_single dirties; | 1225 | struct prop_local_single dirties; |
1226 | #ifdef CONFIG_LATENCYTOP | ||
1227 | int latency_record_count; | ||
1228 | struct latency_record latency_record[LT_SAVECOUNT]; | ||
1229 | #endif | ||
1176 | }; | 1230 | }; |
1177 | 1231 | ||
1178 | /* | 1232 | /* |
@@ -1453,6 +1507,12 @@ extern unsigned int sysctl_sched_child_runs_first; | |||
1453 | extern unsigned int sysctl_sched_features; | 1507 | extern unsigned int sysctl_sched_features; |
1454 | extern unsigned int sysctl_sched_migration_cost; | 1508 | extern unsigned int sysctl_sched_migration_cost; |
1455 | extern unsigned int sysctl_sched_nr_migrate; | 1509 | extern unsigned int sysctl_sched_nr_migrate; |
1510 | extern unsigned int sysctl_sched_rt_period; | ||
1511 | extern unsigned int sysctl_sched_rt_ratio; | ||
1512 | #if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_SMP) | ||
1513 | extern unsigned int sysctl_sched_min_bal_int_shares; | ||
1514 | extern unsigned int sysctl_sched_max_bal_int_shares; | ||
1515 | #endif | ||
1456 | 1516 | ||
1457 | int sched_nr_latency_handler(struct ctl_table *table, int write, | 1517 | int sched_nr_latency_handler(struct ctl_table *table, int write, |
1458 | struct file *file, void __user *buffer, size_t *length, | 1518 | struct file *file, void __user *buffer, size_t *length, |
@@ -1845,7 +1905,18 @@ static inline int need_resched(void) | |||
1845 | * cond_resched_lock() will drop the spinlock before scheduling, | 1905 | * cond_resched_lock() will drop the spinlock before scheduling, |
1846 | * cond_resched_softirq() will enable bhs before scheduling. | 1906 | * cond_resched_softirq() will enable bhs before scheduling. |
1847 | */ | 1907 | */ |
1848 | extern int cond_resched(void); | 1908 | #ifdef CONFIG_PREEMPT |
1909 | static inline int cond_resched(void) | ||
1910 | { | ||
1911 | return 0; | ||
1912 | } | ||
1913 | #else | ||
1914 | extern int _cond_resched(void); | ||
1915 | static inline int cond_resched(void) | ||
1916 | { | ||
1917 | return _cond_resched(); | ||
1918 | } | ||
1919 | #endif | ||
1849 | extern int cond_resched_lock(spinlock_t * lock); | 1920 | extern int cond_resched_lock(spinlock_t * lock); |
1850 | extern int cond_resched_softirq(void); | 1921 | extern int cond_resched_softirq(void); |
1851 | 1922 | ||
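The last hunk above turns cond_resched() into a compile-time no-op on CONFIG_PREEMPT kernels, where the scheduler can already preempt at any preemption point, while non-preemptible kernels keep calling the out-of-line _cond_resched(). Callers are unchanged; a typical one looks like this sketch (struct item and process_one_item() are hypothetical):

#include <linux/sched.h>

struct item {
	int payload;			/* illustrative */
};

static void process_one_item(struct item *it)
{
	/* hypothetical per-item work */
}

static void example_process_items(struct item *items, int nr)
{
	int i;

	for (i = 0; i < nr; i++) {
		process_one_item(&items[i]);
		/* With CONFIG_PREEMPT=y this now evaluates to a constant 0
		 * and vanishes; otherwise _cond_resched() may schedule(). */
		cond_resched();
	}
}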
diff --git a/include/linux/smp_lock.h b/include/linux/smp_lock.h index 58962c51dee1..aab3a4cff4e1 100644 --- a/include/linux/smp_lock.h +++ b/include/linux/smp_lock.h | |||
@@ -17,22 +17,10 @@ extern void __lockfunc __release_kernel_lock(void); | |||
17 | __release_kernel_lock(); \ | 17 | __release_kernel_lock(); \ |
18 | } while (0) | 18 | } while (0) |
19 | 19 | ||
20 | /* | ||
21 | * Non-SMP kernels will never block on the kernel lock, | ||
22 | * so we are better off returning a constant zero from | ||
23 | * reacquire_kernel_lock() so that the compiler can see | ||
24 | * it at compile-time. | ||
25 | */ | ||
26 | #if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_BKL) | ||
27 | # define return_value_on_smp return | ||
28 | #else | ||
29 | # define return_value_on_smp | ||
30 | #endif | ||
31 | |||
32 | static inline int reacquire_kernel_lock(struct task_struct *task) | 20 | static inline int reacquire_kernel_lock(struct task_struct *task) |
33 | { | 21 | { |
34 | if (unlikely(task->lock_depth >= 0)) | 22 | if (unlikely(task->lock_depth >= 0)) |
35 | return_value_on_smp __reacquire_kernel_lock(); | 23 | return __reacquire_kernel_lock(); |
36 | return 0; | 24 | return 0; |
37 | } | 25 | } |
38 | 26 | ||
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index e7fa657d0c49..5da9794b2d78 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
@@ -9,10 +9,13 @@ struct stack_trace { | |||
9 | }; | 9 | }; |
10 | 10 | ||
11 | extern void save_stack_trace(struct stack_trace *trace); | 11 | extern void save_stack_trace(struct stack_trace *trace); |
12 | extern void save_stack_trace_tsk(struct task_struct *tsk, | ||
13 | struct stack_trace *trace); | ||
12 | 14 | ||
13 | extern void print_stack_trace(struct stack_trace *trace, int spaces); | 15 | extern void print_stack_trace(struct stack_trace *trace, int spaces); |
14 | #else | 16 | #else |
15 | # define save_stack_trace(trace) do { } while (0) | 17 | # define save_stack_trace(trace) do { } while (0) |
18 | # define save_stack_trace_tsk(tsk, trace) do { } while (0) | ||
16 | # define print_stack_trace(trace, spaces) do { } while (0) | 19 | # define print_stack_trace(trace, spaces) do { } while (0) |
17 | #endif | 20 | #endif |
18 | 21 | ||
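save_stack_trace_tsk() gives the new hung-task and latencytop code a way to capture another task's stack. A hedged usage sketch, assuming the usual stack_trace members (nr_entries, max_entries, entries, skip) and an arbitrarily sized buffer:

static void example_dump_task_stack(struct task_struct *tsk)
{
	unsigned long entries[16];			/* arbitrary depth */
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(entries),
		.entries	= entries,
		.skip		= 0,
	};

	save_stack_trace_tsk(tsk, &trace);
	print_stack_trace(&trace, 0);			/* indent by 0 spaces */
}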
diff --git a/include/linux/topology.h b/include/linux/topology.h index 47729f18bfdf..2352f46160d3 100644 --- a/include/linux/topology.h +++ b/include/linux/topology.h | |||
@@ -5,7 +5,7 @@ | |||
5 | * | 5 | * |
6 | * Copyright (C) 2002, IBM Corp. | 6 | * Copyright (C) 2002, IBM Corp. |
7 | * | 7 | * |
8 | * All rights reserved. | 8 | * All rights reserved. |
9 | * | 9 | * |
10 | * This program is free software; you can redistribute it and/or modify | 10 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 11 | * it under the terms of the GNU General Public License as published by |
@@ -103,6 +103,7 @@ | |||
103 | .forkexec_idx = 0, \ | 103 | .forkexec_idx = 0, \ |
104 | .flags = SD_LOAD_BALANCE \ | 104 | .flags = SD_LOAD_BALANCE \ |
105 | | SD_BALANCE_NEWIDLE \ | 105 | | SD_BALANCE_NEWIDLE \ |
106 | | SD_BALANCE_FORK \ | ||
106 | | SD_BALANCE_EXEC \ | 107 | | SD_BALANCE_EXEC \ |
107 | | SD_WAKE_AFFINE \ | 108 | | SD_WAKE_AFFINE \ |
108 | | SD_WAKE_IDLE \ | 109 | | SD_WAKE_IDLE \ |
@@ -134,6 +135,7 @@ | |||
134 | .forkexec_idx = 1, \ | 135 | .forkexec_idx = 1, \ |
135 | .flags = SD_LOAD_BALANCE \ | 136 | .flags = SD_LOAD_BALANCE \ |
136 | | SD_BALANCE_NEWIDLE \ | 137 | | SD_BALANCE_NEWIDLE \ |
138 | | SD_BALANCE_FORK \ | ||
137 | | SD_BALANCE_EXEC \ | 139 | | SD_BALANCE_EXEC \ |
138 | | SD_WAKE_AFFINE \ | 140 | | SD_WAKE_AFFINE \ |
139 | | SD_WAKE_IDLE \ | 141 | | SD_WAKE_IDLE \ |
@@ -165,6 +167,7 @@ | |||
165 | .forkexec_idx = 1, \ | 167 | .forkexec_idx = 1, \ |
166 | .flags = SD_LOAD_BALANCE \ | 168 | .flags = SD_LOAD_BALANCE \ |
167 | | SD_BALANCE_NEWIDLE \ | 169 | | SD_BALANCE_NEWIDLE \ |
170 | | SD_BALANCE_FORK \ | ||
168 | | SD_BALANCE_EXEC \ | 171 | | SD_BALANCE_EXEC \ |
169 | | SD_WAKE_AFFINE \ | 172 | | SD_WAKE_AFFINE \ |
170 | | BALANCE_FOR_PKG_POWER,\ | 173 | | BALANCE_FOR_PKG_POWER,\ |