Diffstat (limited to 'include/linux')
-rw-r--r--   include/linux/ata.h           |  2
-rw-r--r--   include/linux/ftrace.h        |  2
-rw-r--r--   include/linux/libata.h        |  1
-rw-r--r--   include/linux/preempt.h       | 22
-rw-r--r--   include/linux/proc_fs.h       |  2
-rw-r--r--   include/linux/security.h      | 12
-rw-r--r--   include/linux/spinlock_up.h   | 29
7 files changed, 50 insertions, 20 deletions
diff --git a/include/linux/ata.h b/include/linux/ata.h
index 8f7a3d68371a..ee0bd9524055 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -954,7 +954,7 @@ static inline int atapi_cdb_len(const u16 *dev_id)
         }
 }
 
-static inline bool atapi_command_packet_set(const u16 *dev_id)
+static inline int atapi_command_packet_set(const u16 *dev_id)
 {
         return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f;
 }
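The return-type change above matters because the helper extracts a 5-bit "command packet set" code (values 0 through 0x1f) from the IDENTIFY data, and a bool return collapses every nonzero code to 1. A hypothetical caller, not part of this diff, illustrating why the distinct values must survive:

/*
 * Illustrative only: a caller that needs the exact packet-set code.
 * With a bool return, a comparison like the one below could never
 * distinguish device types.
 */
#include <linux/ata.h>

static int example_is_tape(const u16 *id)
{
        return atapi_command_packet_set(id) == 0x01; /* 0x01: sequential-access (tape) */
}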
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index e5ca8ef50e9b..167abf907802 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -89,6 +89,7 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            that the call back has its own recursion protection. If it does
  *            not set this, then the ftrace infrastructure will add recursion
  *            protection for the caller.
+ * STUB   - The ftrace_ops is just a place holder.
  */
 enum {
         FTRACE_OPS_FL_ENABLED                   = 1 << 0,
@@ -98,6 +99,7 @@ enum {
         FTRACE_OPS_FL_SAVE_REGS                 = 1 << 4,
         FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED    = 1 << 5,
         FTRACE_OPS_FL_RECURSION_SAFE            = 1 << 6,
+        FTRACE_OPS_FL_STUB                      = 1 << 7,
 };
 
 struct ftrace_ops {
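A hedged sketch, not from this diff, of a placeholder ftrace_ops marked with the new STUB flag; the callback signature follows the ftrace_func_t typedef referenced in the hunk above, and the names are illustrative:

/*
 * Sketch only: an ops whose callback does nothing because the ops merely
 * marks a slot; FTRACE_OPS_FL_STUB tells the ftrace core to treat it as
 * a place holder.
 */
#include <linux/ftrace.h>

static void example_stub_func(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs)
{
        /* intentionally empty */
}

static struct ftrace_ops example_stub_ops = {
        .func  = example_stub_func,
        .flags = FTRACE_OPS_FL_STUB | FTRACE_OPS_FL_RECURSION_SAFE,
};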
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 91c9d109e5f1..eae7a053dc51 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -398,6 +398,7 @@ enum {
         ATA_HORKAGE_NOSETXFER       = (1 << 14),  /* skip SETXFER, SATA only */
         ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15),  /* skip AA */
         ATA_HORKAGE_DUMP_ID         = (1 << 16),  /* dump IDENTIFY data */
+        ATA_HORKAGE_MAX_SEC_LBA48   = (1 << 17),  /* Set max sects to 65535 */
 
         /* DMA mask for user DMA control: User visible values; DO NOT
            renumber */
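A hedged sketch, not from this diff, of one plausible consumer of the new horkage bit; dev->horkage and dev->max_sectors are existing struct ata_device fields, but the helper itself is hypothetical:

/*
 * Hypothetical helper: if the quirk bit is set for this device, clamp the
 * maximum transfer size to 65535 sectors, per the flag's comment above.
 */
#include <linux/libata.h>

static void example_apply_max_sec_quirk(struct ata_device *dev)
{
        if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
                dev->max_sectors = 65535;
}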
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5a710b9c578e..87a03c746f17 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -93,14 +93,20 @@ do { \
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
-#define preempt_disable()                       do { } while (0)
-#define sched_preempt_enable_no_resched()       do { } while (0)
-#define preempt_enable_no_resched()             do { } while (0)
-#define preempt_enable()                        do { } while (0)
-
-#define preempt_disable_notrace()               do { } while (0)
-#define preempt_enable_no_resched_notrace()     do { } while (0)
-#define preempt_enable_notrace()                do { } while (0)
+/*
+ * Even if we don't have any preemption, we need preempt disable/enable
+ * to be barriers, so that we don't have things like get_user/put_user
+ * that can cause faults and scheduling migrate into our preempt-protected
+ * region.
+ */
+#define preempt_disable()                       barrier()
+#define sched_preempt_enable_no_resched()       barrier()
+#define preempt_enable_no_resched()             barrier()
+#define preempt_enable()                        barrier()
+
+#define preempt_disable_notrace()               barrier()
+#define preempt_enable_no_resched_notrace()     barrier()
+#define preempt_enable_notrace()                barrier()
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
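A sketch of the pattern the new comment protects, using a hypothetical per-CPU counter (example_cnt does not exist in the kernel). With the old empty macros on !CONFIG_PREEMPT_COUNT builds, the compiler was free to move the faulting, possibly rescheduling get_user() into the region between preempt_disable() and preempt_enable(); barrier() forbids that reordering:

#include <linux/preempt.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>

static DEFINE_PER_CPU(int, example_cnt);        /* hypothetical counter */

static int example_read(int __user *uptr, int *out)
{
        int val;

        if (get_user(val, uptr))                /* may fault and sleep */
                return -EFAULT;

        preempt_disable();                      /* now a compiler barrier even here */
        *out = val + __this_cpu_read(example_cnt);
        preempt_enable();

        return 0;
}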
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 8307f2f94d86..94dfb2aa5533 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -117,6 +117,7 @@ struct proc_dir_entry *proc_create_data(const char *name, umode_t mode,
                                 const struct file_operations *proc_fops,
                                 void *data);
 extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
+extern int remove_proc_subtree(const char *name, struct proc_dir_entry *parent);
 
 struct pid_namespace;
 
@@ -202,6 +203,7 @@ static inline struct proc_dir_entry *proc_create_data(const char *name,
         return NULL;
 }
 #define remove_proc_entry(name, parent) do {} while (0)
+#define remove_proc_subtree(name, parent) do {} while (0)
 
 static inline struct proc_dir_entry *proc_symlink(const char *name,
                 struct proc_dir_entry *parent,const char *dest) {return NULL;}
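A minimal usage sketch for the new helper, under assumed names ("example" directory, "status" entry, example_fops defined elsewhere): remove_proc_subtree() tears the whole directory down in one call instead of one remove_proc_entry() per node.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/proc_fs.h>

static const struct file_operations example_fops; /* assumed elsewhere */

static int __init example_proc_init(void)
{
        struct proc_dir_entry *dir = proc_mkdir("example", NULL);

        if (!dir)
                return -ENOMEM;
        proc_create("status", 0444, dir, &example_fops);
        return 0;
}

static void __exit example_proc_exit(void)
{
        remove_proc_subtree("example", NULL); /* removes "example" and all children */
}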
diff --git a/include/linux/security.h b/include/linux/security.h
index eee7478cda70..032c366ef1c6 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1012,6 +1012,10 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  *	This hook can be used by the module to update any security state
  *	associated with the TUN device's security structure.
  *	@security pointer to the TUN devices's security structure.
+ * @skb_owned_by:
+ *	This hook sets the packet's owning sock.
+ *	@skb is the packet.
+ *	@sk the sock which owns the packet.
  *
  * Security hooks for XFRM operations.
  *
@@ -1638,6 +1642,7 @@ struct security_operations {
         int (*tun_dev_attach_queue) (void *security);
         int (*tun_dev_attach) (struct sock *sk, void *security);
         int (*tun_dev_open) (void *security);
+        void (*skb_owned_by) (struct sk_buff *skb, struct sock *sk);
 #endif  /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
@@ -2588,6 +2593,8 @@ int security_tun_dev_attach_queue(void *security);
 int security_tun_dev_attach(struct sock *sk, void *security);
 int security_tun_dev_open(void *security);
 
+void security_skb_owned_by(struct sk_buff *skb, struct sock *sk);
+
 #else /* CONFIG_SECURITY_NETWORK */
 static inline int security_unix_stream_connect(struct sock *sock,
                                                struct sock *other,
@@ -2779,6 +2786,11 @@ static inline int security_tun_dev_open(void *security)
 {
         return 0;
 }
+
+static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+}
+
 #endif  /* CONFIG_SECURITY_NETWORK */
 
 #ifdef CONFIG_SECURITY_NETWORK_XFRM
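A hedged sketch of how a security module might wire up the new hook (assuming CONFIG_SECURITY_NETWORK); the module name and function are illustrative, and skb_set_owner_w() is an existing helper used here only as one plausible implementation:

#include <linux/security.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static void example_skb_owned_by(struct sk_buff *skb, struct sock *sk)
{
        skb_set_owner_w(skb, sk);       /* record sk as the skb's owner */
}

static struct security_operations example_ops = {
        .name         = "example",
        .skb_owned_by = example_skb_owned_by,
        /* registered elsewhere, e.g. via register_security() */
};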
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index a26e2fb604e6..e2369c167dbd 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -16,7 +16,10 @@
  * In the debug case, 1 means unlocked, 0 means locked. (the values
  * are inverted, to catch initialization bugs)
  *
- * No atomicity anywhere, we are on UP.
+ * No atomicity anywhere, we are on UP. However, we still need
+ * the compiler barriers, because we do not want the compiler to
+ * move potentially faulting instructions (notably user accesses)
+ * into the locked sequence, resulting in non-atomic execution.
  */
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -25,6 +28,7 @@
 static inline void arch_spin_lock(arch_spinlock_t *lock)
 {
         lock->slock = 0;
+        barrier();
 }
 
 static inline void
@@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
 {
         local_irq_save(flags);
         lock->slock = 0;
+        barrier();
 }
 
 static inline int arch_spin_trylock(arch_spinlock_t *lock)
@@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
         char oldval = lock->slock;
 
         lock->slock = 0;
+        barrier();
 
         return oldval > 0;
 }
 
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
+        barrier();
         lock->slock = 1;
 }
 
 /*
  * Read-write spinlocks. No debug version.
  */
-#define arch_read_lock(lock)            do { (void)(lock); } while (0)
-#define arch_write_lock(lock)           do { (void)(lock); } while (0)
-#define arch_read_trylock(lock)         ({ (void)(lock); 1; })
-#define arch_write_trylock(lock)        ({ (void)(lock); 1; })
-#define arch_read_unlock(lock)          do { (void)(lock); } while (0)
-#define arch_write_unlock(lock)         do { (void)(lock); } while (0)
+#define arch_read_lock(lock)            do { barrier(); (void)(lock); } while (0)
+#define arch_write_lock(lock)           do { barrier(); (void)(lock); } while (0)
+#define arch_read_trylock(lock)         ({ barrier(); (void)(lock); 1; })
+#define arch_write_trylock(lock)        ({ barrier(); (void)(lock); 1; })
+#define arch_read_unlock(lock)          do { barrier(); (void)(lock); } while (0)
+#define arch_write_unlock(lock)         do { barrier(); (void)(lock); } while (0)
 
 #else /* DEBUG_SPINLOCK */
 #define arch_spin_is_locked(lock)       ((void)(lock), 0)
 /* for sched.c and kernel_lock.c: */
-# define arch_spin_lock(lock)           do { (void)(lock); } while (0)
-# define arch_spin_lock_flags(lock, flags)      do { (void)(lock); } while (0)
-# define arch_spin_unlock(lock)         do { (void)(lock); } while (0)
-# define arch_spin_trylock(lock)        ({ (void)(lock); 1; })
+# define arch_spin_lock(lock)           do { barrier(); (void)(lock); } while (0)
+# define arch_spin_lock_flags(lock, flags)      do { barrier(); (void)(lock); } while (0)
+# define arch_spin_unlock(lock)         do { barrier(); (void)(lock); } while (0)
+# define arch_spin_trylock(lock)        ({ barrier(); (void)(lock); 1; })
 #endif /* DEBUG_SPINLOCK */
 
 #define arch_spin_is_contended(lock)    (((void)(lock), 0))
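A sketch of the hazard the updated comment describes; example_lock, example_state and example_update() are hypothetical. On UP the lock operations compile to (almost) nothing, so the barrier() calls added above are what stop the compiler from hoisting the faulting put_user() into the locked sequence, or sinking the protected store past the unlock:

#include <linux/spinlock.h>
#include <linux/uaccess.h>

static DEFINE_SPINLOCK(example_lock);
static int example_state;

static int example_update(int __user *uptr, int val)
{
        spin_lock(&example_lock);
        example_state = val;            /* must stay inside the locked region */
        spin_unlock(&example_lock);

        return put_user(val, uptr);     /* may fault; must stay outside the lock */
}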
