author     Arnd Bergmann <arnd@arndb.de>    2013-04-08 12:26:15 -0400
committer  Arnd Bergmann <arnd@arndb.de>    2013-04-08 12:26:15 -0400
commit     e9069cf8b74b50d804fd540a9fd1383504f4af93 (patch)
tree       0b3a30308ffc00a73f681bfdf19214b5ba9ae5a6 /kernel
parent     4680ebc2c90f663ba70c6bb3d8596b0f2c4dfa9e (diff)
parent     ce63d6d4bb9f601de32d4b99f925a65182521873 (diff)
Merge tag 'vt8500/pinctrl' of git://server.prisktech.co.nz/git/linuxwmt into next/drivers
From Tony Prisk <linux@prisktech.co.nz>:
arm: vt8500: Add pinctrl driver for arch-vt8500
This series adds support for the pinctrl/gpio module on all arch-vt8500
supported SoCs.
As part of the review process, some tidy up is also done to
drivers/of/base.c to remove some code that is being constantly duplicated.
Also, a patch for the bcm2835 pinctrl driver is included to take advantage
of the new of/base.c code.
* tag 'vt8500/pinctrl' of git://server.prisktech.co.nz/git/linuxwmt: (8 commits)
pinctrl: bcm2835: make use of of_property_read_u32_index()
gpio: vt8500: Remove arch-vt8500 gpio driver
arm: vt8500: Remove gpio devicetree nodes
arm: dts: vt8500: Update Wondermedia SoC dtsi files for pinctrl driver
pinctrl: gpio: vt8500: Add pincontrol driver for arch-vt8500
arm: vt8500: Increase available GPIOs on arch-vt8500
of: Remove duplicated code for validating property and value
of: Add support for reading a u32 from a multi-value property.
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
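
For context on the drivers/of/base.c change mentioned above, the series introduces of_property_read_u32_index() so that callers such as the bcm2835 pinctrl driver no longer open-code reading a single cell out of a multi-value property. The following is only a minimal sketch of how a driver might call the new helper: the device node and the "pin-map" property name are hypothetical and are not taken from this merge; only the function's signature reflects the new API.

```c
#include <linux/of.h>
#include <linux/printk.h>

/*
 * Minimal, illustrative use of of_property_read_u32_index() (added by
 * "of: Add support for reading a u32 from a multi-value property.").
 * The node and the "pin-map" property name are hypothetical.
 */
static int example_read_third_cell(struct device_node *np)
{
	u32 val;
	int ret;

	/* Read the third u32 cell (index 2) of a multi-value property. */
	ret = of_property_read_u32_index(np, "pin-map", 2, &val);
	if (ret)
		return ret;	/* -EINVAL, -ENODATA or -EOVERFLOW on failure */

	pr_info("pin-map[2] = %u\n", val);
	return 0;
}
```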
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/events/core.c | 8 |
| -rw-r--r-- | kernel/fork.c | 5 |
| -rw-r--r-- | kernel/futex.c | 46 |
| -rw-r--r-- | kernel/printk.c | 80 |
| -rw-r--r-- | kernel/signal.c | 5 |
| -rw-r--r-- | kernel/sys.c | 57 |
| -rw-r--r-- | kernel/trace/Kconfig | 24 |
| -rw-r--r-- | kernel/trace/ftrace.c | 4 |
| -rw-r--r-- | kernel/trace/trace.c | 86 |
| -rw-r--r-- | kernel/trace/trace.h | 6 |
| -rw-r--r-- | kernel/trace/trace_irqsoff.c | 19 |
| -rw-r--r-- | kernel/trace/trace_sched_wakeup.c | 18 |
| -rw-r--r-- | kernel/user_namespace.c | 4 |
| -rw-r--r-- | kernel/workqueue.c | 51 |
14 files changed, 261 insertions(+), 152 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index b0cd86501c30..59412d037eed 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
| @@ -4434,12 +4434,15 @@ static void perf_event_task_event(struct perf_task_event *task_event) | |||
| 4434 | if (ctxn < 0) | 4434 | if (ctxn < 0) |
| 4435 | goto next; | 4435 | goto next; |
| 4436 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); | 4436 | ctx = rcu_dereference(current->perf_event_ctxp[ctxn]); |
| 4437 | if (ctx) | ||
| 4438 | perf_event_task_ctx(ctx, task_event); | ||
| 4437 | } | 4439 | } |
| 4438 | if (ctx) | ||
| 4439 | perf_event_task_ctx(ctx, task_event); | ||
| 4440 | next: | 4440 | next: |
| 4441 | put_cpu_ptr(pmu->pmu_cpu_context); | 4441 | put_cpu_ptr(pmu->pmu_cpu_context); |
| 4442 | } | 4442 | } |
| 4443 | if (task_event->task_ctx) | ||
| 4444 | perf_event_task_ctx(task_event->task_ctx, task_event); | ||
| 4445 | |||
| 4443 | rcu_read_unlock(); | 4446 | rcu_read_unlock(); |
| 4444 | } | 4447 | } |
| 4445 | 4448 | ||
| @@ -5647,6 +5650,7 @@ static void perf_swevent_init_hrtimer(struct perf_event *event) | |||
| 5647 | event->attr.sample_period = NSEC_PER_SEC / freq; | 5650 | event->attr.sample_period = NSEC_PER_SEC / freq; |
| 5648 | hwc->sample_period = event->attr.sample_period; | 5651 | hwc->sample_period = event->attr.sample_period; |
| 5649 | local64_set(&hwc->period_left, hwc->sample_period); | 5652 | local64_set(&hwc->period_left, hwc->sample_period); |
| 5653 | hwc->last_period = hwc->sample_period; | ||
| 5650 | event->attr.freq = 0; | 5654 | event->attr.freq = 0; |
| 5651 | } | 5655 | } |
| 5652 | } | 5656 | } |
diff --git a/kernel/fork.c b/kernel/fork.c
index 8d932b1c9056..1766d324d5e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
| @@ -1141,6 +1141,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1141 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) | 1141 | if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS)) |
| 1142 | return ERR_PTR(-EINVAL); | 1142 | return ERR_PTR(-EINVAL); |
| 1143 | 1143 | ||
| 1144 | if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS)) | ||
| 1145 | return ERR_PTR(-EINVAL); | ||
| 1146 | |||
| 1144 | /* | 1147 | /* |
| 1145 | * Thread groups must share signals as well, and detached threads | 1148 | * Thread groups must share signals as well, and detached threads |
| 1146 | * can only be started up within the thread group. | 1149 | * can only be started up within the thread group. |
| @@ -1807,7 +1810,7 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags) | |||
| 1807 | * If unsharing a user namespace must also unshare the thread. | 1810 | * If unsharing a user namespace must also unshare the thread. |
| 1808 | */ | 1811 | */ |
| 1809 | if (unshare_flags & CLONE_NEWUSER) | 1812 | if (unshare_flags & CLONE_NEWUSER) |
| 1810 | unshare_flags |= CLONE_THREAD; | 1813 | unshare_flags |= CLONE_THREAD | CLONE_FS; |
| 1811 | /* | 1814 | /* |
| 1812 | * If unsharing a pid namespace must also unshare the thread. | 1815 | * If unsharing a pid namespace must also unshare the thread. |
| 1813 | */ | 1816 | */ |
diff --git a/kernel/futex.c b/kernel/futex.c
index f0090a993dab..b26dcfc02c94 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
| @@ -223,7 +223,8 @@ static void drop_futex_key_refs(union futex_key *key) | |||
| 223 | * @rw: mapping needs to be read/write (values: VERIFY_READ, | 223 | * @rw: mapping needs to be read/write (values: VERIFY_READ, |
| 224 | * VERIFY_WRITE) | 224 | * VERIFY_WRITE) |
| 225 | * | 225 | * |
| 226 | * Returns a negative error code or 0 | 226 | * Return: a negative error code or 0 |
| 227 | * | ||
| 227 | * The key words are stored in *key on success. | 228 | * The key words are stored in *key on success. |
| 228 | * | 229 | * |
| 229 | * For shared mappings, it's (page->index, file_inode(vma->vm_file), | 230 | * For shared mappings, it's (page->index, file_inode(vma->vm_file), |
| @@ -705,9 +706,9 @@ lookup_pi_state(u32 uval, struct futex_hash_bucket *hb, | |||
| 705 | * be "current" except in the case of requeue pi. | 706 | * be "current" except in the case of requeue pi. |
| 706 | * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) | 707 | * @set_waiters: force setting the FUTEX_WAITERS bit (1) or not (0) |
| 707 | * | 708 | * |
| 708 | * Returns: | 709 | * Return: |
| 709 | * 0 - ready to wait | 710 | * 0 - ready to wait; |
| 710 | * 1 - acquired the lock | 711 | * 1 - acquired the lock; |
| 711 | * <0 - error | 712 | * <0 - error |
| 712 | * | 713 | * |
| 713 | * The hb->lock and futex_key refs shall be held by the caller. | 714 | * The hb->lock and futex_key refs shall be held by the caller. |
| @@ -1191,9 +1192,9 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, | |||
| 1191 | * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. | 1192 | * then direct futex_lock_pi_atomic() to force setting the FUTEX_WAITERS bit. |
| 1192 | * hb1 and hb2 must be held by the caller. | 1193 | * hb1 and hb2 must be held by the caller. |
| 1193 | * | 1194 | * |
| 1194 | * Returns: | 1195 | * Return: |
| 1195 | * 0 - failed to acquire the lock atomicly | 1196 | * 0 - failed to acquire the lock atomically; |
| 1196 | * 1 - acquired the lock | 1197 | * 1 - acquired the lock; |
| 1197 | * <0 - error | 1198 | * <0 - error |
| 1198 | */ | 1199 | */ |
| 1199 | static int futex_proxy_trylock_atomic(u32 __user *pifutex, | 1200 | static int futex_proxy_trylock_atomic(u32 __user *pifutex, |
| @@ -1254,8 +1255,8 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex, | |||
| 1254 | * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire | 1255 | * Requeue waiters on uaddr1 to uaddr2. In the requeue_pi case, try to acquire |
| 1255 | * uaddr2 atomically on behalf of the top waiter. | 1256 | * uaddr2 atomically on behalf of the top waiter. |
| 1256 | * | 1257 | * |
| 1257 | * Returns: | 1258 | * Return: |
| 1258 | * >=0 - on success, the number of tasks requeued or woken | 1259 | * >=0 - on success, the number of tasks requeued or woken; |
| 1259 | * <0 - on error | 1260 | * <0 - on error |
| 1260 | */ | 1261 | */ |
| 1261 | static int futex_requeue(u32 __user *uaddr1, unsigned int flags, | 1262 | static int futex_requeue(u32 __user *uaddr1, unsigned int flags, |
| @@ -1536,8 +1537,8 @@ static inline void queue_me(struct futex_q *q, struct futex_hash_bucket *hb) | |||
| 1536 | * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must | 1537 | * The q->lock_ptr must not be held by the caller. A call to unqueue_me() must |
| 1537 | * be paired with exactly one earlier call to queue_me(). | 1538 | * be paired with exactly one earlier call to queue_me(). |
| 1538 | * | 1539 | * |
| 1539 | * Returns: | 1540 | * Return: |
| 1540 | * 1 - if the futex_q was still queued (and we removed unqueued it) | 1541 | * 1 - if the futex_q was still queued (and we removed unqueued it); |
| 1541 | * 0 - if the futex_q was already removed by the waking thread | 1542 | * 0 - if the futex_q was already removed by the waking thread |
| 1542 | */ | 1543 | */ |
| 1543 | static int unqueue_me(struct futex_q *q) | 1544 | static int unqueue_me(struct futex_q *q) |
| @@ -1707,9 +1708,9 @@ static long futex_wait_restart(struct restart_block *restart); | |||
| 1707 | * the pi_state owner as well as handle race conditions that may allow us to | 1708 | * the pi_state owner as well as handle race conditions that may allow us to |
| 1708 | * acquire the lock. Must be called with the hb lock held. | 1709 | * acquire the lock. Must be called with the hb lock held. |
| 1709 | * | 1710 | * |
| 1710 | * Returns: | 1711 | * Return: |
| 1711 | * 1 - success, lock taken | 1712 | * 1 - success, lock taken; |
| 1712 | * 0 - success, lock not taken | 1713 | * 0 - success, lock not taken; |
| 1713 | * <0 - on error (-EFAULT) | 1714 | * <0 - on error (-EFAULT) |
| 1714 | */ | 1715 | */ |
| 1715 | static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) | 1716 | static int fixup_owner(u32 __user *uaddr, struct futex_q *q, int locked) |
| @@ -1824,8 +1825,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q, | |||
| 1824 | * Return with the hb lock held and a q.key reference on success, and unlocked | 1825 | * Return with the hb lock held and a q.key reference on success, and unlocked |
| 1825 | * with no q.key reference on failure. | 1826 | * with no q.key reference on failure. |
| 1826 | * | 1827 | * |
| 1827 | * Returns: | 1828 | * Return: |
| 1828 | * 0 - uaddr contains val and hb has been locked | 1829 | * 0 - uaddr contains val and hb has been locked; |
| 1829 | * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked | 1830 | * <1 - -EFAULT or -EWOULDBLOCK (uaddr does not contain val) and hb is unlocked |
| 1830 | */ | 1831 | */ |
| 1831 | static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, | 1832 | static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, |
| @@ -2203,9 +2204,9 @@ pi_faulted: | |||
| 2203 | * the wakeup and return the appropriate error code to the caller. Must be | 2204 | * the wakeup and return the appropriate error code to the caller. Must be |
| 2204 | * called with the hb lock held. | 2205 | * called with the hb lock held. |
| 2205 | * | 2206 | * |
| 2206 | * Returns | 2207 | * Return: |
| 2207 | * 0 - no early wakeup detected | 2208 | * 0 = no early wakeup detected; |
| 2208 | * <0 - -ETIMEDOUT or -ERESTARTNOINTR | 2209 | * <0 = -ETIMEDOUT or -ERESTARTNOINTR |
| 2209 | */ | 2210 | */ |
| 2210 | static inline | 2211 | static inline |
| 2211 | int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | 2212 | int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, |
| @@ -2247,7 +2248,6 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
| 2247 | * @val: the expected value of uaddr | 2248 | * @val: the expected value of uaddr |
| 2248 | * @abs_time: absolute timeout | 2249 | * @abs_time: absolute timeout |
| 2249 | * @bitset: 32 bit wakeup bitset set by userspace, defaults to all | 2250 | * @bitset: 32 bit wakeup bitset set by userspace, defaults to all |
| 2250 | * @clockrt: whether to use CLOCK_REALTIME (1) or CLOCK_MONOTONIC (0) | ||
| 2251 | * @uaddr2: the pi futex we will take prior to returning to user-space | 2251 | * @uaddr2: the pi futex we will take prior to returning to user-space |
| 2252 | * | 2252 | * |
| 2253 | * The caller will wait on uaddr and will be requeued by futex_requeue() to | 2253 | * The caller will wait on uaddr and will be requeued by futex_requeue() to |
| @@ -2258,7 +2258,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
| 2258 | * there was a need to. | 2258 | * there was a need to. |
| 2259 | * | 2259 | * |
| 2260 | * We call schedule in futex_wait_queue_me() when we enqueue and return there | 2260 | * We call schedule in futex_wait_queue_me() when we enqueue and return there |
| 2261 | * via the following: | 2261 | * via the following-- |
| 2262 | * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() | 2262 | * 1) wakeup on uaddr2 after an atomic lock acquisition by futex_requeue() |
| 2263 | * 2) wakeup on uaddr2 after a requeue | 2263 | * 2) wakeup on uaddr2 after a requeue |
| 2264 | * 3) signal | 2264 | * 3) signal |
| @@ -2276,8 +2276,8 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, | |||
| 2276 | * | 2276 | * |
| 2277 | * If 4 or 7, we cleanup and return with -ETIMEDOUT. | 2277 | * If 4 or 7, we cleanup and return with -ETIMEDOUT. |
| 2278 | * | 2278 | * |
| 2279 | * Returns: | 2279 | * Return: |
| 2280 | * 0 - On success | 2280 | * 0 - On success; |
| 2281 | * <0 - On error | 2281 | * <0 - On error |
| 2282 | */ | 2282 | */ |
| 2283 | static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, | 2283 | static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, |
diff --git a/kernel/printk.c b/kernel/printk.c
index 0b31715f335a..abbdd9e2ac82 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
| @@ -63,8 +63,6 @@ void asmlinkage __attribute__((weak)) early_printk(const char *fmt, ...) | |||
| 63 | #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ | 63 | #define MINIMUM_CONSOLE_LOGLEVEL 1 /* Minimum loglevel we let people use */ |
| 64 | #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ | 64 | #define DEFAULT_CONSOLE_LOGLEVEL 7 /* anything MORE serious than KERN_DEBUG */ |
| 65 | 65 | ||
| 66 | DECLARE_WAIT_QUEUE_HEAD(log_wait); | ||
| 67 | |||
| 68 | int console_printk[4] = { | 66 | int console_printk[4] = { |
| 69 | DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ | 67 | DEFAULT_CONSOLE_LOGLEVEL, /* console_loglevel */ |
| 70 | DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ | 68 | DEFAULT_MESSAGE_LOGLEVEL, /* default_message_loglevel */ |
| @@ -224,6 +222,7 @@ struct log { | |||
| 224 | static DEFINE_RAW_SPINLOCK(logbuf_lock); | 222 | static DEFINE_RAW_SPINLOCK(logbuf_lock); |
| 225 | 223 | ||
| 226 | #ifdef CONFIG_PRINTK | 224 | #ifdef CONFIG_PRINTK |
| 225 | DECLARE_WAIT_QUEUE_HEAD(log_wait); | ||
| 227 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ | 226 | /* the next printk record to read by syslog(READ) or /proc/kmsg */ |
| 228 | static u64 syslog_seq; | 227 | static u64 syslog_seq; |
| 229 | static u32 syslog_idx; | 228 | static u32 syslog_idx; |
| @@ -1957,45 +1956,6 @@ int is_console_locked(void) | |||
| 1957 | return console_locked; | 1956 | return console_locked; |
| 1958 | } | 1957 | } |
| 1959 | 1958 | ||
| 1960 | /* | ||
| 1961 | * Delayed printk version, for scheduler-internal messages: | ||
| 1962 | */ | ||
| 1963 | #define PRINTK_BUF_SIZE 512 | ||
| 1964 | |||
| 1965 | #define PRINTK_PENDING_WAKEUP 0x01 | ||
| 1966 | #define PRINTK_PENDING_SCHED 0x02 | ||
| 1967 | |||
| 1968 | static DEFINE_PER_CPU(int, printk_pending); | ||
| 1969 | static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); | ||
| 1970 | |||
| 1971 | static void wake_up_klogd_work_func(struct irq_work *irq_work) | ||
| 1972 | { | ||
| 1973 | int pending = __this_cpu_xchg(printk_pending, 0); | ||
| 1974 | |||
| 1975 | if (pending & PRINTK_PENDING_SCHED) { | ||
| 1976 | char *buf = __get_cpu_var(printk_sched_buf); | ||
| 1977 | printk(KERN_WARNING "[sched_delayed] %s", buf); | ||
| 1978 | } | ||
| 1979 | |||
| 1980 | if (pending & PRINTK_PENDING_WAKEUP) | ||
| 1981 | wake_up_interruptible(&log_wait); | ||
| 1982 | } | ||
| 1983 | |||
| 1984 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { | ||
| 1985 | .func = wake_up_klogd_work_func, | ||
| 1986 | .flags = IRQ_WORK_LAZY, | ||
| 1987 | }; | ||
| 1988 | |||
| 1989 | void wake_up_klogd(void) | ||
| 1990 | { | ||
| 1991 | preempt_disable(); | ||
| 1992 | if (waitqueue_active(&log_wait)) { | ||
| 1993 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | ||
| 1994 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
| 1995 | } | ||
| 1996 | preempt_enable(); | ||
| 1997 | } | ||
| 1998 | |||
| 1999 | static void console_cont_flush(char *text, size_t size) | 1959 | static void console_cont_flush(char *text, size_t size) |
| 2000 | { | 1960 | { |
| 2001 | unsigned long flags; | 1961 | unsigned long flags; |
| @@ -2458,6 +2418,44 @@ static int __init printk_late_init(void) | |||
| 2458 | late_initcall(printk_late_init); | 2418 | late_initcall(printk_late_init); |
| 2459 | 2419 | ||
| 2460 | #if defined CONFIG_PRINTK | 2420 | #if defined CONFIG_PRINTK |
| 2421 | /* | ||
| 2422 | * Delayed printk version, for scheduler-internal messages: | ||
| 2423 | */ | ||
| 2424 | #define PRINTK_BUF_SIZE 512 | ||
| 2425 | |||
| 2426 | #define PRINTK_PENDING_WAKEUP 0x01 | ||
| 2427 | #define PRINTK_PENDING_SCHED 0x02 | ||
| 2428 | |||
| 2429 | static DEFINE_PER_CPU(int, printk_pending); | ||
| 2430 | static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf); | ||
| 2431 | |||
| 2432 | static void wake_up_klogd_work_func(struct irq_work *irq_work) | ||
| 2433 | { | ||
| 2434 | int pending = __this_cpu_xchg(printk_pending, 0); | ||
| 2435 | |||
| 2436 | if (pending & PRINTK_PENDING_SCHED) { | ||
| 2437 | char *buf = __get_cpu_var(printk_sched_buf); | ||
| 2438 | printk(KERN_WARNING "[sched_delayed] %s", buf); | ||
| 2439 | } | ||
| 2440 | |||
| 2441 | if (pending & PRINTK_PENDING_WAKEUP) | ||
| 2442 | wake_up_interruptible(&log_wait); | ||
| 2443 | } | ||
| 2444 | |||
| 2445 | static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = { | ||
| 2446 | .func = wake_up_klogd_work_func, | ||
| 2447 | .flags = IRQ_WORK_LAZY, | ||
| 2448 | }; | ||
| 2449 | |||
| 2450 | void wake_up_klogd(void) | ||
| 2451 | { | ||
| 2452 | preempt_disable(); | ||
| 2453 | if (waitqueue_active(&log_wait)) { | ||
| 2454 | this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP); | ||
| 2455 | irq_work_queue(&__get_cpu_var(wake_up_klogd_work)); | ||
| 2456 | } | ||
| 2457 | preempt_enable(); | ||
| 2458 | } | ||
| 2461 | 2459 | ||
| 2462 | int printk_sched(const char *fmt, ...) | 2460 | int printk_sched(const char *fmt, ...) |
| 2463 | { | 2461 | { |
diff --git a/kernel/signal.c b/kernel/signal.c
index 2ec870a4c3c4..dd72567767d9 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
| @@ -485,6 +485,9 @@ flush_signal_handlers(struct task_struct *t, int force_default) | |||
| 485 | if (force_default || ka->sa.sa_handler != SIG_IGN) | 485 | if (force_default || ka->sa.sa_handler != SIG_IGN) |
| 486 | ka->sa.sa_handler = SIG_DFL; | 486 | ka->sa.sa_handler = SIG_DFL; |
| 487 | ka->sa.sa_flags = 0; | 487 | ka->sa.sa_flags = 0; |
| 488 | #ifdef __ARCH_HAS_SA_RESTORER | ||
| 489 | ka->sa.sa_restorer = NULL; | ||
| 490 | #endif | ||
| 488 | sigemptyset(&ka->sa.sa_mask); | 491 | sigemptyset(&ka->sa.sa_mask); |
| 489 | ka++; | 492 | ka++; |
| 490 | } | 493 | } |
| @@ -2682,7 +2685,7 @@ static int do_sigpending(void *set, unsigned long sigsetsize) | |||
| 2682 | /** | 2685 | /** |
| 2683 | * sys_rt_sigpending - examine a pending signal that has been raised | 2686 | * sys_rt_sigpending - examine a pending signal that has been raised |
| 2684 | * while blocked | 2687 | * while blocked |
| 2685 | * @set: stores pending signals | 2688 | * @uset: stores pending signals |
| 2686 | * @sigsetsize: size of sigset_t type or larger | 2689 | * @sigsetsize: size of sigset_t type or larger |
| 2687 | */ | 2690 | */ |
| 2688 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) | 2691 | SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, uset, size_t, sigsetsize) |
diff --git a/kernel/sys.c b/kernel/sys.c
index 81f56445fba9..39c9c4a2949f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
| @@ -2185,9 +2185,8 @@ SYSCALL_DEFINE3(getcpu, unsigned __user *, cpup, unsigned __user *, nodep, | |||
| 2185 | 2185 | ||
| 2186 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; | 2186 | char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff"; |
| 2187 | 2187 | ||
| 2188 | static int __orderly_poweroff(void) | 2188 | static int __orderly_poweroff(bool force) |
| 2189 | { | 2189 | { |
| 2190 | int argc; | ||
| 2191 | char **argv; | 2190 | char **argv; |
| 2192 | static char *envp[] = { | 2191 | static char *envp[] = { |
| 2193 | "HOME=/", | 2192 | "HOME=/", |
| @@ -2196,20 +2195,40 @@ static int __orderly_poweroff(void) | |||
| 2196 | }; | 2195 | }; |
| 2197 | int ret; | 2196 | int ret; |
| 2198 | 2197 | ||
| 2199 | argv = argv_split(GFP_ATOMIC, poweroff_cmd, &argc); | 2198 | argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL); |
| 2200 | if (argv == NULL) { | 2199 | if (argv) { |
| 2200 | ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC); | ||
| 2201 | argv_free(argv); | ||
| 2202 | } else { | ||
| 2201 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", | 2203 | printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", |
| 2202 | __func__, poweroff_cmd); | 2204 | __func__, poweroff_cmd); |
| 2203 | return -ENOMEM; | 2205 | ret = -ENOMEM; |
| 2204 | } | 2206 | } |
| 2205 | 2207 | ||
| 2206 | ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_WAIT_EXEC, | 2208 | if (ret && force) { |
| 2207 | NULL, NULL, NULL); | 2209 | printk(KERN_WARNING "Failed to start orderly shutdown: " |
| 2208 | argv_free(argv); | 2210 | "forcing the issue\n"); |
| 2211 | /* | ||
| 2212 | * I guess this should try to kick off some daemon to sync and | ||
| 2213 | * poweroff asap. Or not even bother syncing if we're doing an | ||
| 2214 | * emergency shutdown? | ||
| 2215 | */ | ||
| 2216 | emergency_sync(); | ||
| 2217 | kernel_power_off(); | ||
| 2218 | } | ||
| 2209 | 2219 | ||
| 2210 | return ret; | 2220 | return ret; |
| 2211 | } | 2221 | } |
| 2212 | 2222 | ||
| 2223 | static bool poweroff_force; | ||
| 2224 | |||
| 2225 | static void poweroff_work_func(struct work_struct *work) | ||
| 2226 | { | ||
| 2227 | __orderly_poweroff(poweroff_force); | ||
| 2228 | } | ||
| 2229 | |||
| 2230 | static DECLARE_WORK(poweroff_work, poweroff_work_func); | ||
| 2231 | |||
| 2213 | /** | 2232 | /** |
| 2214 | * orderly_poweroff - Trigger an orderly system poweroff | 2233 | * orderly_poweroff - Trigger an orderly system poweroff |
| 2215 | * @force: force poweroff if command execution fails | 2234 | * @force: force poweroff if command execution fails |
| @@ -2219,21 +2238,9 @@ static int __orderly_poweroff(void) | |||
| 2219 | */ | 2238 | */ |
| 2220 | int orderly_poweroff(bool force) | 2239 | int orderly_poweroff(bool force) |
| 2221 | { | 2240 | { |
| 2222 | int ret = __orderly_poweroff(); | 2241 | if (force) /* do not override the pending "true" */ |
| 2223 | 2242 | poweroff_force = true; | |
| 2224 | if (ret && force) { | 2243 | schedule_work(&poweroff_work); |
| 2225 | printk(KERN_WARNING "Failed to start orderly shutdown: " | 2244 | return 0; |
| 2226 | "forcing the issue\n"); | ||
| 2227 | |||
| 2228 | /* | ||
| 2229 | * I guess this should try to kick off some daemon to sync and | ||
| 2230 | * poweroff asap. Or not even bother syncing if we're doing an | ||
| 2231 | * emergency shutdown? | ||
| 2232 | */ | ||
| 2233 | emergency_sync(); | ||
| 2234 | kernel_power_off(); | ||
| 2235 | } | ||
| 2236 | |||
| 2237 | return ret; | ||
| 2238 | } | 2245 | } |
| 2239 | EXPORT_SYMBOL_GPL(orderly_poweroff); | 2246 | EXPORT_SYMBOL_GPL(orderly_poweroff); |
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 192473b22799..fc382d6e2765 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
| @@ -414,24 +414,28 @@ config PROBE_EVENTS | |||
| 414 | def_bool n | 414 | def_bool n |
| 415 | 415 | ||
| 416 | config DYNAMIC_FTRACE | 416 | config DYNAMIC_FTRACE |
| 417 | bool "enable/disable ftrace tracepoints dynamically" | 417 | bool "enable/disable function tracing dynamically" |
| 418 | depends on FUNCTION_TRACER | 418 | depends on FUNCTION_TRACER |
| 419 | depends on HAVE_DYNAMIC_FTRACE | 419 | depends on HAVE_DYNAMIC_FTRACE |
| 420 | default y | 420 | default y |
| 421 | help | 421 | help |
| 422 | This option will modify all the calls to ftrace dynamically | 422 | This option will modify all the calls to function tracing |
| 423 | (will patch them out of the binary image and replace them | 423 | dynamically (will patch them out of the binary image and |
| 424 | with a No-Op instruction) as they are called. A table is | 424 | replace them with a No-Op instruction) on boot up. During |
| 425 | created to dynamically enable them again. | 425 | compile time, a table is made of all the locations that ftrace |
| 426 | can function trace, and this table is linked into the kernel | ||
| 427 | image. When this is enabled, functions can be individually | ||
| 428 | enabled, and the functions not enabled will not affect | ||
| 429 | performance of the system. | ||
| 430 | |||
| 431 | See the files in /sys/kernel/debug/tracing: | ||
| 432 | available_filter_functions | ||
| 433 | set_ftrace_filter | ||
| 434 | set_ftrace_notrace | ||
| 426 | 435 | ||
| 427 | This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but | 436 | This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but |
| 428 | otherwise has native performance as long as no tracing is active. | 437 | otherwise has native performance as long as no tracing is active. |
| 429 | 438 | ||
| 430 | The changes to the code are done by a kernel thread that | ||
| 431 | wakes up once a second and checks to see if any ftrace calls | ||
| 432 | were made. If so, it runs stop_machine (stops all CPUS) | ||
| 433 | and modifies the code to jump over the call to ftrace. | ||
| 434 | |||
| 435 | config DYNAMIC_FTRACE_WITH_REGS | 439 | config DYNAMIC_FTRACE_WITH_REGS |
| 436 | def_bool y | 440 | def_bool y |
| 437 | depends on DYNAMIC_FTRACE | 441 | depends on DYNAMIC_FTRACE |
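
The reworked DYNAMIC_FTRACE help text above points readers at the tracing control files rather than describing the old once-a-second kernel-thread behaviour. As a hedged illustration of how those files are typically driven from userspace, here is a minimal sketch; it assumes debugfs is mounted at /sys/kernel/debug, and the filtered function name "do_sys_open" is only an example (valid names come from available_filter_functions), not something taken from this patch.

```c
#include <stdio.h>

/*
 * Illustrative only: write to the files named in the DYNAMIC_FTRACE help
 * text.  Assumes debugfs is mounted at /sys/kernel/debug; "do_sys_open"
 * is just an example function name.
 */
static int write_tracing_file(const char *name, const char *value)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/kernel/debug/tracing/%s", name);
	f = fopen(path, "w");
	if (!f) {
		perror(path);
		return -1;
	}
	fputs(value, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* Trace only do_sys_open, then enable the function tracer. */
	if (write_tracing_file("set_ftrace_filter", "do_sys_open\n"))
		return 1;
	if (write_tracing_file("current_tracer", "function\n"))
		return 1;
	return 0;
}
```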
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ab25b88aae56..6893d5a2bf08 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
| @@ -3104,8 +3104,8 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, | |||
| 3104 | continue; | 3104 | continue; |
| 3105 | } | 3105 | } |
| 3106 | 3106 | ||
| 3107 | hlist_del(&entry->node); | 3107 | hlist_del_rcu(&entry->node); |
| 3108 | call_rcu(&entry->rcu, ftrace_free_entry_rcu); | 3108 | call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); |
| 3109 | } | 3109 | } |
| 3110 | } | 3110 | } |
| 3111 | __disable_ftrace_function_probe(); | 3111 | __disable_ftrace_function_probe(); |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c2e2c2310374..4f1dade56981 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
| @@ -704,7 +704,7 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 704 | void | 704 | void |
| 705 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | 705 | update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) |
| 706 | { | 706 | { |
| 707 | struct ring_buffer *buf = tr->buffer; | 707 | struct ring_buffer *buf; |
| 708 | 708 | ||
| 709 | if (trace_stop_count) | 709 | if (trace_stop_count) |
| 710 | return; | 710 | return; |
| @@ -719,6 +719,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) | |||
| 719 | 719 | ||
| 720 | arch_spin_lock(&ftrace_max_lock); | 720 | arch_spin_lock(&ftrace_max_lock); |
| 721 | 721 | ||
| 722 | buf = tr->buffer; | ||
| 722 | tr->buffer = max_tr.buffer; | 723 | tr->buffer = max_tr.buffer; |
| 723 | max_tr.buffer = buf; | 724 | max_tr.buffer = buf; |
| 724 | 725 | ||
| @@ -2400,6 +2401,27 @@ static void test_ftrace_alive(struct seq_file *m) | |||
| 2400 | seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); | 2401 | seq_printf(m, "# MAY BE MISSING FUNCTION EVENTS\n"); |
| 2401 | } | 2402 | } |
| 2402 | 2403 | ||
| 2404 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
| 2405 | static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) | ||
| 2406 | { | ||
| 2407 | if (iter->trace->allocated_snapshot) | ||
| 2408 | seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); | ||
| 2409 | else | ||
| 2410 | seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); | ||
| 2411 | |||
| 2412 | seq_printf(m, "# Snapshot commands:\n"); | ||
| 2413 | seq_printf(m, "# echo 0 > snapshot : Clears and frees snapshot buffer\n"); | ||
| 2414 | seq_printf(m, "# echo 1 > snapshot : Allocates snapshot buffer, if not already allocated.\n"); | ||
| 2415 | seq_printf(m, "# Takes a snapshot of the main buffer.\n"); | ||
| 2416 | seq_printf(m, "# echo 2 > snapshot : Clears snapshot buffer (but does not allocate)\n"); | ||
| 2417 | seq_printf(m, "# (Doesn't have to be '2' works with any number that\n"); | ||
| 2418 | seq_printf(m, "# is not a '0' or '1')\n"); | ||
| 2419 | } | ||
| 2420 | #else | ||
| 2421 | /* Should never be called */ | ||
| 2422 | static inline void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) { } | ||
| 2423 | #endif | ||
| 2424 | |||
| 2403 | static int s_show(struct seq_file *m, void *v) | 2425 | static int s_show(struct seq_file *m, void *v) |
| 2404 | { | 2426 | { |
| 2405 | struct trace_iterator *iter = v; | 2427 | struct trace_iterator *iter = v; |
| @@ -2411,7 +2433,9 @@ static int s_show(struct seq_file *m, void *v) | |||
| 2411 | seq_puts(m, "#\n"); | 2433 | seq_puts(m, "#\n"); |
| 2412 | test_ftrace_alive(m); | 2434 | test_ftrace_alive(m); |
| 2413 | } | 2435 | } |
| 2414 | if (iter->trace && iter->trace->print_header) | 2436 | if (iter->snapshot && trace_empty(iter)) |
| 2437 | print_snapshot_help(m, iter); | ||
| 2438 | else if (iter->trace && iter->trace->print_header) | ||
| 2415 | iter->trace->print_header(m); | 2439 | iter->trace->print_header(m); |
| 2416 | else | 2440 | else |
| 2417 | trace_default_header(m); | 2441 | trace_default_header(m); |
| @@ -2857,11 +2881,25 @@ static int set_tracer_option(struct tracer *trace, char *cmp, int neg) | |||
| 2857 | return -EINVAL; | 2881 | return -EINVAL; |
| 2858 | } | 2882 | } |
| 2859 | 2883 | ||
| 2860 | static void set_tracer_flags(unsigned int mask, int enabled) | 2884 | /* Some tracers require overwrite to stay enabled */ |
| 2885 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set) | ||
| 2886 | { | ||
| 2887 | if (tracer->enabled && (mask & TRACE_ITER_OVERWRITE) && !set) | ||
| 2888 | return -1; | ||
| 2889 | |||
| 2890 | return 0; | ||
| 2891 | } | ||
| 2892 | |||
| 2893 | int set_tracer_flag(unsigned int mask, int enabled) | ||
| 2861 | { | 2894 | { |
| 2862 | /* do nothing if flag is already set */ | 2895 | /* do nothing if flag is already set */ |
| 2863 | if (!!(trace_flags & mask) == !!enabled) | 2896 | if (!!(trace_flags & mask) == !!enabled) |
| 2864 | return; | 2897 | return 0; |
| 2898 | |||
| 2899 | /* Give the tracer a chance to approve the change */ | ||
| 2900 | if (current_trace->flag_changed) | ||
| 2901 | if (current_trace->flag_changed(current_trace, mask, !!enabled)) | ||
| 2902 | return -EINVAL; | ||
| 2865 | 2903 | ||
| 2866 | if (enabled) | 2904 | if (enabled) |
| 2867 | trace_flags |= mask; | 2905 | trace_flags |= mask; |
| @@ -2871,18 +2909,24 @@ static void set_tracer_flags(unsigned int mask, int enabled) | |||
| 2871 | if (mask == TRACE_ITER_RECORD_CMD) | 2909 | if (mask == TRACE_ITER_RECORD_CMD) |
| 2872 | trace_event_enable_cmd_record(enabled); | 2910 | trace_event_enable_cmd_record(enabled); |
| 2873 | 2911 | ||
| 2874 | if (mask == TRACE_ITER_OVERWRITE) | 2912 | if (mask == TRACE_ITER_OVERWRITE) { |
| 2875 | ring_buffer_change_overwrite(global_trace.buffer, enabled); | 2913 | ring_buffer_change_overwrite(global_trace.buffer, enabled); |
| 2914 | #ifdef CONFIG_TRACER_MAX_TRACE | ||
| 2915 | ring_buffer_change_overwrite(max_tr.buffer, enabled); | ||
| 2916 | #endif | ||
| 2917 | } | ||
| 2876 | 2918 | ||
| 2877 | if (mask == TRACE_ITER_PRINTK) | 2919 | if (mask == TRACE_ITER_PRINTK) |
| 2878 | trace_printk_start_stop_comm(enabled); | 2920 | trace_printk_start_stop_comm(enabled); |
| 2921 | |||
| 2922 | return 0; | ||
| 2879 | } | 2923 | } |
| 2880 | 2924 | ||
| 2881 | static int trace_set_options(char *option) | 2925 | static int trace_set_options(char *option) |
| 2882 | { | 2926 | { |
| 2883 | char *cmp; | 2927 | char *cmp; |
| 2884 | int neg = 0; | 2928 | int neg = 0; |
| 2885 | int ret = 0; | 2929 | int ret = -ENODEV; |
| 2886 | int i; | 2930 | int i; |
| 2887 | 2931 | ||
| 2888 | cmp = strstrip(option); | 2932 | cmp = strstrip(option); |
| @@ -2892,19 +2936,20 @@ static int trace_set_options(char *option) | |||
| 2892 | cmp += 2; | 2936 | cmp += 2; |
| 2893 | } | 2937 | } |
| 2894 | 2938 | ||
| 2939 | mutex_lock(&trace_types_lock); | ||
| 2940 | |||
| 2895 | for (i = 0; trace_options[i]; i++) { | 2941 | for (i = 0; trace_options[i]; i++) { |
| 2896 | if (strcmp(cmp, trace_options[i]) == 0) { | 2942 | if (strcmp(cmp, trace_options[i]) == 0) { |
| 2897 | set_tracer_flags(1 << i, !neg); | 2943 | ret = set_tracer_flag(1 << i, !neg); |
| 2898 | break; | 2944 | break; |
| 2899 | } | 2945 | } |
| 2900 | } | 2946 | } |
| 2901 | 2947 | ||
| 2902 | /* If no option could be set, test the specific tracer options */ | 2948 | /* If no option could be set, test the specific tracer options */ |
| 2903 | if (!trace_options[i]) { | 2949 | if (!trace_options[i]) |
| 2904 | mutex_lock(&trace_types_lock); | ||
| 2905 | ret = set_tracer_option(current_trace, cmp, neg); | 2950 | ret = set_tracer_option(current_trace, cmp, neg); |
| 2906 | mutex_unlock(&trace_types_lock); | 2951 | |
| 2907 | } | 2952 | mutex_unlock(&trace_types_lock); |
| 2908 | 2953 | ||
| 2909 | return ret; | 2954 | return ret; |
| 2910 | } | 2955 | } |
| @@ -2914,6 +2959,7 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2914 | size_t cnt, loff_t *ppos) | 2959 | size_t cnt, loff_t *ppos) |
| 2915 | { | 2960 | { |
| 2916 | char buf[64]; | 2961 | char buf[64]; |
| 2962 | int ret; | ||
| 2917 | 2963 | ||
| 2918 | if (cnt >= sizeof(buf)) | 2964 | if (cnt >= sizeof(buf)) |
| 2919 | return -EINVAL; | 2965 | return -EINVAL; |
| @@ -2923,7 +2969,9 @@ tracing_trace_options_write(struct file *filp, const char __user *ubuf, | |||
| 2923 | 2969 | ||
| 2924 | buf[cnt] = 0; | 2970 | buf[cnt] = 0; |
| 2925 | 2971 | ||
| 2926 | trace_set_options(buf); | 2972 | ret = trace_set_options(buf); |
| 2973 | if (ret < 0) | ||
| 2974 | return ret; | ||
| 2927 | 2975 | ||
| 2928 | *ppos += cnt; | 2976 | *ppos += cnt; |
| 2929 | 2977 | ||
| @@ -3227,6 +3275,9 @@ static int tracing_set_tracer(const char *buf) | |||
| 3227 | goto out; | 3275 | goto out; |
| 3228 | 3276 | ||
| 3229 | trace_branch_disable(); | 3277 | trace_branch_disable(); |
| 3278 | |||
| 3279 | current_trace->enabled = false; | ||
| 3280 | |||
| 3230 | if (current_trace->reset) | 3281 | if (current_trace->reset) |
| 3231 | current_trace->reset(tr); | 3282 | current_trace->reset(tr); |
| 3232 | 3283 | ||
| @@ -3271,6 +3322,7 @@ static int tracing_set_tracer(const char *buf) | |||
| 3271 | } | 3322 | } |
| 3272 | 3323 | ||
| 3273 | current_trace = t; | 3324 | current_trace = t; |
| 3325 | current_trace->enabled = true; | ||
| 3274 | trace_branch_enable(tr); | 3326 | trace_branch_enable(tr); |
| 3275 | out: | 3327 | out: |
| 3276 | mutex_unlock(&trace_types_lock); | 3328 | mutex_unlock(&trace_types_lock); |
| @@ -4144,8 +4196,6 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 4144 | default: | 4196 | default: |
| 4145 | if (current_trace->allocated_snapshot) | 4197 | if (current_trace->allocated_snapshot) |
| 4146 | tracing_reset_online_cpus(&max_tr); | 4198 | tracing_reset_online_cpus(&max_tr); |
| 4147 | else | ||
| 4148 | ret = -EINVAL; | ||
| 4149 | break; | 4199 | break; |
| 4150 | } | 4200 | } |
| 4151 | 4201 | ||
| @@ -4759,7 +4809,13 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt, | |||
| 4759 | 4809 | ||
| 4760 | if (val != 0 && val != 1) | 4810 | if (val != 0 && val != 1) |
| 4761 | return -EINVAL; | 4811 | return -EINVAL; |
| 4762 | set_tracer_flags(1 << index, val); | 4812 | |
| 4813 | mutex_lock(&trace_types_lock); | ||
| 4814 | ret = set_tracer_flag(1 << index, val); | ||
| 4815 | mutex_unlock(&trace_types_lock); | ||
| 4816 | |||
| 4817 | if (ret < 0) | ||
| 4818 | return ret; | ||
| 4763 | 4819 | ||
| 4764 | *ppos += cnt; | 4820 | *ppos += cnt; |
| 4765 | 4821 | ||
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 57d7e5397d56..2081971367ea 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
| @@ -283,11 +283,15 @@ struct tracer { | |||
| 283 | enum print_line_t (*print_line)(struct trace_iterator *iter); | 283 | enum print_line_t (*print_line)(struct trace_iterator *iter); |
| 284 | /* If you handled the flag setting, return 0 */ | 284 | /* If you handled the flag setting, return 0 */ |
| 285 | int (*set_flag)(u32 old_flags, u32 bit, int set); | 285 | int (*set_flag)(u32 old_flags, u32 bit, int set); |
| 286 | /* Return 0 if OK with change, else return non-zero */ | ||
| 287 | int (*flag_changed)(struct tracer *tracer, | ||
| 288 | u32 mask, int set); | ||
| 286 | struct tracer *next; | 289 | struct tracer *next; |
| 287 | struct tracer_flags *flags; | 290 | struct tracer_flags *flags; |
| 288 | bool print_max; | 291 | bool print_max; |
| 289 | bool use_max_tr; | 292 | bool use_max_tr; |
| 290 | bool allocated_snapshot; | 293 | bool allocated_snapshot; |
| 294 | bool enabled; | ||
| 291 | }; | 295 | }; |
| 292 | 296 | ||
| 293 | 297 | ||
| @@ -943,6 +947,8 @@ extern const char *__stop___trace_bprintk_fmt[]; | |||
| 943 | 947 | ||
| 944 | void trace_printk_init_buffers(void); | 948 | void trace_printk_init_buffers(void); |
| 945 | void trace_printk_start_comm(void); | 949 | void trace_printk_start_comm(void); |
| 950 | int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); | ||
| 951 | int set_tracer_flag(unsigned int mask, int enabled); | ||
| 946 | 952 | ||
| 947 | #undef FTRACE_ENTRY | 953 | #undef FTRACE_ENTRY |
| 948 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ | 954 | #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ |
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 713a2cac4881..443b25b43b4f 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
| @@ -32,7 +32,7 @@ enum { | |||
| 32 | 32 | ||
| 33 | static int trace_type __read_mostly; | 33 | static int trace_type __read_mostly; |
| 34 | 34 | ||
| 35 | static int save_lat_flag; | 35 | static int save_flags; |
| 36 | 36 | ||
| 37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); | 37 | static void stop_irqsoff_tracer(struct trace_array *tr, int graph); |
| 38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); | 38 | static int start_irqsoff_tracer(struct trace_array *tr, int graph); |
| @@ -558,8 +558,11 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph) | |||
| 558 | 558 | ||
| 559 | static void __irqsoff_tracer_init(struct trace_array *tr) | 559 | static void __irqsoff_tracer_init(struct trace_array *tr) |
| 560 | { | 560 | { |
| 561 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | 561 | save_flags = trace_flags; |
| 562 | trace_flags |= TRACE_ITER_LATENCY_FMT; | 562 | |
| 563 | /* non overwrite screws up the latency tracers */ | ||
| 564 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | ||
| 565 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | ||
| 563 | 566 | ||
| 564 | tracing_max_latency = 0; | 567 | tracing_max_latency = 0; |
| 565 | irqsoff_trace = tr; | 568 | irqsoff_trace = tr; |
| @@ -573,10 +576,13 @@ static void __irqsoff_tracer_init(struct trace_array *tr) | |||
| 573 | 576 | ||
| 574 | static void irqsoff_tracer_reset(struct trace_array *tr) | 577 | static void irqsoff_tracer_reset(struct trace_array *tr) |
| 575 | { | 578 | { |
| 579 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; | ||
| 580 | int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; | ||
| 581 | |||
| 576 | stop_irqsoff_tracer(tr, is_graph()); | 582 | stop_irqsoff_tracer(tr, is_graph()); |
| 577 | 583 | ||
| 578 | if (!save_lat_flag) | 584 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); |
| 579 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 585 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); |
| 580 | } | 586 | } |
| 581 | 587 | ||
| 582 | static void irqsoff_tracer_start(struct trace_array *tr) | 588 | static void irqsoff_tracer_start(struct trace_array *tr) |
| @@ -609,6 +615,7 @@ static struct tracer irqsoff_tracer __read_mostly = | |||
| 609 | .print_line = irqsoff_print_line, | 615 | .print_line = irqsoff_print_line, |
| 610 | .flags = &tracer_flags, | 616 | .flags = &tracer_flags, |
| 611 | .set_flag = irqsoff_set_flag, | 617 | .set_flag = irqsoff_set_flag, |
| 618 | .flag_changed = trace_keep_overwrite, | ||
| 612 | #ifdef CONFIG_FTRACE_SELFTEST | 619 | #ifdef CONFIG_FTRACE_SELFTEST |
| 613 | .selftest = trace_selftest_startup_irqsoff, | 620 | .selftest = trace_selftest_startup_irqsoff, |
| 614 | #endif | 621 | #endif |
| @@ -642,6 +649,7 @@ static struct tracer preemptoff_tracer __read_mostly = | |||
| 642 | .print_line = irqsoff_print_line, | 649 | .print_line = irqsoff_print_line, |
| 643 | .flags = &tracer_flags, | 650 | .flags = &tracer_flags, |
| 644 | .set_flag = irqsoff_set_flag, | 651 | .set_flag = irqsoff_set_flag, |
| 652 | .flag_changed = trace_keep_overwrite, | ||
| 645 | #ifdef CONFIG_FTRACE_SELFTEST | 653 | #ifdef CONFIG_FTRACE_SELFTEST |
| 646 | .selftest = trace_selftest_startup_preemptoff, | 654 | .selftest = trace_selftest_startup_preemptoff, |
| 647 | #endif | 655 | #endif |
| @@ -677,6 +685,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = | |||
| 677 | .print_line = irqsoff_print_line, | 685 | .print_line = irqsoff_print_line, |
| 678 | .flags = &tracer_flags, | 686 | .flags = &tracer_flags, |
| 679 | .set_flag = irqsoff_set_flag, | 687 | .set_flag = irqsoff_set_flag, |
| 688 | .flag_changed = trace_keep_overwrite, | ||
| 680 | #ifdef CONFIG_FTRACE_SELFTEST | 689 | #ifdef CONFIG_FTRACE_SELFTEST |
| 681 | .selftest = trace_selftest_startup_preemptirqsoff, | 690 | .selftest = trace_selftest_startup_preemptirqsoff, |
| 682 | #endif | 691 | #endif |
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index 75aa97fbe1a1..fde652c9a511 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
| @@ -36,7 +36,7 @@ static void __wakeup_reset(struct trace_array *tr); | |||
| 36 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace); | 36 | static int wakeup_graph_entry(struct ftrace_graph_ent *trace); |
| 37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); | 37 | static void wakeup_graph_return(struct ftrace_graph_ret *trace); |
| 38 | 38 | ||
| 39 | static int save_lat_flag; | 39 | static int save_flags; |
| 40 | 40 | ||
| 41 | #define TRACE_DISPLAY_GRAPH 1 | 41 | #define TRACE_DISPLAY_GRAPH 1 |
| 42 | 42 | ||
| @@ -540,8 +540,11 @@ static void stop_wakeup_tracer(struct trace_array *tr) | |||
| 540 | 540 | ||
| 541 | static int __wakeup_tracer_init(struct trace_array *tr) | 541 | static int __wakeup_tracer_init(struct trace_array *tr) |
| 542 | { | 542 | { |
| 543 | save_lat_flag = trace_flags & TRACE_ITER_LATENCY_FMT; | 543 | save_flags = trace_flags; |
| 544 | trace_flags |= TRACE_ITER_LATENCY_FMT; | 544 | |
| 545 | /* non overwrite screws up the latency tracers */ | ||
| 546 | set_tracer_flag(TRACE_ITER_OVERWRITE, 1); | ||
| 547 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); | ||
| 545 | 548 | ||
| 546 | tracing_max_latency = 0; | 549 | tracing_max_latency = 0; |
| 547 | wakeup_trace = tr; | 550 | wakeup_trace = tr; |
| @@ -563,12 +566,15 @@ static int wakeup_rt_tracer_init(struct trace_array *tr) | |||
| 563 | 566 | ||
| 564 | static void wakeup_tracer_reset(struct trace_array *tr) | 567 | static void wakeup_tracer_reset(struct trace_array *tr) |
| 565 | { | 568 | { |
| 569 | int lat_flag = save_flags & TRACE_ITER_LATENCY_FMT; | ||
| 570 | int overwrite_flag = save_flags & TRACE_ITER_OVERWRITE; | ||
| 571 | |||
| 566 | stop_wakeup_tracer(tr); | 572 | stop_wakeup_tracer(tr); |
| 567 | /* make sure we put back any tasks we are tracing */ | 573 | /* make sure we put back any tasks we are tracing */ |
| 568 | wakeup_reset(tr); | 574 | wakeup_reset(tr); |
| 569 | 575 | ||
| 570 | if (!save_lat_flag) | 576 | set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); |
| 571 | trace_flags &= ~TRACE_ITER_LATENCY_FMT; | 577 | set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); |
| 572 | } | 578 | } |
| 573 | 579 | ||
| 574 | static void wakeup_tracer_start(struct trace_array *tr) | 580 | static void wakeup_tracer_start(struct trace_array *tr) |
| @@ -594,6 +600,7 @@ static struct tracer wakeup_tracer __read_mostly = | |||
| 594 | .print_line = wakeup_print_line, | 600 | .print_line = wakeup_print_line, |
| 595 | .flags = &tracer_flags, | 601 | .flags = &tracer_flags, |
| 596 | .set_flag = wakeup_set_flag, | 602 | .set_flag = wakeup_set_flag, |
| 603 | .flag_changed = trace_keep_overwrite, | ||
| 597 | #ifdef CONFIG_FTRACE_SELFTEST | 604 | #ifdef CONFIG_FTRACE_SELFTEST |
| 598 | .selftest = trace_selftest_startup_wakeup, | 605 | .selftest = trace_selftest_startup_wakeup, |
| 599 | #endif | 606 | #endif |
| @@ -615,6 +622,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = | |||
| 615 | .print_line = wakeup_print_line, | 622 | .print_line = wakeup_print_line, |
| 616 | .flags = &tracer_flags, | 623 | .flags = &tracer_flags, |
| 617 | .set_flag = wakeup_set_flag, | 624 | .set_flag = wakeup_set_flag, |
| 625 | .flag_changed = trace_keep_overwrite, | ||
| 618 | #ifdef CONFIG_FTRACE_SELFTEST | 626 | #ifdef CONFIG_FTRACE_SELFTEST |
| 619 | .selftest = trace_selftest_startup_wakeup, | 627 | .selftest = trace_selftest_startup_wakeup, |
| 620 | #endif | 628 | #endif |
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 8b650837083e..b14f4d342043 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <linux/uaccess.h> | 21 | #include <linux/uaccess.h> |
| 22 | #include <linux/ctype.h> | 22 | #include <linux/ctype.h> |
| 23 | #include <linux/projid.h> | 23 | #include <linux/projid.h> |
| 24 | #include <linux/fs_struct.h> | ||
| 24 | 25 | ||
| 25 | static struct kmem_cache *user_ns_cachep __read_mostly; | 26 | static struct kmem_cache *user_ns_cachep __read_mostly; |
| 26 | 27 | ||
| @@ -837,6 +838,9 @@ static int userns_install(struct nsproxy *nsproxy, void *ns) | |||
| 837 | if (atomic_read(¤t->mm->mm_users) > 1) | 838 | if (atomic_read(¤t->mm->mm_users) > 1) |
| 838 | return -EINVAL; | 839 | return -EINVAL; |
| 839 | 840 | ||
| 841 | if (current->fs->users != 1) | ||
| 842 | return -EINVAL; | ||
| 843 | |||
| 840 | if (!ns_capable(user_ns, CAP_SYS_ADMIN)) | 844 | if (!ns_capable(user_ns, CAP_SYS_ADMIN)) |
| 841 | return -EPERM; | 845 | return -EPERM; |
| 842 | 846 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 81f2457811eb..b48cd597145d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
| @@ -457,11 +457,12 @@ static int worker_pool_assign_id(struct worker_pool *pool) | |||
| 457 | int ret; | 457 | int ret; |
| 458 | 458 | ||
| 459 | mutex_lock(&worker_pool_idr_mutex); | 459 | mutex_lock(&worker_pool_idr_mutex); |
| 460 | idr_pre_get(&worker_pool_idr, GFP_KERNEL); | 460 | ret = idr_alloc(&worker_pool_idr, pool, 0, 0, GFP_KERNEL); |
| 461 | ret = idr_get_new(&worker_pool_idr, pool, &pool->id); | 461 | if (ret >= 0) |
| 462 | pool->id = ret; | ||
| 462 | mutex_unlock(&worker_pool_idr_mutex); | 463 | mutex_unlock(&worker_pool_idr_mutex); |
| 463 | 464 | ||
| 464 | return ret; | 465 | return ret < 0 ? ret : 0; |
| 465 | } | 466 | } |
| 466 | 467 | ||
| 467 | /* | 468 | /* |
| @@ -3446,28 +3447,34 @@ static void wq_unbind_fn(struct work_struct *work) | |||
| 3446 | 3447 | ||
| 3447 | spin_unlock_irq(&pool->lock); | 3448 | spin_unlock_irq(&pool->lock); |
| 3448 | mutex_unlock(&pool->assoc_mutex); | 3449 | mutex_unlock(&pool->assoc_mutex); |
| 3449 | } | ||
| 3450 | 3450 | ||
| 3451 | /* | 3451 | /* |
| 3452 | * Call schedule() so that we cross rq->lock and thus can guarantee | 3452 | * Call schedule() so that we cross rq->lock and thus can |
| 3453 | * sched callbacks see the %WORKER_UNBOUND flag. This is necessary | 3453 | * guarantee sched callbacks see the %WORKER_UNBOUND flag. |
| 3454 | * as scheduler callbacks may be invoked from other cpus. | 3454 | * This is necessary as scheduler callbacks may be invoked |
| 3455 | */ | 3455 | * from other cpus. |
| 3456 | schedule(); | 3456 | */ |
| 3457 | schedule(); | ||
| 3457 | 3458 | ||
| 3458 | /* | 3459 | /* |
| 3459 | * Sched callbacks are disabled now. Zap nr_running. After this, | 3460 | * Sched callbacks are disabled now. Zap nr_running. |
| 3460 | * nr_running stays zero and need_more_worker() and keep_working() | 3461 | * After this, nr_running stays zero and need_more_worker() |
| 3461 | * are always true as long as the worklist is not empty. Pools on | 3462 | * and keep_working() are always true as long as the |
| 3462 | * @cpu now behave as unbound (in terms of concurrency management) | 3463 | * worklist is not empty. This pool now behaves as an |
| 3463 | * pools which are served by workers tied to the CPU. | 3464 | * unbound (in terms of concurrency management) pool which |
| 3464 | * | 3465 | * are served by workers tied to the pool. |
| 3465 | * On return from this function, the current worker would trigger | 3466 | */ |
| 3466 | * unbound chain execution of pending work items if other workers | ||
| 3467 | * didn't already. | ||
| 3468 | */ | ||
| 3469 | for_each_std_worker_pool(pool, cpu) | ||
| 3470 | atomic_set(&pool->nr_running, 0); | 3467 | atomic_set(&pool->nr_running, 0); |
| 3468 | |||
| 3469 | /* | ||
| 3470 | * With concurrency management just turned off, a busy | ||
| 3471 | * worker blocking could lead to lengthy stalls. Kick off | ||
| 3472 | * unbound chain execution of currently pending work items. | ||
| 3473 | */ | ||
| 3474 | spin_lock_irq(&pool->lock); | ||
| 3475 | wake_up_worker(pool); | ||
| 3476 | spin_unlock_irq(&pool->lock); | ||
| 3477 | } | ||
| 3471 | } | 3478 | } |
| 3472 | 3479 | ||
| 3473 | /* | 3480 | /* |
