author		David S. Miller <davem@davemloft.net>	2014-05-12 13:19:14 -0400
committer	David S. Miller <davem@davemloft.net>	2014-05-12 13:19:14 -0400
commit		5f013c9bc70214dcacd5fbed5a06c217d6ff9c59 (patch)
tree		34c3a633000e03bca57d0ce55d8759f86edecc03 /kernel
parent		51ee42efa0829cf9e46f8e1c0ab7a9ab6facf3f2 (diff)
parent		1a466ae96e9f749d02a73315a3e66375e61a61dd (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/ethernet/altera/altera_sgdma.c
	net/netlink/af_netlink.c
	net/sched/cls_api.c
	net/sched/sch_api.c

The netlink conflict dealt with moving to netlink_capable() and netlink_ns_capable() in the 'net' tree vs. supporting 'tc' operations in non-init namespaces. These were simple transformations from netlink_capable to netlink_ns_capable.

The Altera driver conflict was simply code removal overlapping some void pointer cast cleanups in net-next.

Signed-off-by: David S. Miller <davem@davemloft.net>
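For readers unfamiliar with the resolution pattern, the netlink_capable to netlink_ns_capable rewrite looks roughly like the sketch below. This is an illustrative reconstruction, not a hunk from this merge: the capability bit (CAP_NET_ADMIN) and the namespace argument (net->user_ns) are assumptions about what the 'tc' call sites in net/sched/cls_api.c and net/sched/sch_api.c use.

 	/* sketch only; 'net' is assumed to be the relevant struct net, e.g. sock_net(skb->sk) */
-	if (!netlink_capable(skb, CAP_NET_ADMIN))
+	if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN))
 		return -EPERM;

The kernel/audit.c hunk below shows the related in-tree conversion from capable() to netlink_capable().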
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit.c				 4
-rw-r--r--	kernel/hrtimer.c			22
-rw-r--r--	kernel/irq/irqdesc.c			 7
-rw-r--r--	kernel/irq/manage.c			17
-rw-r--r--	kernel/module.c				 6
-rw-r--r--	kernel/power/suspend.c			 3
-rw-r--r--	kernel/softirq.c			 5
-rw-r--r--	kernel/timer.c				 2
-rw-r--r--	kernel/trace/ftrace.c			27
-rw-r--r--	kernel/trace/trace_events_trigger.c	 2
10 files changed, 54 insertions, 41 deletions
diff --git a/kernel/audit.c b/kernel/audit.c
index 33531d72e4a2..81f5f49479da 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -675,13 +675,13 @@ static int audit_netlink_ok(struct sk_buff *skb, u16 msg_type)
 		if ((task_active_pid_ns(current) != &init_pid_ns))
 			return -EPERM;
 
-		if (!capable(CAP_AUDIT_CONTROL))
+		if (!netlink_capable(skb, CAP_AUDIT_CONTROL))
 			err = -EPERM;
 		break;
 	case AUDIT_USER:
 	case AUDIT_FIRST_USER_MSG ... AUDIT_LAST_USER_MSG:
 	case AUDIT_FIRST_USER_MSG2 ... AUDIT_LAST_USER_MSG2:
-		if (!capable(CAP_AUDIT_WRITE))
+		if (!netlink_capable(skb, CAP_AUDIT_WRITE))
 			err = -EPERM;
 		break;
 	default:  /* bad msg */
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d55092ceee29..6b715c0af1b1 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -234,6 +234,11 @@ again:
 			goto again;
 		}
 		timer->base = new_base;
+	} else {
+		if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
+			cpu = this_cpu;
+			goto again;
+		}
 	}
 	return new_base;
 }
@@ -569,6 +574,23 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 
 	cpu_base->expires_next.tv64 = expires_next.tv64;
 
+	/*
+	 * If a hang was detected in the last timer interrupt then we
+	 * leave the hang delay active in the hardware. We want the
+	 * system to make progress. That also prevents the following
+	 * scenario:
+	 * T1 expires 50ms from now
+	 * T2 expires 5s from now
+	 *
+	 * T1 is removed, so this code is called and would reprogram
+	 * the hardware to 5s from now. Any hrtimer_start after that
+	 * will not reprogram the hardware due to hang_detected being
+	 * set. So we'd effectivly block all timers until the T2 event
+	 * fires.
+	 */
+	if (cpu_base->hang_detected)
+		return;
+
 	if (cpu_base->expires_next.tv64 != KTIME_MAX)
 		tick_program_event(cpu_base->expires_next, 1);
 }
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index a7174617616b..bb07f2928f4b 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -363,6 +363,13 @@ __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
 		if (from > irq)
 			return -EINVAL;
 		from = irq;
+	} else {
+		/*
+		 * For interrupts which are freely allocated the
+		 * architecture can force a lower bound to the @from
+		 * argument. x86 uses this to exclude the GSI space.
+		 */
+		from = arch_dynirq_lower_bound(from);
 	}
 
 	mutex_lock(&sparse_irq_lock);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 2486a4c1a710..d34131ca372b 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -180,7 +180,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	int ret;
 
-	ret = chip->irq_set_affinity(data, mask, false);
+	ret = chip->irq_set_affinity(data, mask, force);
 	switch (ret) {
 	case IRQ_SET_MASK_OK:
 		cpumask_copy(data->affinity, mask);
@@ -192,7 +192,8 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 	return ret;
 }
 
-int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
+int irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask,
+			    bool force)
 {
 	struct irq_chip *chip = irq_data_get_irq_chip(data);
 	struct irq_desc *desc = irq_data_to_desc(data);
@@ -202,7 +203,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 		return -EINVAL;
 
 	if (irq_can_move_pcntxt(data)) {
-		ret = irq_do_set_affinity(data, mask, false);
+		ret = irq_do_set_affinity(data, mask, force);
 	} else {
 		irqd_set_move_pending(data);
 		irq_copy_pending(desc, mask);
@@ -217,13 +218,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
 	return ret;
 }
 
-/**
- *	irq_set_affinity - Set the irq affinity of a given irq
- *	@irq:		Interrupt to set affinity
- *	@mask:		cpumask
- *
- */
-int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
+int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
@@ -233,7 +228,7 @@ int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
 		return -EINVAL;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
+	ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
diff --git a/kernel/module.c b/kernel/module.c
index 11869408f79b..079c4615607d 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -815,9 +815,6 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		return -EFAULT;
 	name[MODULE_NAME_LEN-1] = '\0';
 
-	if (!(flags & O_NONBLOCK))
-		pr_warn("waiting module removal not supported: please upgrade\n");
-
 	if (mutex_lock_interruptible(&module_mutex) != 0)
 		return -EINTR;
 
@@ -3271,6 +3268,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
 
 	dynamic_debug_setup(info->debug, info->num_debug);
 
+	/* Ftrace init must be called in the MODULE_STATE_UNFORMED state */
+	ftrace_module_init(mod);
+
 	/* Finally it's fully formed, ready to start executing. */
 	err = complete_formation(mod, info);
 	if (err)
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index c3ad9cafe930..8233cd4047d7 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -14,6 +14,7 @@
 #include <linux/init.h>
 #include <linux/console.h>
 #include <linux/cpu.h>
+#include <linux/cpuidle.h>
 #include <linux/syscalls.h>
 #include <linux/gfp.h>
 #include <linux/io.h>
@@ -53,7 +54,9 @@ static void freeze_begin(void)
 
 static void freeze_enter(void)
 {
+	cpuidle_resume();
 	wait_event(suspend_freeze_wait_head, suspend_freeze_wake);
+	cpuidle_pause();
 }
 
 void freeze_wake(void)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index b50990a5bea0..33e4648ae0e7 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -779,3 +779,8 @@ int __init __weak arch_early_irq_init(void)
 {
 	return 0;
 }
+
+unsigned int __weak arch_dynirq_lower_bound(unsigned int from)
+{
+	return from;
+}
diff --git a/kernel/timer.c b/kernel/timer.c
index 87bd529879c2..3bb01a323b2a 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -838,7 +838,7 @@ unsigned long apply_slack(struct timer_list *timer, unsigned long expires)
 
 	bit = find_last_bit(&mask, BITS_PER_LONG);
 
-	mask = (1 << bit) - 1;
+	mask = (1UL << bit) - 1;
 
 	expires_limit = expires_limit & ~(mask);
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1fd4b9479210..4a54a25afa2f 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4330,16 +4330,11 @@ static void ftrace_init_module(struct module *mod,
 	ftrace_process_locs(mod, start, end);
 }
 
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
+void ftrace_module_init(struct module *mod)
 {
-	struct module *mod = data;
-
-	if (val == MODULE_STATE_COMING)
-		ftrace_init_module(mod, mod->ftrace_callsites,
-				   mod->ftrace_callsites +
-				   mod->num_ftrace_callsites);
-	return 0;
+	ftrace_init_module(mod, mod->ftrace_callsites,
+			   mod->ftrace_callsites +
+			   mod->num_ftrace_callsites);
 }
 
 static int ftrace_module_notify_exit(struct notifier_block *self,
@@ -4353,11 +4348,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 	return 0;
 }
 #else
-static int ftrace_module_notify_enter(struct notifier_block *self,
-				      unsigned long val, void *data)
-{
-	return 0;
-}
 static int ftrace_module_notify_exit(struct notifier_block *self,
 				     unsigned long val, void *data)
 {
@@ -4365,11 +4355,6 @@ static int ftrace_module_notify_exit(struct notifier_block *self,
 }
 #endif /* CONFIG_MODULES */
 
-struct notifier_block ftrace_module_enter_nb = {
-	.notifier_call = ftrace_module_notify_enter,
-	.priority = INT_MAX,	/* Run before anything that can use kprobes */
-};
-
 struct notifier_block ftrace_module_exit_nb = {
 	.notifier_call = ftrace_module_notify_exit,
 	.priority = INT_MIN,	/* Run after anything that can remove kprobes */
@@ -4403,10 +4388,6 @@ void __init ftrace_init(void)
 				  __start_mcount_loc,
 				  __stop_mcount_loc);
 
-	ret = register_module_notifier(&ftrace_module_enter_nb);
-	if (ret)
-		pr_warning("Failed to register trace ftrace module enter notifier\n");
-
 	ret = register_module_notifier(&ftrace_module_exit_nb);
 	if (ret)
 		pr_warning("Failed to register trace ftrace module exit notifier\n");
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index 925f537f07d1..4747b476a030 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -77,7 +77,7 @@ event_triggers_call(struct ftrace_event_file *file, void *rec)
 			data->ops->func(data);
 			continue;
 		}
-		filter = rcu_dereference(data->filter);
+		filter = rcu_dereference_sched(data->filter);
 		if (filter && !filter_match_preds(filter, rec))
 			continue;
 		if (data->cmd_ops->post_trigger) {