Diffstat (limited to 'kernel')
 kernel/debug/kdb/kdb_main.c | 21
 kernel/exit.c               |  8
 kernel/futex.c              |  3
 kernel/futex_compat.c       |  3
 kernel/latencytop.c         | 17
 kernel/perf_event.c         | 42
 kernel/pm_qos_params.c      |  4
 kernel/power/Kconfig        |  4
 kernel/printk.c             | 21
 kernel/range.c              |  2
 kernel/relay.c              | 15
 kernel/sched.c              | 39
 kernel/sched_fair.c         | 40
 kernel/sched_stoptask.c     |  4
 kernel/sysctl.c             |  9
 kernel/trace/Kconfig        |  2
 kernel/trace/blktrace.c     |  4
 kernel/trace/trace.c        |  1
 kernel/watchdog.c           |  2
 19 files changed, 168 insertions(+), 73 deletions(-)
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 37755d62192..a6e72976682 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -82,7 +82,7 @@ static kdbtab_t kdb_base_commands[50];
 #define for_each_kdbcmd(cmd, num) \
 	for ((cmd) = kdb_base_commands, (num) = 0; \
 	     num < kdb_max_commands; \
-	     num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++, num++)
+	     num++, num == KDB_BASE_CMD_MAX ? cmd = kdb_commands : cmd++)
 
 typedef struct _kdbmsg {
 	int	km_diag;	/* kdb diagnostic */
@@ -646,7 +646,7 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
 	}
 	if (!s->usable)
 		return KDB_NOTIMP;
-	s->command = kmalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
+	s->command = kzalloc((s->count + 1) * sizeof(*(s->command)), GFP_KDB);
 	if (!s->command) {
 		kdb_printf("Could not allocate new kdb_defcmd table for %s\n",
 			   cmdstr);
@@ -2361,7 +2361,7 @@ static int kdb_pid(int argc, const char **argv)
  */
 static int kdb_ll(int argc, const char **argv)
 {
-	int diag;
+	int diag = 0;
 	unsigned long addr;
 	long offset = 0;
 	unsigned long va;
@@ -2400,20 +2400,21 @@ static int kdb_ll(int argc, const char **argv)
 		char buf[80];
 
 		if (KDB_FLAG(CMD_INTERRUPT))
-			return 0;
+			goto out;
 
 		sprintf(buf, "%s " kdb_machreg_fmt "\n", command, va);
 		diag = kdb_parse(buf);
 		if (diag)
-			return diag;
+			goto out;
 
 		addr = va + linkoffset;
 		if (kdb_getword(&va, addr, sizeof(va)))
-			return 0;
+			goto out;
 	}
-	kfree(command);
 
-	return 0;
+out:
+	kfree(command);
+	return diag;
 }
 
 static int kdb_kgdb(int argc, const char **argv)
@@ -2739,13 +2740,13 @@ int kdb_register_repeat(char *cmd,
 	}
 	if (kdb_commands) {
 		memcpy(new, kdb_commands,
-		       kdb_max_commands * sizeof(*new));
+		       (kdb_max_commands - KDB_BASE_CMD_MAX) * sizeof(*new));
 		kfree(kdb_commands);
 	}
 	memset(new + kdb_max_commands, 0,
 	       kdb_command_extend * sizeof(*new));
 	kdb_commands = new;
-	kp = kdb_commands + kdb_max_commands;
+	kp = kdb_commands + kdb_max_commands - KDB_BASE_CMD_MAX;
 	kdb_max_commands += kdb_command_extend;
 }
 
diff --git a/kernel/exit.c b/kernel/exit.c
index b194febf579..21aa7b3001f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -96,6 +96,14 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->tty = NULL;
 	} else {
 		/*
+		 * This can only happen if the caller is de_thread().
+		 * FIXME: this is the temporary hack, we should teach
+		 * posix-cpu-timers to handle this case correctly.
+		 */
+		if (unlikely(has_group_leader_pid(tsk)))
+			posix_cpu_timers_exit_group(tsk);
+
+		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
 		 */
diff --git a/kernel/futex.c b/kernel/futex.c
index 6c683b37f2c..40a8777a27d 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2489,7 +2489,8 @@ void exit_robust_list(struct task_struct *curr)
 {
 	struct robust_list_head __user *head = curr->robust_list;
 	struct robust_list __user *entry, *next_entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	unsigned int uninitialized_var(next_pi);
 	unsigned long futex_offset;
 	int rc;
 
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index 06da4dfc339..a7934ac75e5 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -49,7 +49,8 @@ void compat_exit_robust_list(struct task_struct *curr)
 {
 	struct compat_robust_list_head __user *head = curr->compat_robust_list;
 	struct robust_list __user *entry, *next_entry, *pending;
-	unsigned int limit = ROBUST_LIST_LIMIT, pi, next_pi, pip;
+	unsigned int limit = ROBUST_LIST_LIMIT, pi, pip;
+	unsigned int uninitialized_var(next_pi);
 	compat_uptr_t uentry, next_uentry, upending;
 	compat_long_t futex_offset;
 	int rc;
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index 877fb306d41..17110a4a4fc 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -194,14 +194,7 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 
 	account_global_scheduler_latency(tsk, &lat);
 
-	/*
-	 * short term hack; if we're > 32 we stop; future we recycle:
-	 */
-	tsk->latency_record_count++;
-	if (tsk->latency_record_count >= LT_SAVECOUNT)
-		goto out_unlock;
-
-	for (i = 0; i < LT_SAVECOUNT; i++) {
+	for (i = 0; i < tsk->latency_record_count; i++) {
 		struct latency_record *mylat;
 		int same = 1;
 
@@ -227,8 +220,14 @@ __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
 		}
 	}
 
+	/*
+	 * short term hack; if we're > 32 we stop; future we recycle:
+	 */
+	if (tsk->latency_record_count >= LT_SAVECOUNT)
+		goto out_unlock;
+
 	/* Allocated a new one: */
-	i = tsk->latency_record_count;
+	i = tsk->latency_record_count++;
 	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));
 
 out_unlock:
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 517d827f498..cb6c0d2af68 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -674,6 +674,8 @@ event_sched_in(struct perf_event *event,
 
 	event->tstamp_running += ctx->time - event->tstamp_stopped;
 
+	event->shadow_ctx_time = ctx->time - ctx->timestamp;
+
 	if (!is_software_event(event))
 		cpuctx->active_oncpu++;
 	ctx->nr_active++;
@@ -3396,7 +3398,8 @@ static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
 }
 
 static void perf_output_read_one(struct perf_output_handle *handle,
-				 struct perf_event *event)
+				 struct perf_event *event,
+				 u64 enabled, u64 running)
 {
 	u64 read_format = event->attr.read_format;
 	u64 values[4];
@@ -3404,11 +3407,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
 
 	values[n++] = perf_event_count(event);
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		values[n++] = event->total_time_enabled +
+		values[n++] = enabled +
 			atomic64_read(&event->child_total_time_enabled);
 	}
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		values[n++] = event->total_time_running +
+		values[n++] = running +
 			atomic64_read(&event->child_total_time_running);
 	}
 	if (read_format & PERF_FORMAT_ID)
@@ -3421,7 +3424,8 @@ static void perf_output_read_one(struct perf_output_handle *handle,
  * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
  */
 static void perf_output_read_group(struct perf_output_handle *handle,
-			    struct perf_event *event)
+			    struct perf_event *event,
+			    u64 enabled, u64 running)
 {
 	struct perf_event *leader = event->group_leader, *sub;
 	u64 read_format = event->attr.read_format;
@@ -3431,10 +3435,10 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 	values[n++] = 1 + leader->nr_siblings;
 
 	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = leader->total_time_enabled;
+		values[n++] = enabled;
 
 	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = leader->total_time_running;
+		values[n++] = running;
 
 	if (leader != event)
 		leader->pmu->read(leader);
@@ -3459,13 +3463,35 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 	}
 }
 
+#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
+				 PERF_FORMAT_TOTAL_TIME_RUNNING)
+
 static void perf_output_read(struct perf_output_handle *handle,
 			     struct perf_event *event)
 {
+	u64 enabled = 0, running = 0, now, ctx_time;
+	u64 read_format = event->attr.read_format;
+
+	/*
+	 * compute total_time_enabled, total_time_running
+	 * based on snapshot values taken when the event
+	 * was last scheduled in.
+	 *
+	 * we cannot simply called update_context_time()
+	 * because of locking issue as we are called in
+	 * NMI context
+	 */
+	if (read_format & PERF_FORMAT_TOTAL_TIMES) {
+		now = perf_clock();
+		ctx_time = event->shadow_ctx_time + now;
+		enabled = ctx_time - event->tstamp_enabled;
+		running = ctx_time - event->tstamp_running;
+	}
+
 	if (event->attr.read_format & PERF_FORMAT_GROUP)
-		perf_output_read_group(handle, event);
+		perf_output_read_group(handle, event, enabled, running);
 	else
-		perf_output_read_one(handle, event);
+		perf_output_read_one(handle, event, enabled, running);
 }
 
 void perf_output_sample(struct perf_output_handle *handle,
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index c7a8f453919..aeaa7f84682 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -121,10 +121,10 @@ static inline int pm_qos_get_value(struct pm_qos_object *o)
 
 	switch (o->type) {
 	case PM_QOS_MIN:
-		return plist_last(&o->requests)->prio;
+		return plist_first(&o->requests)->prio;
 
 	case PM_QOS_MAX:
-		return plist_first(&o->requests)->prio;
+		return plist_last(&o->requests)->prio;
 
 	default:
 		/* runtime check for not using enum */
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 29bff6117ab..a5aff3ebad3 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -246,9 +246,13 @@ config PM_OPS
 	depends on PM_SLEEP || PM_RUNTIME
 	default y
 
+config ARCH_HAS_OPP
+	bool
+
 config PM_OPP
 	bool "Operating Performance Point (OPP) Layer library"
 	depends on PM
+	depends on ARCH_HAS_OPP
 	---help---
 	  SOCs have a standard set of tuples consisting of frequency and
 	  voltage pairs that the device will support per voltage domain. This
diff --git a/kernel/printk.c b/kernel/printk.c
index b2ebaee8c37..9a2264fc42c 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -261,6 +261,12 @@ static inline void boot_delay_msec(void)
 }
 #endif
 
+#ifdef CONFIG_SECURITY_DMESG_RESTRICT
+int dmesg_restrict = 1;
+#else
+int dmesg_restrict;
+#endif
+
 int do_syslog(int type, char __user *buf, int len, bool from_file)
 {
 	unsigned i, j, limit, count;
@@ -268,7 +274,20 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
 	char c;
 	int error = 0;
 
-	error = security_syslog(type, from_file);
+	/*
+	 * If this is from /proc/kmsg we only do the capabilities checks
+	 * at open time.
+	 */
+	if (type == SYSLOG_ACTION_OPEN || !from_file) {
+		if (dmesg_restrict && !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+		if ((type != SYSLOG_ACTION_READ_ALL &&
+		     type != SYSLOG_ACTION_SIZE_BUFFER) &&
+		    !capable(CAP_SYS_ADMIN))
+			return -EPERM;
+	}
+
+	error = security_syslog(type);
 	if (error)
 		return error;
 
diff --git a/kernel/range.c b/kernel/range.c
index 471b66acabb..37fa9b99ad5 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -119,7 +119,7 @@ static int cmp_range(const void *x1, const void *x2)
 
 int clean_sort_range(struct range *range, int az)
 {
-	int i, j, k = az - 1, nr_range = 0;
+	int i, j, k = az - 1, nr_range = az;
 
 	for (i = 0; i < k; i++) {
 		if (range[i].end)
diff --git a/kernel/relay.c b/kernel/relay.c
index c7cf397fb92..859ea5a9605 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -70,17 +70,10 @@ static const struct vm_operations_struct relay_file_mmap_ops = {
  */
 static struct page **relay_alloc_page_array(unsigned int n_pages)
 {
-	struct page **array;
-	size_t pa_size = n_pages * sizeof(struct page *);
-
-	if (pa_size > PAGE_SIZE) {
-		array = vmalloc(pa_size);
-		if (array)
-			memset(array, 0, pa_size);
-	} else {
-		array = kzalloc(pa_size, GFP_KERNEL);
-	}
-	return array;
+	const size_t pa_size = n_pages * sizeof(struct page *);
+	if (pa_size > PAGE_SIZE)
+		return vzalloc(pa_size);
+	return kzalloc(pa_size, GFP_KERNEL);
 }
 
 /*
diff --git a/kernel/sched.c b/kernel/sched.c
index aa14a56f9d0..dc91a4d09ac 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -560,18 +560,8 @@ struct rq {
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
-static inline
-void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
-{
-	rq->curr->sched_class->check_preempt_curr(rq, p, flags);
 
-	/*
-	 * A queue event has occurred, and we're going to schedule. In
-	 * this case, we can save a useless back to back clock update.
-	 */
-	if (test_tsk_need_resched(p))
-		rq->skip_clock_update = 1;
-}
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
 
 static inline int cpu_of(struct rq *rq)
 {
@@ -2118,6 +2108,31 @@ static inline void check_class_changed(struct rq *rq, struct task_struct *p,
 		p->sched_class->prio_changed(rq, p, oldprio, running);
 }
 
+static void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
+{
+	const struct sched_class *class;
+
+	if (p->sched_class == rq->curr->sched_class) {
+		rq->curr->sched_class->check_preempt_curr(rq, p, flags);
+	} else {
+		for_each_class(class) {
+			if (class == rq->curr->sched_class)
+				break;
+			if (class == p->sched_class) {
+				resched_task(rq->curr);
+				break;
+			}
+		}
+	}
+
+	/*
+	 * A queue event has occurred, and we're going to schedule. In
+	 * this case, we can save a useless back to back clock update.
+	 */
+	if (test_tsk_need_resched(rq->curr))
+		rq->skip_clock_update = 1;
+}
+
 #ifdef CONFIG_SMP
 /*
  * Is this task likely cache-hot:
@@ -6960,6 +6975,8 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
 	if (cpu != group_first_cpu(sd->groups))
 		return;
 
+	sd->groups->group_weight = cpumask_weight(sched_group_cpus(sd->groups));
+
 	child = sd->child;
 
 	sd->groups->cpu_power = 0;
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index f4f6a8326dd..52ab113d8bb 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1654,12 +1654,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
 	struct cfs_rq *cfs_rq = task_cfs_rq(curr);
 	int scale = cfs_rq->nr_running >= sched_nr_latency;
 
-	if (unlikely(rt_prio(p->prio)))
-		goto preempt;
-
-	if (unlikely(p->sched_class != &fair_sched_class))
-		return;
-
 	if (unlikely(se == pse))
 		return;
 
@@ -2035,13 +2029,16 @@ struct sd_lb_stats {
 	unsigned long this_load_per_task;
 	unsigned long this_nr_running;
 	unsigned long this_has_capacity;
+	unsigned int this_idle_cpus;
 
 	/* Statistics of the busiest group */
+	unsigned int busiest_idle_cpus;
 	unsigned long max_load;
 	unsigned long busiest_load_per_task;
 	unsigned long busiest_nr_running;
 	unsigned long busiest_group_capacity;
 	unsigned long busiest_has_capacity;
+	unsigned int busiest_group_weight;
 
 	int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2063,6 +2060,8 @@ struct sg_lb_stats {
 	unsigned long sum_nr_running; /* Nr tasks running in the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long group_capacity;
+	unsigned long idle_cpus;
+	unsigned long group_weight;
 	int group_imb; /* Is there an imbalance in the group ? */
 	int group_has_capacity; /* Is there extra capacity in the group? */
 };
@@ -2431,7 +2430,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 		sgs->group_load += load;
 		sgs->sum_nr_running += rq->nr_running;
 		sgs->sum_weighted_load += weighted_cpuload(i);
-
+		if (idle_cpu(i))
+			sgs->idle_cpus++;
 	}
 
 	/*
@@ -2469,6 +2469,7 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
 	sgs->group_capacity = DIV_ROUND_CLOSEST(group->cpu_power, SCHED_LOAD_SCALE);
 	if (!sgs->group_capacity)
 		sgs->group_capacity = fix_small_capacity(sd, group);
+	sgs->group_weight = group->group_weight;
 
 	if (sgs->group_capacity > sgs->sum_nr_running)
 		sgs->group_has_capacity = 1;
@@ -2576,13 +2577,16 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
 		sds->this_nr_running = sgs.sum_nr_running;
 		sds->this_load_per_task = sgs.sum_weighted_load;
 		sds->this_has_capacity = sgs.group_has_capacity;
+		sds->this_idle_cpus = sgs.idle_cpus;
 	} else if (update_sd_pick_busiest(sd, sds, sg, &sgs, this_cpu)) {
 		sds->max_load = sgs.avg_load;
 		sds->busiest = sg;
 		sds->busiest_nr_running = sgs.sum_nr_running;
+		sds->busiest_idle_cpus = sgs.idle_cpus;
 		sds->busiest_group_capacity = sgs.group_capacity;
 		sds->busiest_load_per_task = sgs.sum_weighted_load;
 		sds->busiest_has_capacity = sgs.group_has_capacity;
+		sds->busiest_group_weight = sgs.group_weight;
 		sds->group_imb = sgs.group_imb;
 	}
 
@@ -2860,8 +2864,26 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
-	if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
-		goto out_balanced;
+	/*
+	 * In the CPU_NEWLY_IDLE, use imbalance_pct to be conservative.
+	 * And to check for busy balance use !idle_cpu instead of
+	 * CPU_NOT_IDLE. This is because HT siblings will use CPU_NOT_IDLE
+	 * even when they are idle.
+	 */
+	if (idle == CPU_NEWLY_IDLE || !idle_cpu(this_cpu)) {
+		if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
+			goto out_balanced;
+	} else {
+		/*
+		 * This cpu is idle. If the busiest group load doesn't
+		 * have more tasks than the number of available cpu's and
+		 * there is no imbalance between this and busiest group
+		 * wrt to idle cpu's, it is balanced.
+		 */
+		if ((sds.this_idle_cpus <= sds.busiest_idle_cpus + 1) &&
+		    sds.busiest_nr_running <= sds.busiest_group_weight)
+			goto out_balanced;
+	}
 
 force_balance:
 	/* Looks like there is an imbalance. Compute it */
diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c
index 45bddc0c104..2bf6b47058c 100644
--- a/kernel/sched_stoptask.c
+++ b/kernel/sched_stoptask.c
@@ -19,14 +19,14 @@ select_task_rq_stop(struct rq *rq, struct task_struct *p,
 static void
 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
 {
-	resched_task(rq->curr); /* we preempt everything */
+	/* we're never preempted */
 }
 
 static struct task_struct *pick_next_task_stop(struct rq *rq)
 {
 	struct task_struct *stop = rq->stop;
 
-	if (stop && stop->state == TASK_RUNNING)
+	if (stop && stop->se.on_rq)
 		return stop;
 
 	return NULL;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index c33a1edb799..5abfa151855 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -702,6 +702,15 @@ static struct ctl_table kern_table[] = {
 		.extra1		= &zero,
 		.extra2		= &ten_thousand,
 	},
+	{
+		.procname	= "dmesg_restrict",
+		.data		= &dmesg_restrict,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 #endif
 	{
 		.procname	= "ngroups_max",
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index e04b8bcdef8..ea37e2ff416 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -126,7 +126,7 @@ if FTRACE
 config FUNCTION_TRACER
 	bool "Kernel Function Tracer"
 	depends on HAVE_FUNCTION_TRACER
-	select FRAME_POINTER if (!ARM_UNWIND)
+	select FRAME_POINTER if !ARM_UNWIND && !S390
 	select KALLSYMS
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index bc251ed6672..7b8ec028154 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
 				 BLK_TC_ACT(BLK_TC_WRITE) };
 
-#define BLK_TC_HARDBARRIER	BLK_TC_BARRIER
 #define BLK_TC_RAHEAD		BLK_TC_AHEAD
 
 /* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		return;
 
 	what |= ddir_act[rw & WRITE];
-	what |= MASK_TC_BIT(rw, HARDBARRIER);
 	what |= MASK_TC_BIT(rw, SYNC);
 	what |= MASK_TC_BIT(rw, RAHEAD);
 	what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
 
 	if (rw & REQ_RAHEAD)
 		rwbs[i++] = 'A';
-	if (rw & REQ_HARDBARRIER)
-		rwbs[i++] = 'B';
 	if (rw & REQ_SYNC)
 		rwbs[i++] = 'S';
 	if (rw & REQ_META)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 82d9b8106cd..04208415798 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -17,7 +17,6 @@
 #include <linux/writeback.h>
 #include <linux/kallsyms.h>
 #include <linux/seq_file.h>
-#include <linux/smp_lock.h>
 #include <linux/notifier.h>
 #include <linux/irqflags.h>
 #include <linux/debugfs.h>
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index bafba687a6d..6e3c41a4024 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -43,7 +43,7 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __initdata no_watchdog;
+static int no_watchdog;
 
 
 /* boot commands */
