| author | Kevin Hilman <khilman@linaro.org> | 2014-09-05 11:01:52 -0400 |
|---|---|---|
| committer | Kevin Hilman <khilman@linaro.org> | 2014-09-05 11:05:56 -0400 |
| commit | 95f6e8142d82789eca977ccdd6153a48b343fde9 (patch) | |
| tree | ef39263a5bbc5c497d8603b77d2db1316a2845fc /kernel | |
| parent | 28c2260f13c8ea3be6fcba1609502874f868284b (diff) | |
| parent | c7cc9ba11f8c09a4d12af0fc4aa9f9b026cdd354 (diff) | |
Merge tag 'omap-fixes-against-v3.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap into fixes
Merge "omap fixes against v3.17-rc3" from Tony Lindgren:
A few fixes for omaps, mostly to get various devices such as
I2C, NAND, DDR3 and USB working properly on the new am437x and
dra7 hardware. There's also a clock fix for omap3.
Also included are two minor cosmetic fixes to the recently added
hardware support that are not strictly fixes: one downgrades a
GPMC warning into a debug statement, and the other fixes the
confusing comments for the dra7-evm spi1 mux.
Note that these are all .dts changes except for a GPMC change.
* tag 'omap-fixes-against-v3.17-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap: (255 commits)
ARM: dts: dra7-evm: Add vtt regulator support
ARM: dts: dra7-evm: Fix spi1 mux documentation
ARM: dts: am43x-epos-evm: Disable QSPI to prevent conflict with GPMC-NAND
ARM: OMAP2+: gpmc: Don't complain if wait pin is used without r/w monitoring
ARM: dts: am43xx-epos-evm: Don't use read/write wait monitoring
ARM: dts: am437x-gp-evm: Don't use read/write wait monitoring
ARM: dts: am437x-gp-evm: Use BCH16 ECC scheme instead of BCH8
ARM: dts: am43x-epos-evm: Use BCH16 ECC scheme instead of BCH8
ARM: dts: am4372: fix USB regs size
ARM: dts: am437x-gp: switch i2c0 to 100KHz
ARM: dts: dra7-evm: Fix 8th NAND partition's name
ARM: dts: dra7-evm: Fix i2c3 pinmux and frequency
Linux 3.17-rc3
...
Signed-off-by: Kevin Hilman <khilman@linaro.org>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/kexec.c | 11 |
| -rw-r--r-- | kernel/resource.c | 11 |
| -rw-r--r-- | kernel/trace/ring_buffer.c | 16 |

3 files changed, 30 insertions, 8 deletions
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 0b49a0a58102..2bee072268d9 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -64,7 +64,9 @@ bool kexec_in_progress = false;
 char __weak kexec_purgatory[0];
 size_t __weak kexec_purgatory_size = 0;
 
+#ifdef CONFIG_KEXEC_FILE
 static int kexec_calculate_store_digests(struct kimage *image);
+#endif
 
 /* Location of the reserved area for the crash kernel */
 struct resource crashk_res = {
@@ -341,6 +343,7 @@ out_free_image:
 	return ret;
 }
 
+#ifdef CONFIG_KEXEC_FILE
 static int copy_file_from_fd(int fd, void **buf, unsigned long *buf_len)
 {
 	struct fd f = fdget(fd);
@@ -612,6 +615,9 @@ out_free_image:
 	kfree(image);
 	return ret;
 }
+#else /* CONFIG_KEXEC_FILE */
+static inline void kimage_file_post_load_cleanup(struct kimage *image) { }
+#endif /* CONFIG_KEXEC_FILE */
 
 static int kimage_is_destination_range(struct kimage *image,
 					unsigned long start,
@@ -1375,6 +1381,7 @@ COMPAT_SYSCALL_DEFINE4(kexec_load, compat_ulong_t, entry,
 }
 #endif
 
+#ifdef CONFIG_KEXEC_FILE
 SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
 		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
 		unsigned long, flags)
@@ -1451,6 +1458,8 @@ out:
 	return ret;
 }
 
+#endif /* CONFIG_KEXEC_FILE */
+
 void crash_kexec(struct pt_regs *regs)
 {
 	/* Take the kexec_mutex here to prevent sys_kexec_load
@@ -2006,6 +2015,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 
 subsys_initcall(crash_save_vmcoreinfo_init);
 
+#ifdef CONFIG_KEXEC_FILE
 static int __kexec_add_segment(struct kimage *image, char *buf,
 			       unsigned long bufsz, unsigned long mem,
 			       unsigned long memsz)
@@ -2682,6 +2692,7 @@ int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
 
 	return 0;
 }
+#endif /* CONFIG_KEXEC_FILE */
 
 /*
  * Move into place and start executing a preloaded standalone
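The kexec.c hunks above all follow one pattern: the kexec_file_load() machinery is fenced behind CONFIG_KEXEC_FILE, and the one helper that common code still calls, kimage_file_post_load_cleanup(), gets an empty static inline stub when the option is off. Below is a minimal, self-contained sketch of that pattern; CONFIG_MYFEATURE, myfeature_load() and myfeature_cleanup() are hypothetical names for illustration only, not anything in kernel/kexec.c.

```c
#include <stdio.h>

#ifdef CONFIG_MYFEATURE
/* Real implementation, built only when the feature is configured in. */
static int myfeature_load(const char *path)
{
	printf("loading %s\n", path);
	return 0;
}

static void myfeature_cleanup(void)
{
	printf("cleanup\n");
}
#else /* !CONFIG_MYFEATURE */
/* Stubs keep common code (error paths, cleanup) compiling unchanged. */
static inline int myfeature_load(const char *path) { (void)path; return -1; }
static inline void myfeature_cleanup(void) { }
#endif /* CONFIG_MYFEATURE */

int main(void)
{
	if (myfeature_load("image") == 0)
		printf("loaded\n");
	myfeature_cleanup();
	return 0;
}
```

Compiling with and without -DCONFIG_MYFEATURE exercises both paths; because the disabled case provides inline stubs, callers never need their own #ifdefs.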
diff --git a/kernel/resource.c b/kernel/resource.c
index da14b8d09296..60c5a3856ab7 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -351,15 +351,12 @@ static int find_next_iomem_res(struct resource *res, char *name,
 	end = res->end;
 	BUG_ON(start >= end);
 
-	read_lock(&resource_lock);
-
-	if (first_level_children_only) {
-		p = iomem_resource.child;
+	if (first_level_children_only)
 		sibling_only = true;
-	} else
-		p = &iomem_resource;
 
-	while ((p = next_resource(p, sibling_only))) {
+	read_lock(&resource_lock);
+
+	for (p = iomem_resource.child; p; p = next_resource(p, sibling_only)) {
 		if (p->flags != res->flags)
 			continue;
 		if (name && strcmp(p->name, name))
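The resource.c hunk makes find_next_iomem_res() always start the walk at iomem_resource.child and advance with a single for loop; first_level_children_only now only decides whether the iterator follows siblings on that level or descends depth-first. A rough, self-contained sketch of that kind of iterator is below, assuming a simplified node type; struct node and next_node() are illustrative stand-ins, not the kernel's struct resource or next_resource().

```c
#include <stdbool.h>
#include <stdio.h>

struct node {
	const char *name;
	struct node *parent, *sibling, *child;
};

static struct node *next_node(struct node *p, bool sibling_only)
{
	/* Siblings only: stay on the current level. */
	if (sibling_only)
		return p->sibling;
	/* Depth-first: descend if possible, else move to a sibling, climbing up as needed. */
	if (p->child)
		return p->child;
	while (!p->sibling && p->parent)
		p = p->parent;
	return p->sibling;
}

int main(void)
{
	struct node root = { "root", NULL, NULL, NULL };
	struct node a    = { "a",  &root, NULL, NULL };
	struct node a1   = { "a1", &a,    NULL, NULL };
	struct node b    = { "b",  &root, NULL, NULL };

	root.child = &a;
	a.sibling  = &b;
	a.child    = &a1;

	/* Mirrors the rewritten loop: for (p = root.child; p; p = next_node(p, ...)) */
	for (struct node *p = root.child; p; p = next_node(p, false))
		printf("%s\n", p->name);	/* prints a, a1, b */
	return 0;
}
```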
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index afb04b9b818a..b38fb2b9e237 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -626,8 +626,22 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 		work = &cpu_buffer->irq_work;
 	}
 
-	work->waiters_pending = true;
 	poll_wait(filp, &work->waiters, poll_table);
+	work->waiters_pending = true;
+	/*
+	 * There's a tight race between setting the waiters_pending and
+	 * checking if the ring buffer is empty.  Once the waiters_pending bit
+	 * is set, the next event will wake the task up, but we can get stuck
+	 * if there's only a single event in.
+	 *
+	 * FIXME: Ideally, we need a memory barrier on the writer side as well,
+	 * but adding a memory barrier to all events will cause too much of a
+	 * performance hit in the fast path.  We only need a memory barrier when
+	 * the buffer goes from empty to having content.  But as this race is
+	 * extremely small, and it's not a problem if another event comes in, we
+	 * will fix it later.
+	 */
+	smp_mb();
 
 	if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
 	    (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
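The ring_buffer.c hunk reorders ring_buffer_poll_wait(): waiters_pending is set only after poll_wait() has queued the waiter, and an smp_mb() sits between that store and the buffer-emptiness check so the two cannot be reordered and a lone event cannot slip through unnoticed. The sketch below illustrates that store/fence/load shape with C11 atomics in userspace; it is only an analogy, and it includes the writer-side fence that the in-tree FIXME deliberately leaves out for performance. The names waiters_pending, buffer_empty and the two helper functions here are hypothetical, not the ring-buffer implementation.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool waiters_pending;
static atomic_bool buffer_empty = true;

/* Waiter side: publish the flag, fence, then decide whether to sleep. */
static bool waiter_should_sleep(void)
{
	atomic_store_explicit(&waiters_pending, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* analogue of smp_mb() */
	return atomic_load_explicit(&buffer_empty, memory_order_relaxed);
}

/* Writer side: make data visible, fence, then check whether a wakeup is needed. */
static bool writer_should_wake(void)
{
	atomic_store_explicit(&buffer_empty, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load_explicit(&waiters_pending, memory_order_relaxed);
}

int main(void)
{
	printf("waiter sleeps: %d\n", waiter_should_sleep());
	printf("writer wakes:  %d\n", writer_should_wake());
	return 0;
}
```

With both fences in place, at least one side always observes the other's store, so either the waiter sees the buffer is non-empty or the writer sees a pending waiter and issues the wakeup.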
